
gitlab.com/gitlab-org/gitlab-pages.git
author    Nick Thomas <nick@gitlab.com>  2017-02-07 21:58:00 +0300
committer Nick Thomas <nick@gitlab.com>  2017-02-07 21:58:00 +0300
commit    eae671b28e0d7eb621d9fdca9abf140b5094af92 (patch)
tree      88bdfc87ba74866da6224ddea200062fee31e5aa
parent    bc4aa822bff4ec2a3a3fdb1daa915782331ad08a (diff)
parent    ddff37bc7cecf2f73a7bb82d60391505ad96a426 (diff)
Merge branch 'zj-prometheus' into 'master'

Prometheus monitoring for GitLab Pages

Closes #42

See merge request !11
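
For context, this merge wires the Prometheus Go client into the Pages daemon. A minimal, self-contained sketch of the underlying pattern — register a collector, then expose the default registry over HTTP for Prometheus to scrape — is shown below. The metric name and port are illustrative, not taken from this commit; the `promhttp.HandlerFor(prometheus.DefaultGatherer, ...)` call is the same one app.go uses further down.

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// requestsServed is an illustrative collector; the real ones live in
// metrics/metrics.go below.
var requestsServed = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "example_requests_served_total",
	Help: "Illustrative counter for this sketch.",
})

func main() {
	prometheus.MustRegister(requestsServed)

	// Serve everything registered with the default registry on /metrics.
	http.Handle("/metrics", promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9101", nil))
}
```
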
-rw-r--r--  Godeps/Godeps.json | 57
-rw-r--r--  README.md | 13
-rw-r--r--  acceptance_test.go | 25
-rw-r--r--  app.go | 23
-rw-r--r--  app_config.go | 7
-rw-r--r--  domains.go | 9
-rw-r--r--  helpers_test.go | 60
-rw-r--r--  main.go | 58
-rw-r--r--  metrics/metrics.go | 42
-rw-r--r--  vendor/github.com/beorn7/perks/LICENSE | 20
-rw-r--r--  vendor/github.com/beorn7/perks/quantile/exampledata.txt | 2388
-rw-r--r--  vendor/github.com/beorn7/perks/quantile/stream.go | 292
-rw-r--r--  vendor/github.com/golang/protobuf/AUTHORS | 3
-rw-r--r--  vendor/github.com/golang/protobuf/CONTRIBUTORS | 3
-rw-r--r--  vendor/github.com/golang/protobuf/LICENSE (renamed from vendor/golang.org/x/crypto/LICENSE) | 12
-rw-r--r--  vendor/github.com/golang/protobuf/proto/Makefile | 43
-rw-r--r--  vendor/github.com/golang/protobuf/proto/clone.go | 229
-rw-r--r--  vendor/github.com/golang/protobuf/proto/decode.go | 874
-rw-r--r--  vendor/github.com/golang/protobuf/proto/encode.go | 1363
-rw-r--r--  vendor/github.com/golang/protobuf/proto/equal.go | 296
-rw-r--r--  vendor/github.com/golang/protobuf/proto/extensions.go | 586
-rw-r--r--  vendor/github.com/golang/protobuf/proto/lib.go | 898
-rw-r--r--  vendor/github.com/golang/protobuf/proto/message_set.go | 311
-rw-r--r--  vendor/github.com/golang/protobuf/proto/pointer_reflect.go | 484
-rw-r--r--  vendor/github.com/golang/protobuf/proto/pointer_unsafe.go | 270
-rw-r--r--  vendor/github.com/golang/protobuf/proto/properties.go | 864
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text.go | 854
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text_parser.go | 891
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE | 201
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE | 1
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore | 1
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile | 7
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go | 75
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go | 16
-rw-r--r--  vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go | 46
-rw-r--r--  vendor/github.com/prometheus/client_golang/AUTHORS.md | 18
-rw-r--r--  vendor/github.com/prometheus/client_golang/LICENSE | 201
-rw-r--r--  vendor/github.com/prometheus/client_golang/NOTICE | 23
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/.gitignore | 1
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/README.md | 1
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/collector.go | 75
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/counter.go | 172
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/desc.go | 205
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/doc.go | 181
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go | 119
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/fnv.go | 29
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/gauge.go | 140
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/go_collector.go | 263
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/histogram.go | 444
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/http.go | 499
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/metric.go | 166
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/process_collector.go | 142
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go | 201
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/registry.go | 806
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/summary.go | 534
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/untyped.go | 138
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/value.go | 234
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/vec.go | 404
-rw-r--r--  vendor/github.com/prometheus/client_model/AUTHORS.md | 13
-rw-r--r--  vendor/github.com/prometheus/client_model/LICENSE | 201
-rw-r--r--  vendor/github.com/prometheus/client_model/NOTICE | 5
-rw-r--r--  vendor/github.com/prometheus/client_model/go/metrics.pb.go | 364
-rw-r--r--  vendor/github.com/prometheus/common/AUTHORS.md | 11
-rw-r--r--  vendor/github.com/prometheus/common/LICENSE | 201
-rw-r--r--  vendor/github.com/prometheus/common/NOTICE | 5
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/decode.go | 412
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/encode.go | 88
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/expfmt.go | 40
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/fuzz.go | 36
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/text_create.go | 300
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/text_parse.go | 753
-rw-r--r--  vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt | 67
-rw-r--r--  vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go | 162
-rw-r--r--  vendor/github.com/prometheus/common/model/alert.go | 136
-rw-r--r--  vendor/github.com/prometheus/common/model/fingerprinting.go | 105
-rw-r--r--  vendor/github.com/prometheus/common/model/fnv.go | 42
-rw-r--r--  vendor/github.com/prometheus/common/model/labels.go | 206
-rw-r--r--  vendor/github.com/prometheus/common/model/labelset.go | 169
-rw-r--r--  vendor/github.com/prometheus/common/model/metric.go | 98
-rw-r--r--  vendor/github.com/prometheus/common/model/model.go | 16
-rw-r--r--  vendor/github.com/prometheus/common/model/signature.go | 144
-rw-r--r--  vendor/github.com/prometheus/common/model/silence.go | 106
-rw-r--r--  vendor/github.com/prometheus/common/model/time.go | 249
-rw-r--r--  vendor/github.com/prometheus/common/model/value.go | 403
-rw-r--r--  vendor/github.com/prometheus/procfs/.travis.yml | 5
-rw-r--r--  vendor/github.com/prometheus/procfs/AUTHORS.md | 20
-rw-r--r--  vendor/github.com/prometheus/procfs/CONTRIBUTING.md | 18
-rw-r--r--  vendor/github.com/prometheus/procfs/LICENSE | 201
-rw-r--r--  vendor/github.com/prometheus/procfs/Makefile | 6
-rw-r--r--  vendor/github.com/prometheus/procfs/NOTICE | 7
-rw-r--r--  vendor/github.com/prometheus/procfs/README.md | 10
-rw-r--r--  vendor/github.com/prometheus/procfs/doc.go | 45
-rw-r--r--  vendor/github.com/prometheus/procfs/fs.go | 33
-rw-r--r--  vendor/github.com/prometheus/procfs/ipvs.go | 224
-rw-r--r--  vendor/github.com/prometheus/procfs/mdstat.go | 138
-rw-r--r--  vendor/github.com/prometheus/procfs/proc.go | 212
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_io.go | 55
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_limits.go | 137
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_stat.go | 175
-rw-r--r--  vendor/github.com/prometheus/procfs/stat.go | 56
-rw-r--r--  vendor/golang.org/x/crypto/PATENTS | 22
-rw-r--r--  vendor/golang.org/x/crypto/ssh/terminal/terminal.go | 892
-rw-r--r--  vendor/golang.org/x/crypto/ssh/terminal/util.go | 128
-rw-r--r--  vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go | 12
-rw-r--r--  vendor/golang.org/x/crypto/ssh/terminal/util_linux.go | 11
-rw-r--r--  vendor/golang.org/x/crypto/ssh/terminal/util_windows.go | 174
-rw-r--r--  vendor/golang.org/x/net/AUTHORS | 3
-rw-r--r--  vendor/golang.org/x/net/CONTRIBUTORS | 3
-rw-r--r--  vendor/golang.org/x/net/http2/client_conn_pool.go | 256
-rw-r--r--  vendor/golang.org/x/net/http2/configure_transport.go | 80
-rw-r--r--  vendor/golang.org/x/net/http2/errors.go | 55
-rw-r--r--  vendor/golang.org/x/net/http2/frame.go | 479
-rw-r--r--  vendor/golang.org/x/net/http2/go16.go | 43
-rw-r--r--  vendor/golang.org/x/net/http2/go17.go | 94
-rw-r--r--  vendor/golang.org/x/net/http2/h2demo/.gitignore | 5
-rw-r--r--  vendor/golang.org/x/net/http2/h2demo/Makefile | 5
-rw-r--r--  vendor/golang.org/x/net/http2/h2demo/README | 16
-rw-r--r--  vendor/golang.org/x/net/http2/h2demo/h2demo.go | 436
-rw-r--r--  vendor/golang.org/x/net/http2/h2demo/launch.go | 302
-rw-r--r--  vendor/golang.org/x/net/http2/h2demo/rootCA.key | 27
-rw-r--r--  vendor/golang.org/x/net/http2/h2demo/rootCA.pem | 26
-rw-r--r--  vendor/golang.org/x/net/http2/h2demo/rootCA.srl | 1
-rw-r--r--  vendor/golang.org/x/net/http2/h2demo/server.crt | 20
-rw-r--r--  vendor/golang.org/x/net/http2/h2demo/server.key | 27
-rw-r--r--  vendor/golang.org/x/net/http2/h2i/README.md | 97
-rw-r--r--  vendor/golang.org/x/net/http2/h2i/h2i.go | 486
-rw-r--r--  vendor/golang.org/x/net/http2/headermap.go | 1
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/encode.go | 2
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/hpack.go | 30
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/huffman.go | 42
-rw-r--r--  vendor/golang.org/x/net/http2/http2.go | 127
-rw-r--r--  vendor/golang.org/x/net/http2/not_go16.go | 46
-rw-r--r--  vendor/golang.org/x/net/http2/not_go17.go | 51
-rw-r--r--  vendor/golang.org/x/net/http2/pipe.go | 87
-rw-r--r--  vendor/golang.org/x/net/http2/server.go | 958
-rw-r--r--  vendor/golang.org/x/net/http2/transport.go | 1715
-rw-r--r--  vendor/golang.org/x/net/http2/write.go | 92
-rw-r--r--  vendor/golang.org/x/net/lex/httplex/httplex.go | 312
138 files changed, 25613 insertions(+), 3542 deletions(-)
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index a3ef6478..4a9fbf8e 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -1,15 +1,60 @@
{
"ImportPath": "gitlab.com/gitlab-org/gitlab-pages",
"GoVersion": "go1.5",
+ "GodepVersion": "v79",
"Packages": [
"./..."
],
"Deps": [
{
+ "ImportPath": "github.com/beorn7/perks/quantile",
+ "Rev": "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
+ },
+ {
+ "ImportPath": "github.com/golang/protobuf/proto",
+ "Rev": "f592bd283e9ef86337a432eb50e592278c3d534d"
+ },
+ {
"ImportPath": "github.com/kardianos/osext",
"Rev": "efacde03154693404c65e7aa7d461ac9014acd0c"
},
{
+ "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
+ "Comment": "v1.0.0-2-gc12348c",
+ "Rev": "c12348ce28de40eed0136aa2b644d0ee0650e56c"
+ },
+ {
+ "ImportPath": "github.com/prometheus/client_golang/prometheus",
+ "Comment": "v0.8.0-9-g334af01",
+ "Rev": "334af0119a8f8fb6af5bb950d535c482cac7f836"
+ },
+ {
+ "ImportPath": "github.com/prometheus/client_golang/prometheus/promhttp",
+ "Comment": "v0.8.0-9-g334af01",
+ "Rev": "334af0119a8f8fb6af5bb950d535c482cac7f836"
+ },
+ {
+ "ImportPath": "github.com/prometheus/client_model/go",
+ "Comment": "model-0.0.2-12-gfa8ad6f",
+ "Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
+ },
+ {
+ "ImportPath": "github.com/prometheus/common/expfmt",
+ "Rev": "ebdfc6da46522d58825777cf1f90490a5b1ef1d8"
+ },
+ {
+ "ImportPath": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
+ "Rev": "ebdfc6da46522d58825777cf1f90490a5b1ef1d8"
+ },
+ {
+ "ImportPath": "github.com/prometheus/common/model",
+ "Rev": "ebdfc6da46522d58825777cf1f90490a5b1ef1d8"
+ },
+ {
+ "ImportPath": "github.com/prometheus/procfs",
+ "Rev": "abf152e5f3e97f2fafac028d2cc06c1feb87ffa5"
+ },
+ {
"ImportPath": "github.com/stretchr/testify/assert",
"Rev": "1297dc01ed0a819ff634c89707081a4df43baf6b"
},
@@ -18,12 +63,16 @@
"Rev": "1297dc01ed0a819ff634c89707081a4df43baf6b"
},
{
- "ImportPath": "golang.org/x/crypto/ssh/terminal",
- "Rev": "1351f936d976c60a0a48d728281922cf63eafb8d"
+ "ImportPath": "golang.org/x/net/http2",
+ "Rev": "07b51741c1d6423d4a6abab1c49940ec09cb1aaf"
+ },
+ {
+ "ImportPath": "golang.org/x/net/http2/hpack",
+ "Rev": "07b51741c1d6423d4a6abab1c49940ec09cb1aaf"
},
{
- "ImportPath": "golang.org/x/net/http2",
- "Rev": "b4e17d61b15679caf2335da776c614169a1b4643"
+ "ImportPath": "golang.org/x/net/lex/httplex",
+ "Rev": "07b51741c1d6423d4a6abab1c49940ec09cb1aaf"
}
]
}
diff --git a/README.md b/README.md
index 087a2152..5310d47e 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@ In large environment it can be time consuming to list all directories, and CNAME
4. Periodically (every second) it checks the `pages-root/.update` file and reads its content to verify if there was update.
To force route refresh, reload of configs fill the `pages-root/.update` with random content.
-The reload will be done asynchronously, and it will not interrupt the current requests.
+The reload will be done asynchronously, and it will not interrupt the current requests.
### How it serves content
@@ -72,6 +72,17 @@ go build
This is most useful in dual-stack environments (IPv4+IPv6) where both Gitlab Pages and another HTTP server have to co-exist on the same server.
+### Enable Prometheus Metrics
+
+For monitoring purposes, one could pass the `-metrics-address` flag when
+starting. This will expose general metrics about the Go runtime and pages
+application for [Prometheus](https://prometheus.io/) to scrape.
+
+Example:
+```
+./gitlab-pages -listen-http ":8090" -metrics-address ":9101" -pages-root path/to/gitlab/shared/pages -pages-domain example.com
+```
+
### License
MIT
diff --git a/acceptance_test.go b/acceptance_test.go
index 2e36acd1..1cfd133a 100644
--- a/acceptance_test.go
+++ b/acceptance_test.go
@@ -2,9 +2,11 @@ package main
import (
"flag"
- "github.com/stretchr/testify/assert"
+ "io/ioutil"
"net/http"
"testing"
+
+ "github.com/stretchr/testify/assert"
)
var shouldRun = flag.Bool("run-acceptance-tests", false, "Run the acceptance tests?")
@@ -33,7 +35,7 @@ func skipUnlessEnabled(t *testing.T) {
func TestUnknownHostReturnsNotFound(t *testing.T) {
skipUnlessEnabled(t)
- teardown := RunPagesProcess(t, *pagesBinary, listeners)
+ teardown := RunPagesProcess(t, *pagesBinary, listeners, "")
defer teardown()
for _, spec := range listeners {
@@ -48,7 +50,7 @@ func TestUnknownHostReturnsNotFound(t *testing.T) {
func TestKnownHostReturns200(t *testing.T) {
skipUnlessEnabled(t)
- teardown := RunPagesProcess(t, *pagesBinary, listeners)
+ teardown := RunPagesProcess(t, *pagesBinary, listeners, "")
defer teardown()
for _, spec := range listeners {
@@ -60,3 +62,20 @@ func TestKnownHostReturns200(t *testing.T) {
}
}
}
+
+func TestPrometheusMetricsCanBeScraped(t *testing.T) {
+ skipUnlessEnabled(t)
+ listener := []ListenSpec{{"http", "127.0.0.1", "37003"}}
+ teardown := RunPagesProcess(t, *pagesBinary, listener, ":42345")
+ defer teardown()
+
+ resp, err := http.Get("http://localhost:42345/metrics")
+
+ if assert.NoError(t, err) {
+ defer resp.Body.Close()
+ body, _ := ioutil.ReadAll(resp.Body)
+
+ assert.Contains(t, string(body), "gitlab_pages_http_sessions_active 0")
+ assert.Contains(t, string(body), "gitlab_pages_domains_served_total 7")
+ }
+}
diff --git a/app.go b/app.go
index 35b661cc..4ef5e730 100644
--- a/app.go
+++ b/app.go
@@ -4,9 +4,14 @@ import (
"crypto/tls"
"log"
"net/http"
+ "strconv"
"strings"
"sync"
"time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+ "gitlab.com/gitlab-org/gitlab-pages/metrics"
)
const xForwardedProto = "X-Forwarded-Proto"
@@ -43,6 +48,9 @@ func (a *theApp) serveContent(ww http.ResponseWriter, r *http.Request, https boo
w := newLoggingResponseWriter(ww)
defer w.Log(r)
+ metrics.SessionsActive.Inc()
+ defer metrics.SessionsActive.Dec()
+
// Add auto redirect
if https && !a.RedirectHTTP {
u := *r.URL
@@ -62,6 +70,7 @@ func (a *theApp) serveContent(ww http.ResponseWriter, r *http.Request, https boo
// Serve static file
domain.ServeHTTP(&w, r)
+ metrics.ProcessedRequests.WithLabelValues(strconv.Itoa(w.status), r.Method).Inc()
}
func (a *theApp) ServeHTTP(ww http.ResponseWriter, r *http.Request) {
@@ -121,6 +130,20 @@ func (a *theApp) Run() {
}(fd)
}
+ // Serve metrics for Prometheus
+ if a.ListenMetrics != 0 {
+ wg.Add(1)
+ go func(fd uintptr) {
+ defer wg.Done()
+
+ handler := promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{}).ServeHTTP
+ err := listenAndServe(fd, handler, false, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }(a.ListenMetrics)
+ }
+
go watchDomains(a.Domain, a.UpdateDomains, time.Second)
wg.Wait()
diff --git a/app_config.go b/app_config.go
index 7ffa1e76..04c787e1 100644
--- a/app_config.go
+++ b/app_config.go
@@ -6,9 +6,10 @@ type appConfig struct {
RootCertificate []byte
RootKey []byte
- ListenHTTP []uintptr
- ListenHTTPS []uintptr
- ListenProxy []uintptr
+ ListenHTTP []uintptr
+ ListenHTTPS []uintptr
+ ListenProxy []uintptr
+ ListenMetrics uintptr
HTTP2 bool
RedirectHTTP bool
diff --git a/domains.go b/domains.go
index de038f5e..d298b63b 100644
--- a/domains.go
+++ b/domains.go
@@ -9,6 +9,8 @@ import (
"path/filepath"
"strings"
"time"
+
+ "gitlab.com/gitlab-org/gitlab-pages/metrics"
)
type domains map[string]*domain
@@ -30,6 +32,7 @@ func (d domains) addDomain(rootDomain, group, project string, config *domainConf
}
domainName = strings.ToLower(domainName)
d[domainName] = newDomain
+
return nil
}
@@ -147,6 +150,12 @@ func watchDomains(rootDomain string, updater domainsUpdater, interval time.Durat
if updater != nil {
updater(domains)
}
+
+ // Update prometheus metrics
+ metrics.DomainLastUpdateTime.Set(float64(time.Now().UTC().Unix()))
+ metrics.DomainsServed.Set(float64(len(domains)))
+ metrics.DomainUpdates.Inc()
+
time.Sleep(interval)
}
}
diff --git a/helpers_test.go b/helpers_test.go
index 3bab08e0..e7fd744e 100644
--- a/helpers_test.go
+++ b/helpers_test.go
@@ -3,7 +3,6 @@ package main
import (
"crypto/tls"
"fmt"
- "github.com/stretchr/testify/assert"
"io/ioutil"
"log"
"net"
@@ -12,6 +11,8 @@ import (
"os/exec"
"testing"
"time"
+
+ "github.com/stretchr/testify/assert"
)
var chdirSet = false
@@ -124,36 +125,14 @@ func (l ListenSpec) JoinHostPort() string {
// GetPageFromProcess to do a HTTP GET against a listener.
//
// If run as root via sudo, the gitlab-pages process will drop privileges
-func RunPagesProcess(t *testing.T, pagesPath string, listeners []ListenSpec) (teardown func()) {
- var tempfiles []string
- var args []string
- var hasHTTPS bool
-
+func RunPagesProcess(t *testing.T, pagesPath string, listeners []ListenSpec, promPort string) (teardown func()) {
_, err := os.Stat(pagesPath)
assert.NoError(t, err)
- for _, spec := range listeners {
- args = append(args, "-listen-"+spec.Type, spec.JoinHostPort())
-
- if spec.Type == "https" {
- hasHTTPS = true
- }
- }
-
- if hasHTTPS {
- key, cert := CreateHTTPSFixtureFiles(t)
- tempfiles = []string{key, cert}
- args = append(args, "-root-key", key, "-root-cert", cert)
- }
-
- if os.Geteuid() == 0 && os.Getenv("SUDO_UID") != "" && os.Getenv("SUDO_GID") != "" {
- t.Log("Pages process will drop privileges")
- args = append(args, "-daemon-uid", os.Getenv("SUDO_UID"), "-daemon-gid", os.Getenv("SUDO_GID"))
- }
-
+ args, tempfiles := getPagesArgs(t, listeners, promPort)
cmd := exec.Command(pagesPath, args...)
- t.Logf("Running %s %v", pagesPath, args)
cmd.Start()
+ t.Logf("Running %s %v", pagesPath, args)
// Wait for all TCP servers to be open. Even with this, gitlab-pages
// will sometimes return 404 if a HTTP request comes in before it has
@@ -176,6 +155,35 @@ func RunPagesProcess(t *testing.T, pagesPath string, listeners []ListenSpec) (te
}
}
+func getPagesArgs(t *testing.T, listeners []ListenSpec, promPort string) (args, tempfiles []string) {
+ var hasHTTPS bool
+
+ for _, spec := range listeners {
+ args = append(args, "-listen-"+spec.Type, spec.JoinHostPort())
+
+ if spec.Type == "https" {
+ hasHTTPS = true
+ }
+ }
+
+ if hasHTTPS {
+ key, cert := CreateHTTPSFixtureFiles(t)
+ tempfiles = []string{key, cert}
+ args = append(args, "-root-key", key, "-root-cert", cert)
+ }
+
+ if promPort != "" {
+ args = append(args, "-metrics-address", promPort)
+ }
+
+ if os.Geteuid() == 0 && os.Getenv("SUDO_UID") != "" && os.Getenv("SUDO_GID") != "" {
+ t.Log("Pages process will drop privileges")
+ args = append(args, "-daemon-uid", os.Getenv("SUDO_UID"), "-daemon-gid", os.Getenv("SUDO_GID"))
+ }
+
+ return
+}
+
// Does an insecure HTTP GET against the listener specified, setting a fake
// Host: and constructing the URL from the listener and the URL suffix.
func GetPageFromListener(t *testing.T, spec ListenSpec, host, urlsuffix string) (*http.Response, error) {
diff --git a/main.go b/main.go
index 62d28e0e..172d41e8 100644
--- a/main.go
+++ b/main.go
@@ -13,6 +13,36 @@ var VERSION = "dev"
// REVISION stores the information about the git revision of application
var REVISION = "HEAD"
+var (
+ pagesRootCert = flag.String("root-cert", "", "The default path to file certificate to serve static pages")
+ pagesRootKey = flag.String("root-key", "", "The default path to file certificate to serve static pages")
+ redirectHTTP = flag.Bool("redirect-http", true, "Serve the pages under HTTP")
+ useHTTP2 = flag.Bool("use-http2", true, "Enable HTTP2 support")
+ pagesRoot = flag.String("pages-root", "shared/pages", "The directory where pages are stored")
+ pagesDomain = flag.String("pages-domain", "gitlab-example.com", "The domain to serve static pages")
+ metricsAddress = flag.String("metrics-address", "", "The address to listen on for metrics requests")
+ daemonUID = flag.Uint("daemon-uid", 0, "Drop privileges to this user")
+ daemonGID = flag.Uint("daemon-gid", 0, "Drop privileges to this group")
+)
+
+func configFromFlags() appConfig {
+ var config appConfig
+
+ config.Domain = strings.ToLower(*pagesDomain)
+ config.RedirectHTTP = *redirectHTTP
+ config.HTTP2 = *useHTTP2
+
+ if *pagesRootCert != "" {
+ config.RootCertificate = readFile(*pagesRootCert)
+ }
+
+ if *pagesRootKey != "" {
+ config.RootKey = readFile(*pagesRootKey)
+ }
+
+ return config
+}
+
func appMain() {
var showVersion = flag.Bool("version", false, "Show version")
var listenHTTP, listenHTTPS, listenProxy MultiStringFlag
@@ -21,15 +51,6 @@ func appMain() {
flag.Var(&listenHTTPS, "listen-https", "The address(es) to listen on for HTTPS requests")
flag.Var(&listenProxy, "listen-proxy", "The address(es) to listen on for proxy requests")
- var pagesRootCert = flag.String("root-cert", "", "The default path to file certificate to serve static pages")
- var pagesRootKey = flag.String("root-key", "", "The default path to file certificate to serve static pages")
- var redirectHTTP = flag.Bool("redirect-http", true, "Serve the pages under HTTP")
- var useHTTP2 = flag.Bool("use-http2", true, "Enable HTTP2 support")
- var pagesRoot = flag.String("pages-root", "shared/pages", "The directory where pages are stored")
- var pagesDomain = flag.String("pages-domain", "gitlab-example.com", "The domain to serve static pages")
- var daemonUID = flag.Uint("daemon-uid", 0, "Drop privileges to this user")
- var daemonGID = flag.Uint("daemon-gid", 0, "Drop privileges to this group")
-
flag.Parse()
printVersion(*showVersion, VERSION)
@@ -42,18 +63,7 @@ func appMain() {
log.Fatalln(err)
}
- var config appConfig
- config.Domain = strings.ToLower(*pagesDomain)
- config.RedirectHTTP = *redirectHTTP
- config.HTTP2 = *useHTTP2
-
- if *pagesRootCert != "" {
- config.RootCertificate = readFile(*pagesRootCert)
- }
-
- if *pagesRootKey != "" {
- config.RootKey = readFile(*pagesRootKey)
- }
+ config := configFromFlags()
for _, addr := range listenHTTP {
l, fd := createSocket(addr)
@@ -73,6 +83,12 @@ func appMain() {
config.ListenProxy = append(config.ListenProxy, fd)
}
+ if *metricsAddress != "" {
+ l, fd := createSocket(*metricsAddress)
+ defer l.Close()
+ config.ListenMetrics = fd
+ }
+
if *daemonUID != 0 || *daemonGID != 0 {
daemonize(config, *daemonUID, *daemonGID)
return
diff --git a/metrics/metrics.go b/metrics/metrics.go
new file mode 100644
index 00000000..00e39460
--- /dev/null
+++ b/metrics/metrics.go
@@ -0,0 +1,42 @@
+package metrics
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ DomainsServed = prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "gitlab_pages_domains_served_total",
+ Help: "The total number of sites served by this Pages app",
+ })
+
+ DomainUpdates = prometheus.NewCounter(prometheus.CounterOpts{
+ Name: "gitlab_pages_domains_updated_total",
+ Help: "The total number of site updates processed since daemon start",
+ })
+
+ DomainLastUpdateTime = prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "gitlab_pages_last_domain_update_seconds",
+ Help: "UNIX timestamp of the last update",
+ })
+
+ ProcessedRequests = prometheus.NewCounterVec(prometheus.CounterOpts{
+ Name: "gitlab_pages_http_requests_total",
+ Help: "Total number of HTTP requests done serving",
+ },
+ []string{"code", "method"},
+ )
+
+ SessionsActive = prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "gitlab_pages_http_sessions_active",
+ Help: "The number of HTTP requests currently being processed",
+ })
+)
+
+func init() {
+ prometheus.MustRegister(DomainsServed)
+ prometheus.MustRegister(DomainUpdates)
+ prometheus.MustRegister(DomainLastUpdateTime)
+ prometheus.MustRegister(ProcessedRequests)
+ prometheus.MustRegister(SessionsActive)
+}
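
For reference, a hedged sketch of how a CounterVec such as ProcessedRequests above is driven. The literal label values here are illustrative; the app.go change in this commit does the same with the real response status and request method. The collector is redeclared so the sketch compiles on its own.

```go
package main

import (
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
)

// processedRequests mirrors metrics.ProcessedRequests above; redeclared
// here only so this sketch is self-contained.
var processedRequests = prometheus.NewCounterVec(prometheus.CounterOpts{
	Name: "gitlab_pages_http_requests_total",
	Help: "Total number of HTTP requests done serving",
}, []string{"code", "method"})

func main() {
	prometheus.MustRegister(processedRequests)

	// Creates (or finds) the child series with code="200", method="GET"
	// and increments it, producing:
	//   gitlab_pages_http_requests_total{code="200",method="GET"} 1
	processedRequests.WithLabelValues(strconv.Itoa(200), "GET").Inc()
}
```
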
diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE
new file mode 100644
index 00000000..339177be
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 00000000..1602287d
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 00000000..f4cabd66
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,292 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+ "math"
+ "sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+ Value float64 `json:",string"`
+ Width float64 `json:",string"`
+ Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * r
+ }
+ return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ return 2 * epsilon * (s.n - r)
+ }
+ return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targets map[float64]float64) *Stream {
+ ƒ := func(s *stream, r float64) float64 {
+ var m = math.MaxFloat64
+ var f float64
+ for quantile, epsilon := range targets {
+ if quantile*s.n <= r {
+ f = (2 * epsilon * r) / quantile
+ } else {
+ f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+ }
+ if f < m {
+ m = f
+ }
+ }
+ return m
+ }
+ return newStream(ƒ)
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+ *stream
+ b Samples
+ sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+ x := &stream{ƒ: ƒ}
+ return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+ s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+ s.b = append(s.b, sample)
+ s.sorted = false
+ if len(s.b) == cap(s.b) {
+ s.flush()
+ }
+}
+
+// Query returns the computed qth percentiles value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+ if !s.flushed() {
+ // Fast path when there hasn't been enough data for a flush;
+ // this also yields better accuracy for small sets of data.
+ l := len(s.b)
+ if l == 0 {
+ return 0
+ }
+ i := int(math.Ceil(float64(l) * q))
+ if i > 0 {
+ i -= 1
+ }
+ s.maybeSort()
+ return s.b[i].Value
+ }
+ s.flush()
+ return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying streams samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+ sort.Sort(samples)
+ s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list reusing the samples buffer memory.
+func (s *Stream) Reset() {
+ s.stream.reset()
+ s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+ if !s.flushed() {
+ return s.b
+ }
+ s.flush()
+ return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+ return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+ s.maybeSort()
+ s.stream.merge(s.b)
+ s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+ if !s.sorted {
+ s.sorted = true
+ sort.Sort(s.b)
+ }
+}
+
+func (s *Stream) flushed() bool {
+ return len(s.stream.l) > 0
+}
+
+type stream struct {
+ n float64
+ l []Sample
+ ƒ invariant
+}
+
+func (s *stream) reset() {
+ s.l = s.l[:0]
+ s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+ s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+ // TODO(beorn7): This tries to merge not only individual samples, but
+ // whole summaries. The paper doesn't mention merging summaries at
+ // all. Unittests show that the merging is inaccurate. Find out how to
+ // do merges properly.
+ var r float64
+ i := 0
+ for _, sample := range samples {
+ for ; i < len(s.l); i++ {
+ c := s.l[i]
+ if c.Value > sample.Value {
+ // Insert at position i.
+ s.l = append(s.l, Sample{})
+ copy(s.l[i+1:], s.l[i:])
+ s.l[i] = Sample{
+ sample.Value,
+ sample.Width,
+ math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+ // TODO(beorn7): How to calculate delta correctly?
+ }
+ i++
+ goto inserted
+ }
+ r += c.Width
+ }
+ s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+ i++
+ inserted:
+ s.n += sample.Width
+ r += sample.Width
+ }
+ s.compress()
+}
+
+func (s *stream) count() int {
+ return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+ t := math.Ceil(q * s.n)
+ t += math.Ceil(s.ƒ(s, t) / 2)
+ p := s.l[0]
+ var r float64
+ for _, c := range s.l[1:] {
+ r += p.Width
+ if r+c.Width+c.Delta > t {
+ return p.Value
+ }
+ p = c
+ }
+ return p.Value
+}
+
+func (s *stream) compress() {
+ if len(s.l) < 2 {
+ return
+ }
+ x := s.l[len(s.l)-1]
+ xi := len(s.l) - 1
+ r := s.n - 1 - x.Width
+
+ for i := len(s.l) - 2; i >= 0; i-- {
+ c := s.l[i]
+ if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+ x.Width += c.Width
+ s.l[xi] = x
+ // Remove element at i.
+ copy(s.l[i:], s.l[i+1:])
+ s.l = s.l[:len(s.l)-1]
+ xi -= 1
+ } else {
+ x = c
+ xi = i
+ }
+ r -= c.Width
+ }
+}
+
+func (s *stream) samples() Samples {
+ samples := make(Samples, len(s.l))
+ copy(samples, s.l)
+ return samples
+}
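
The vendored quantile package above is what client_golang's Summary type builds on. As a hedged usage sketch of the API it exports (the targets and data below are synthetic, chosen only to illustrate NewTargeted, Insert, and Query as documented in the file):

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and 99th percentile, each with its own absolute
	// error bound, as documented for NewTargeted above.
	s := quantile.NewTargeted(map[float64]float64{
		0.50: 0.05,
		0.99: 0.001,
	})

	for i := 1; i <= 10000; i++ {
		s.Insert(float64(i))
	}

	fmt.Println("count:", s.Count()) // 10000
	fmt.Println("p50 ~", s.Query(0.50))
	fmt.Println("p99 ~", s.Query(0.99))
}
```
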
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 00000000..15167cd7
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 00000000..1c4577e9
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
index 6a66aea5..1b1b1921 100644
--- a/vendor/golang.org/x/crypto/LICENSE
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -1,16 +1,19 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
- * Redistributions of source code must retain the above copyright
+ * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
+ * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
@@ -25,3 +28,4 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile
new file mode 100644
index 00000000..e2e0651a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
+ make
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 00000000..e392575b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,229 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := extendable(in.Addr().Interface()); ok {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
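
A hedged sketch of the Clone and Merge semantics documented above. Real code operates on protoc-generated types; the hand-written message here is a stand-in that exists only for this example and satisfies the proto.Message interface just enough to run.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Example is a hand-written stand-in for a generated message type.
type Example struct {
	Name string   `protobuf:"bytes,1,opt,name=name"`
	Tags []string `protobuf:"bytes,2,rep,name=tags"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return fmt.Sprintf("%+v", *m) }
func (*Example) ProtoMessage()    {}

func main() {
	src := &Example{Name: "pages", Tags: []string{"a"}}

	// Clone returns a deep copy; mutating dup does not touch src.
	dup := proto.Clone(src).(*Example)

	// Merge appends repeated fields, so dup.Tags becomes [a b],
	// while the already-set Name field is left alone.
	proto.Merge(dup, &Example{Tags: []string{"b"}})
	fmt.Println(dup.Tags)
}
```
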
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 00000000..04dcb881
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,874 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ // x, n already 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
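+
+// Illustrative example (not from the upstream file): a worked varint.
+// 300 is 0b1_0010_1100; the low seven bits become 0xAC (0x2C with the
+// continuation bit set) and the remaining two bits become 0x02.
+func exampleDecodeVarint() {
+ x, n := DecodeVarint([]byte{0xAC, 0x02})
+ _ = x // 300
+ _ = n // 2 bytes consumed
+}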
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ // x, err already 0
+
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
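+
+// Illustrative example (not from the upstream file): fixed-width values
+// are little-endian, least significant byte first.
+func exampleDecodeFixed32() {
+ p := NewBuffer([]byte{0x01, 0x00, 0x00, 0x00})
+ x, _ := p.DecodeFixed32()
+ _ = x // 1
+}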
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
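+
+// Illustrative example (not from the upstream file): zigzag maps signed
+// values of small magnitude to small unsigned ones (0 to 0, -1 to 1,
+// 1 to 2, -2 to 3), so the wire value 5 decodes to -3.
+func exampleDecodeZigzag() {
+ p := NewBuffer([]byte{0x05})
+ u, _ := p.DecodeZigzag64()
+ _ = int64(u) // -3
+}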
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // TODO: check whether more callers can use alloc=false.
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
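+
+// Illustrative example (not from the upstream file): strings and bytes
+// are length-delimited, a varint byte count followed by the raw data,
+// so "foo" travels as 0x03 'f' 'o' 'o'.
+func exampleDecodeStringBytes() {
+ p := NewBuffer([]byte{0x03, 'f', 'o', 'o'})
+ s, _ := p.DecodeStringBytes()
+ _ = s // "foo"
+}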
+
+// Skip the next item in the buffer. Its tag and wire type were already
+// decoded by the caller and are passed in as arguments.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its tag and wire type were already
+// decoded by the caller and are passed in as arguments.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
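+
+// Illustrative note (assuming a hypothetical generated type *pb.Log with
+// a repeated Entries field): two Unmarshal calls leave only the second
+// payload's entries because of the Reset, while two UnmarshalMerge calls
+// append them, matching the wire-format rule that a message is equivalent
+// to the concatenation of its serialized pieces.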
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ if required > 0 {
+ // Not enough information to determine the exact field.
+ // (See below.)
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ extmap := e.extensionsWrite()
+ ext := extmap[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ extmap[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
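+
+// Illustrative note: the reqFields bitmap above keeps a legally repeated
+// required field from being counted twice. For tag 3 the mask is 0b100;
+// a second occurrence finds the bit already set and leaves the required
+// count untouched.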
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+// v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
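+
+// Illustrative note: a packed repeated field arrives as one
+// length-delimited blob, a varint byte count followed by the bare
+// elements with no per-element tags, so three bools cost four payload
+// bytes (0x03 0x01 0x00 0x01) instead of a tag byte per element.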
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // The tag codes for the key and value properties are always a single
+ // byte because they have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() {
+ keyelem = reflect.Zero(p.mtype.Key())
+ }
+ if !valelem.IsValid() {
+ valelem = reflect.Zero(p.mtype.Elem())
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 00000000..8c1b8fd1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1363 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // errOneofHasNil is the error returned if Marshal is called with
+ // a struct with a oneof field containing a nil element.
+ errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // ErrTooLarge is the error returned if Marshal is called with a
+ // message that encodes to >2GB.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// maxMarshalSize is the largest allowed size of an encoded protobuf,
+// since C++ and Java use signed int32s for the size.
+const maxMarshalSize = 1<<31 - 1
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
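+
+// Illustrative example (not from the upstream file): the encode side of
+// the varint example in decode.go; 300 again becomes 0xAC 0x02.
+func exampleEncodeVarint() {
+ b := EncodeVarint(300)
+ _ = b // []byte{0xAC, 0x02}
+}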
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ var state errorState
+ if err != nil && !state.shouldContinue(err, nil) {
+ return nil, err
+ }
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
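+
+// Illustrative sketch (rawMsg is a made-up type, not part of the upstream
+// file): any type implementing Marshaler takes the fast path at the top
+// of Marshal and bypasses the reflection-based encoder entirely.
+type rawMsg struct{ b []byte }
+
+func (m *rawMsg) Reset()         { m.b = nil }
+func (m *rawMsg) String() string { return "rawMsg" }
+func (m *rawMsg) ProtoMessage()  {}
+
+// Marshal returns the stored bytes verbatim, so Marshal(&rawMsg{b}) is a
+// plain copy of b.
+func (m *rawMsg) Marshal() ([]byte, error) { return m.b, nil }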
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ if err != nil {
+ return err
+ }
+ p.buf = append(p.buf, data...)
+ return nil
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Encode++
+ }
+
+ if len(p.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Size++
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but IsNil panics on
+// non-nillable kinds, so check the kind before calling it.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ exts := structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionsMap(*exts); err != nil {
+ return err
+ }
+
+ return o.enc_map_body(*exts)
+}
+
+func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
+ exts := structPointer_Extensions(base, p.field)
+ if err := encodeExtensions(exts); err != nil {
+ return err
+ }
+ v, _ := exts.extensionsRead()
+
+ return o.enc_map_body(v)
+}
+
+func (o *Buffer) enc_map_body(v map[int32]Extension) error {
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := structPointer_ExtMap(base, p.field)
+ return extensionsMapSize(*v)
+}
+
+func size_exts(p *Properties, base structPointer) int {
+ v := structPointer_Extensions(base, p.field)
+ return extensionsSize(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map<key_type, value_type> map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
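+
+// Illustrative note: for a map<int32, string> holding the single entry
+// 1: "a", each entry is a length-delimited pseudo-message with the key in
+// field 1 and the value in field 2, so the entry payload is
+// 0x08 0x01 0x12 0x01 0x61 (key tag, key, value tag, length, 'a').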
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns addressable scratch values for the map's key
+// and value types, plus structPointers suitable for passing to an encoder
+// or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) //
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) //
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ if len(o.buf) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err == ErrNil {
+ return errOneofHasNil
+ } else if err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(o.buf)+len(v) > maxMarshalSize {
+ return ErrTooLarge
+ }
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ if prop.oneofSizer != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ n += prop.oneofSizer(m)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
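+
+// Illustrative note: the four bytes reserved above are a guess at the
+// size of the length varint. A 10-byte nested message needs only one
+// length byte, so x = 1-4 = -3 and the body shifts three bytes left;
+// messages of 2 MB and up fill all four bytes, and past 256 MB a fifth
+// byte is appended and the body shifts right (the x > 0 case).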
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 00000000..8b16f951
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,296 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal (a "bytes" field,
+ although represented by []byte, is not a repeated field).
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
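+
+// For illustration, assuming a generated message type pb.Test with an
+// optional string field Label (hypothetical names), Equal compares
+// messages structurally rather than by pointer:
+//
+//	a := &pb.Test{Label: proto.String("x")}
+//	b := &pb.Test{Label: proto.String("x")}
+//	proto.Equal(a, b)   // true: same type, corresponding fields equal
+//	proto.Equal(a, nil) // false: only two nil messages are equal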
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_InternalExtensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ // Maps may have nil values in them, so check for nil.
+ if v1.IsNil() && v2.IsNil() {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+ em1, _ := x1.extensionsRead()
+ em2, _ := x2.extensionsRead()
+ return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 00000000..6b9b3637
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,586 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ extensionsWrite() map[int32]Extension
+ extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+ extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+ return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+ return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock() {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, bool) {
+ if ep, ok := p.(extendableProto); ok {
+ return ep, ok
+ }
+ if ep, ok := p.(extendableProtoV1); ok {
+ return extensionAdapter{ep}, ok
+ }
+ return nil, false
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported extensionsWrite and extensionsRead methods, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+ // The struct must be indirect so that if a user inadvertently copies a
+ // generated message and its embedded XXX_InternalExtensions, they
+ // avoid the mayhem of a copied mutex.
+ //
+ // The mutex serializes all logically read-only operations to p.extensionMap.
+ // It is up to the client to ensure that write operations to p.extensionMap are
+ // mutually exclusive with other accesses.
+ p *struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+ if e.p == nil {
+ e.p = new(struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ })
+ e.p.extensionMap = make(map[int32]Extension)
+ }
+ return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use. It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+ if e.p == nil {
+ return nil, nil
+ }
+ return e.p.extensionMap, &e.p.mu
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+ epb, ok := extendable(base)
+ if !ok {
+ return
+ }
+ extmap := epb.extensionsWrite()
+ extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ var pbi interface{} = pb
+ // Check the extended type.
+ if ea, ok := pbi.(extensionAdapter); ok {
+ pbi = ea.extendableProtoV1
+ }
+ if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encodeExtensions encodes any unmarshaled (unencoded) extensions in e.
+func encodeExtensions(e *XXX_InternalExtensions) error {
+ m, mu := e.extensionsRead()
+ if m == nil {
+ return nil // fast path
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ return encodeExtensionsMap(m)
+}
+
+// encodeExtensionsMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionsMap(m map[int32]Extension) error {
+ for k, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ m[k] = e
+ }
+ return nil
+}
+
+func extensionsSize(e *XXX_InternalExtensions) (n int) {
+ m, mu := e.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ return extensionsMapSize(m)
+}
+
+func extensionsMapSize(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ epb, ok := extendable(pb)
+ if !ok {
+ return false
+ }
+ extmap, mu := epb.extensionsRead()
+ if extmap == nil {
+ return false
+ }
+ mu.Lock()
+ _, ok = extmap[extension.Field]
+ mu.Unlock()
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return
+ }
+ // TODO: Check types, field numbers, etc.?
+ extmap := epb.extensionsWrite()
+ delete(extmap, extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value, it returns ErrMissingExtension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, errors.New("proto: not an extendable proto")
+ }
+
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return nil, err
+ }
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return defaultExtensionValue(extension)
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default is defined for the extension, ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a pointer; we can return sf.value directly.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non-int32 reflect.Value directly,
+ // set it as an int32.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, a pointer to a basic type, or a slice.
+ // Allocate a "field" to hold the pointer/slice itself and pass the
+ // address of that field to props.dec, which interprets the zero field
+ // and *t as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, errors.New("proto: not an extendable proto")
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+ }
+ registeredExtensions := RegisteredExtensions(pb)
+
+ emap, mu := epb.extensionsRead()
+ if emap == nil {
+ return nil, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+ extensions := make([]*ExtensionDesc, 0, len(emap))
+ for extid, e := range emap {
+ desc := e.desc
+ if desc == nil {
+ desc = registeredExtensions[extid]
+ if desc == nil {
+ desc = &ExtensionDesc{Field: extid}
+ }
+ }
+
+ extensions = append(extensions, desc)
+ }
+ return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+ epb, ok := extendable(pb)
+ if !ok {
+ return errors.New("proto: not an extendable proto")
+ }
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ extmap := epb.extensionsWrite()
+ extmap[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
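+
+// A usage sketch for the extension API, assuming a registered extension
+// descriptor pb.E_Ext of type *string extending message pb.Base (both
+// hypothetical):
+//
+//	msg := &pb.Base{}
+//	if err := proto.SetExtension(msg, pb.E_Ext, proto.String("v")); err != nil {
+//		log.Fatal(err)
+//	}
+//	proto.HasExtension(msg, pb.E_Ext) // true
+//	v, _ := proto.GetExtension(msg, pb.E_Ext)
+//	s := *v.(*string)                 // "v": GetExtension returns the ExtensionType
+//	proto.ClearExtension(msg, pb.E_Ext)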
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+ epb, ok := extendable(pb)
+ if !ok {
+ return
+ }
+ m := epb.extensionsWrite()
+ for k := range m {
+ delete(m, k)
+ }
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
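+
+// For illustration: proto.RegisteredExtensions((*pb.Base)(nil)), with pb.Base
+// a hypothetical extendable message type, yields the map from extension field
+// number to *ExtensionDesc for every extension registered against pb.Base.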
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 00000000..ac4ddbc0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,898 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any), followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Getters are only generated for message and oneof fields.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // read point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
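+
+// A reuse sketch: one Buffer can marshal many messages without repeated
+// allocation, via the Buffer's Marshal method (see encode.go); msgs and
+// send are hypothetical here:
+//
+//	var b proto.Buffer
+//	for _, m := range msgs {
+//		b.Reset()
+//		if err := b.Marshal(m); err != nil {
+//			log.Fatal(err)
+//		}
+//		send(b.Bytes())
+//	}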
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
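+
+// For illustration with the pb.Test message from the package comment, the
+// helpers above make setting optional (pointer) fields concise:
+//
+//	t := &pb.Test{
+//		Label: proto.String("hello"),
+//		Type:  proto.Int32(17),
+//	}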
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
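+
+// A sketch of both accepted representations, using the FOO enum from the
+// package comment:
+//
+//	v, _ := proto.UnmarshalJSONEnum(pb.FOO_value, []byte(`"X"`), "FOO") // v == 17
+//	v, _ = proto.UnmarshalJSONEnum(pb.FOO_value, []byte(`17`), "FOO")   // v == 17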
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
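+
+// For illustration with the pb.Test message from the package comment, whose
+// type field declares [default=77]:
+//
+//	m := &pb.Test{}
+//	proto.SetDefaults(m)
+//	// *m.Type == 77; m.Label stays nil (required, but no declared default).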
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to a defaultMessage
+ // describing the proto-declared default values of its scalar fields
+ // and the indices of its nested message fields.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings, and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient, we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 00000000..fd982dec
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,311 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+ return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ if err := encodeExtensions(exts); err != nil {
+ return nil, err
+ }
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ if err := encodeExtensionsMap(exts); err != nil {
+ return nil, err
+ }
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m = exts.extensionsWrite()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return errors.New("proto: not an extension map")
+ }
+
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+ var m map[int32]Extension
+ switch exts := exts.(type) {
+ case *XXX_InternalExtensions:
+ m, _ = exts.extensionsRead()
+ case map[int32]Extension:
+ m = exts
+ default:
+ return nil, errors.New("proto: not an extension map")
+ }
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ firstWritten := false
+ for _, id := range ids {
+  ext := m[id]
+
+  msd, ok := messageSetMap[id]
+  if !ok {
+   // Unknown type; we can't render it, so skip it.
+   continue
+  }
+  // Write the separator only between rendered elements, so a skipped
+  // leading entry cannot produce malformed JSON like "{,".
+  if firstWritten {
+   b.WriteByte(',')
+  }
+  firstWritten = true
+  fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
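
For context, a hedged sketch of the registration call that generated code emits. MyExtension, the field number 12345, and the registered name are all illustrative, not taken from this patch:

    package main

    import "github.com/golang/protobuf/proto"

    // MyExtension is a hypothetical extension message; generated code would
    // normally emit the registration below from an init function.
    type MyExtension struct{}

    func (*MyExtension) Reset()         {}
    func (*MyExtension) String() string { return "MyExtension" }
    func (*MyExtension) ProtoMessage()  {}

    func init() {
        // The field number and name are illustrative only.
        proto.RegisterMessageSetType((*MyExtension)(nil), 12345, "my.pkg.MyExtension")
    }

    func main() {}
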
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 00000000..fb512e2e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,484 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
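
The typed accessors below all reduce to this Addr().Interface() pattern. A minimal standalone sketch, assuming a hypothetical msg struct:

    package main

    import (
        "fmt"
        "reflect"
    )

    // msg is a hypothetical message struct.
    type msg struct {
        Name string
    }

    func main() {
        m := &msg{}
        v := reflect.ValueOf(m) // plays the role of structPointer.v
        // FieldByIndex + Addr + Interface yields a typed *string aimed at
        // m.Name — the same move structPointer_StringVal makes.
        p := v.Elem().FieldByIndex([]int{0}).Addr().Interface().(*string)
        *p = "hello"
        fmt.Println(m.Name) // hello
    }
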
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+ return structPointer_ifield(p, f).(*XXX_InternalExtensions)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
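
The o.int32s, o.uint32s, and o.float32s scratch slices implement a small slab allocator: one make call is amortized across many Set operations. A standalone sketch of the same idea, with illustrative names (buffer, newInt32, poolSize):

    package main

    import "fmt"

    const poolSize = 4 // stands in for uint32PoolSize

    // buffer stands in for Buffer's int32s scratch slice.
    type buffer struct {
        int32s []int32
    }

    // newInt32 carves a pointer out of a shared backing array, so poolSize
    // values share a single allocation — the same trick word32_Set uses.
    func newInt32(o *buffer, x int32) *int32 {
        if len(o.int32s) == 0 {
            o.int32s = make([]int32, poolSize)
        }
        o.int32s[0] = x
        p := &o.int32s[0]
        o.int32s = o.int32s[1:]
        return p
    }

    func main() {
        var o buffer
        a, b := newInt32(&o, 1), newInt32(&o, 2)
        fmt.Println(*a, *b) // 1 2
    }
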
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+  elem.SetInt(int64(x))
+ case reflect.Uint64:
+  elem.SetUint(x)
+ case reflect.Float64:
+  elem.SetFloat(math.Float64frombits(x))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+  return uint64(elem.Int())
+ case reflect.Uint64:
+  return elem.Uint()
+ case reflect.Float64:
+  return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 00000000..6b5567d4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,270 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != ^field(0)
+}
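
Every accessor in this file makes the same move: base address plus recorded byte offset, cast to the field's type. A standalone sketch using only the standard library (msg is hypothetical):

    package main

    import (
        "fmt"
        "reflect"
        "unsafe"
    )

    // msg is a hypothetical message struct.
    type msg struct {
        A int32
        B string
    }

    func main() {
        m := &msg{A: 7}
        // toField records the field's byte offset; the accessors add it back
        // to the struct's base address and cast, as structPointer_StringVal does.
        f, _ := reflect.TypeOf(msg{}).FieldByName("B")
        p := (*string)(unsafe.Pointer(uintptr(unsafe.Pointer(m)) + f.Offset))
        *p = "hello"
        fmt.Println(m.A, m.B) // 7 hello
    }
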
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *v is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *v to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *v.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 00000000..69ddda8d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,864 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for deriving the properties of protocol buffer struct fields
+ * from their "protobuf" struct tags, shared by the encoder and decoder.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
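
A standalone copy of tagMap (get and put taken verbatim from above) showing the split in action: tags below tagMapFastLimit index a slice, anything larger falls back to the map.

    package main

    import "fmt"

    const tagMapFastLimit = 1024

    type tagMap struct {
        fastTags []int
        slowTags map[int]int
    }

    func (p *tagMap) get(t int) (int, bool) {
        if t > 0 && t < tagMapFastLimit {
            if t >= len(p.fastTags) {
                return 0, false
            }
            fi := p.fastTags[t]
            return fi, fi >= 0
        }
        fi, ok := p.slowTags[t]
        return fi, ok
    }

    func (p *tagMap) put(t int, fi int) {
        if t > 0 && t < tagMapFastLimit {
            for len(p.fastTags) < t+1 {
                p.fastTags = append(p.fastTags, -1)
            }
            p.fastTags[t] = fi
            return
        }
        if p.slowTags == nil {
            p.slowTags = make(map[int]int)
        }
        p.slowTags[t] = fi
    }

    func main() {
        var m tagMap
        m.put(3, 0)      // small tag: stored in the slice
        m.put(100000, 1) // large tag: falls back to the map
        fmt.Println(m.get(3))      // 0 true
        fmt.Println(m.get(100000)) // 1 true
        fmt.Println(m.get(4))      // 0 false
    }
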
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ oneofSizer oneofSizer
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s = ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ }
+ }
+}
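
A short usage sketch of Parse through the exported Properties type; the tag string is the example from the comment above:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        var p proto.Properties
        p.Parse("bytes,49,opt,name=foo,def=hello!")
        fmt.Println(p.Wire, p.Tag, p.Optional, p.OrigName, p.Default)
        // Output: bytes 49 true foo hello!
    }
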
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.dec = (*Buffer).dec_slice_byte
+ if p.proto3 {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ } else {
+ p.enc = (*Buffer).enc_slice_byte
+ p.size = size_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+ break
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+ break
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for i = 0; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
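
The tagcode precalculation at the end of setEncAndDec is just the varint encoding of (Tag<<3 | WireType). A standalone sketch with an illustrative helper name:

    package main

    import "fmt"

    // tagcode re-implements the precalculation above: the varint encoding
    // of (tag<<3 | wireType), at most five bytes for a uint32.
    func tagcode(tag, wire uint32) []byte {
        x := tag<<3 | wire
        var buf [8]byte
        i := 0
        for ; x > 127; i++ {
            buf[i] = 0x80 | uint8(x&0x7F)
            x >>= 7
        }
        buf[i] = uint8(x)
        return buf[:i+1]
    }

    func main() {
        fmt.Printf("% x\n", tagcode(1, 2))  // 0a — field 1, length-delimited
        fmt.Printf("% x\n", tagcode(16, 0)) // 80 01 — field 16 needs two bytes
    }
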
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isMarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isMarshaler")
+ }
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isUnmarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isUnmarshaler")
+ }
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
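
A hedged usage sketch; MyMsg is a hand-written stand-in for a generated message struct:

    package main

    import (
        "fmt"
        "reflect"

        "github.com/golang/protobuf/proto"
    )

    // MyMsg is a hypothetical stand-in for a generated message.
    type MyMsg struct {
        Name *string `protobuf:"bytes,1,opt,name=name"`
        Id   *int32  `protobuf:"varint,2,opt,name=id"`
    }

    func main() {
        sprop := proto.GetProperties(reflect.TypeOf(MyMsg{}))
        for _, p := range sprop.Prop {
            fmt.Println(p.Name, p.Tag, p.Wire)
        }
        // Output:
        // Name 1 bytes
        // Id 2 varint
    }
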
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
+ reflect.PtrTo(t).Implements(extendableProtoV1Type)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_InternalExtensions" { // special case
+ p.enc = (*Buffer).enc_exts
+ p.dec = nil // not needed
+ p.size = size_exts
+ } else if f.Name == "XXX_extensions" { // special case
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ } else if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") // special case
+ if oneof != "" {
+ // Oneof fields don't use the traditional protobuf tag.
+ p.OrigName = oneof
+ }
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
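
A hedged usage sketch; the enum name and values are illustrative, and generated code normally performs the registration from init():

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        proto.RegisterEnum("my.pkg.Color", nil, map[string]int32{"RED": 0, "BLUE": 1})
        fmt.Println(proto.EnumValueMap("my.pkg.Color")["BLUE"]) // 1
    }
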
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
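
A hedged sketch of round-tripping through this registry; MyMsg and the name "my.pkg.MyMsg" are illustrative:

    package main

    import (
        "fmt"
        "reflect"

        "github.com/golang/protobuf/proto"
    )

    // MyMsg is a hypothetical message type.
    type MyMsg struct{}

    func (*MyMsg) Reset()         {}
    func (*MyMsg) String() string { return "MyMsg" }
    func (*MyMsg) ProtoMessage()  {}

    func main() {
        proto.RegisterType((*MyMsg)(nil), "my.pkg.MyMsg")
        fmt.Println(proto.MessageName((*MyMsg)(nil))) // my.pkg.MyMsg

        // Instantiate a fresh message from nothing but its registered name.
        t := proto.MessageType("my.pkg.MyMsg")
        pb := reflect.New(t.Elem()).Interface().(proto.Message)
        fmt.Printf("%T\n", pb) // *main.MyMsg
    }
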
+
+// A registry of all linked proto files.
+var (
+ protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+ protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 00000000..965876bf
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,854 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Print("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func requiresQuotes(u string) bool {
+ // When the type URL contains any character outside [0-9A-Za-z./_], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message.
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv's value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("<nil>\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if _, ok := extendable(pv.Interface()); ok {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Bytes())); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep, _ := extendable(pv.Interface())
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m, mu := ep.extensionsRead()
+ if m == nil {
+ return nil
+ }
+ mu.Lock()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ mu.Unlock()
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte("<nil>"))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
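
Taken together, text.go exposes four package-level entry points (MarshalText, MarshalTextString, CompactText, CompactTextString) plus the configurable TextMarshaler. A minimal sketch of how they differ, assuming the hypothetical Example message from the registry sketch above and imports "fmt" and "io" alongside the vendored proto package:

    // renderAll writes one message three ways (an editorial sketch, not
    // part of the vendored code).
    func renderAll(w io.Writer, msg proto.Message) error {
        // Default form: one field per line, nested messages indented.
        if err := proto.MarshalText(w, msg); err != nil {
            return err
        }

        // Compact form: the same output on a single line, with spaces in
        // place of newlines.
        fmt.Fprintln(w, proto.CompactTextString(msg))

        // Configured form: compact output with known google.protobuf.Any
        // messages expanded in place via the type registry.
        tm := proto.TextMarshaler{Compact: true, ExpandAny: true}
        fmt.Fprintln(w, tm.Text(msg))
        return nil
    }
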
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 00000000..7e6f145a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,891 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// anyRepeatedlyUnpacked is the error format used when an expanded Any message is unpacked more than once, or one of its fields is already set.
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension or an Any.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ extName, err := p.consumeExtName()
+ if err != nil {
+ return err
+ }
+
+ if s := strings.LastIndex(extName, "/"); s >= 0 {
+ // If it contains a slash, it's an Any type URL.
+ messageName := extName[s+1:]
+ mt := MessageType(messageName)
+ if mt == nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+ }
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ v := reflect.New(mt.Elem())
+ if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+ return pe
+ }
+ b, err := Marshal(v.Interface().(Message))
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+ }
+ if fieldSet["type_url"] {
+ return p.errorf(anyRepeatedlyUnpacked, "type_url")
+ }
+ if fieldSet["value"] {
+ return p.errorf(anyRepeatedlyUnpacked, "value")
+ }
+ sv.FieldByName("TypeUrl").SetString(extName)
+ sv.FieldByName("Value").SetBytes(b)
+ fieldSet["type_url"] = true
+ fieldSet["value"] = true
+ continue
+ }
+
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == extName {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", extName)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(Message)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ sv.Field(oop.Field).Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order. See b/28924776 for a time
+ // this went wrong.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ case "value":
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ default:
+ p.back()
+ return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If extension name or type url is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // Either "true", "false", 1 or 0.
+ switch tok.value {
+ case "true", "1":
+ fv.SetBool(true)
+ return nil
+ case "false", "0":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
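
text_parser.go is the inverse of text.go: UnmarshalText resets the target message, tokenizes the input with the textParser above, and reports syntax problems as *ParseError. A minimal sketch, not part of the diff; the field name `name` is hypothetical:

    // parseExample parses text format back into msg (editorial sketch).
    func parseExample(msg proto.Message) {
        // UnmarshalText resets msg before parsing.
        if err := proto.UnmarshalText(`name: "hello"`, msg); err != nil {
            // Syntax failures come back as *proto.ParseError, carrying a
            // 1-based line and 0-based byte offset,
            // e.g. `line 1.6: unmatched quote`.
            if pe, ok := err.(*proto.ParseError); ok {
                fmt.Println("line", pe.Line, "offset", pe.Offset, pe.Message)
            }
        }
    }
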
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
new file mode 100644
index 00000000..8dada3ed
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
new file mode 100644
index 00000000..5d8cb5b7
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
@@ -0,0 +1 @@
+Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
new file mode 100644
index 00000000..e16fb946
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
@@ -0,0 +1 @@
+cover.dat
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
new file mode 100644
index 00000000..81be2143
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
@@ -0,0 +1,7 @@
+all:
+
+cover:
+ go test -cover -v -coverprofile=cover.dat ./...
+ go tool cover -func cover.dat
+
+.PHONY: cover
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
new file mode 100644
index 00000000..258c0636
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+var errInvalidVarint = errors.New("invalid varint32 encountered")
+
+// ReadDelimited decodes a message from the provided length-delimited stream,
+// where the length is encoded as a 32-bit varint prefix to the message body.
+// It returns the total number of bytes read and any applicable error. This is
+// roughly equivalent to the companion Java API's
+// MessageLite#parseDelimitedFrom. As per the reader contract, this function
+// calls r.Read repeatedly as required until exactly one message including its
+// prefix is read and decoded (or an error has occurred). The function never
+// reads more bytes from the stream than required. The function never returns
+// an error if a message has been read and decoded correctly, even if the end
+// of the stream has been reached in doing so. In that case, any subsequent
+// calls return (0, io.EOF).
+func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
+ // Per AbstractParser#parsePartialDelimitedFrom with
+ // CodedInputStream#readRawVarint32.
+ var headerBuf [binary.MaxVarintLen32]byte
+ var bytesRead, varIntBytes int
+ var messageLength uint64
+ for varIntBytes == 0 { // i.e. no varint has been decoded yet.
+ if bytesRead >= len(headerBuf) {
+ return bytesRead, errInvalidVarint
+ }
+ // We have to read byte by byte here to avoid reading more bytes
+ // than required. Each read byte is appended to what we have
+ // read before.
+ newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
+ if newBytesRead == 0 {
+ if err != nil {
+ return bytesRead, err
+ }
+ // A Reader should not return (0, nil), but if it does,
+ // it should be treated as a no-op (according to the
+ // Reader contract). So let's go on...
+ continue
+ }
+ bytesRead += newBytesRead
+ // Now present everything read so far to the varint decoder and
+ // see if a varint can be decoded already.
+ messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
+ }
+
+ messageBuf := make([]byte, messageLength)
+ newBytesRead, err := io.ReadFull(r, messageBuf)
+ bytesRead += newBytesRead
+ if err != nil {
+ return bytesRead, err
+ }
+
+ return bytesRead, proto.Unmarshal(messageBuf, m)
+}
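
As the doc comment says, ReadDelimited returns cleanly only on a full message, so a reader can loop until io.EOF marks a clean record boundary. A minimal sketch, assuming imports "io" and the vendored pbutil and proto packages; newMsg is a hypothetical factory for the concrete message type:

    // readAll drains a length-delimited stream until EOF (editorial sketch).
    func readAll(r io.Reader, newMsg func() proto.Message) error {
        for {
            m := newMsg()
            if _, err := pbutil.ReadDelimited(r, m); err != nil {
                if err == io.EOF {
                    return nil // clean boundary: end of stream
                }
                // A corrupt prefix surfaces as "invalid varint32 encountered";
                // a truncated body as io.ErrUnexpectedEOF from io.ReadFull.
                return err
            }
            // ... consume m ...
        }
    }
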
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
new file mode 100644
index 00000000..c318385c
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil provides record length-delimited Protocol Buffer streaming.
+package pbutil
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
new file mode 100644
index 00000000..8fb59ad2
--- /dev/null
+++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Matt T. Proud
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pbutil
+
+import (
+ "encoding/binary"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// WriteDelimited encodes and dumps a message to the provided writer prefixed
+// with a 32-bit varint indicating the length of the encoded message, producing
+// a length-delimited record stream, which can be used to chain together
+// encoded messages of the same type in a file. It returns the total
+// number of bytes written and any applicable error. This is roughly
+// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
+ buffer, err := proto.Marshal(m)
+ if err != nil {
+ return 0, err
+ }
+
+ var buf [binary.MaxVarintLen32]byte
+ encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
+
+ sync, err := w.Write(buf[:encodedLength])
+ if err != nil {
+ return sync, err
+ }
+
+ n, err = w.Write(buffer)
+ return n + sync, err
+}
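
WriteDelimited and ReadDelimited compose into a record stream, as the doc comment above describes. A hedged round-trip sketch, again using dto.MetricFamily as a stand-in message type:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	var buf bytes.Buffer
	// Chain two records of the same type into one stream.
	for _, name := range []string{"metric_a", "metric_b"} {
		if _, err := pbutil.WriteDelimited(&buf, &dto.MetricFamily{Name: proto.String(name)}); err != nil {
			log.Fatal(err)
		}
	}
	// Read them back in order.
	for i := 0; i < 2; i++ {
		mf := &dto.MetricFamily{}
		if _, err := pbutil.ReadDelimited(&buf, mf); err != nil {
			log.Fatal(err)
		}
		fmt.Println(mf.GetName())
	}
}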
diff --git a/vendor/github.com/prometheus/client_golang/AUTHORS.md b/vendor/github.com/prometheus/client_golang/AUTHORS.md
new file mode 100644
index 00000000..c5275d5a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/AUTHORS.md
@@ -0,0 +1,18 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Björn Rabenstein <beorn@soundcloud.com>
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Bernerd Schaefer <bj.schaefer@gmail.com>
+* Björn Rabenstein <beorn@soundcloud.com>
+* Daniel Bornkessel <daniel@soundcloud.com>
+* Jeff Younker <jeff@drinktomi.com>
+* Julius Volz <julius.volz@gmail.com>
+* Matt T. Proud <matt.proud@gmail.com>
+* Tobias Schmidt <ts@soundcloud.com>
+
diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
new file mode 100644
index 00000000..dd878a30
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/NOTICE
@@ -0,0 +1,23 @@
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
+
+Support for streaming Protocol Buffer messages for the Go language (golang).
+https://github.com/matttproud/golang_protobuf_extensions
+Copyright 2013 Matt T. Proud
+Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
new file mode 100644
index 00000000..3460f034
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
@@ -0,0 +1 @@
+command-line-arguments.test
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 00000000..44986bff
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1 @@
+See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 00000000..623d3d83
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Registerer.Register.
+//
+// The stock metrics provided by this package (Gauge, Counter, Summary,
+// Histogram, Untyped) are also Collectors (which only ever collect one metric,
+// namely itself). An implementer of Collector may, however, collect multiple
+// metrics in a coordinated fashion and/or create metrics on the fly. Examples
+// for collectors already implemented in this library are the metric vectors
+// (i.e. collection of multiple instances of the same Metric but with different
+// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+ // Describe sends the super-set of all possible descriptors of metrics
+ // collected by this Collector to the provided channel and returns once
+ // the last descriptor has been sent. The sent descriptors fulfill the
+ // consistency and uniqueness requirements described in the Desc
+ // documentation. (It is valid if one and the same Collector sends
+ // duplicate descriptors. Those duplicates are simply ignored. However,
+ // two different Collectors must not send duplicate descriptors.) This
+ // method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector. If a Collector encounters an error while
+ // executing this method, it must send an invalid descriptor (created
+ // with NewInvalidDesc) to signal the error to the registry.
+ Describe(chan<- *Desc)
+ // Collect is called by the Prometheus registry when collecting
+ // metrics. The implementation sends each collected metric via the
+ // provided channel and returns once the last metric has been sent. The
+ // descriptor of each sent metric is one of those returned by
+ // Describe. Returned metrics that share the same descriptor must differ
+ // in their variable label values. This method may be called
+ // concurrently and must therefore be implemented in a concurrency safe
+ // way. Blocking occurs at the expense of total performance of rendering
+ // all registered metrics. Ideally, Collector implementations support
+ // concurrent readers.
+ Collect(chan<- Metric)
+}
+
+// selfCollector implements Collector for a single Metric so that the Metric
+// collects itself. Add it as an anonymous field to a struct that implements
+// Metric, and call init with the Metric itself as an argument.
+type selfCollector struct {
+ self Metric
+}
+
+// init provides the selfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *selfCollector) init(self Metric) {
+ c.self = self
+}
+
+// Describe implements Collector.
+func (c *selfCollector) Describe(ch chan<- *Desc) {
+ ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *selfCollector) Collect(ch chan<- Metric) {
+ ch <- c.self
+}
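
The Describe/Collect contract above is easiest to see in a small custom Collector. This is a hypothetical sketch (uptimeCollector is not part of the library): it announces one Desc and emits a throw-away const metric for it on every scrape.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// uptimeCollector creates its metric on the fly at collect time.
type uptimeCollector struct {
	desc  *prometheus.Desc
	start time.Time
}

// Describe sends the one descriptor this collector will ever use.
func (c *uptimeCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect emits a const metric whose Desc was announced by Describe.
func (c *uptimeCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(
		c.desc, prometheus.GaugeValue, time.Since(c.start).Seconds(),
	)
}

func main() {
	prometheus.MustRegister(&uptimeCollector{
		desc: prometheus.NewDesc(
			"app_uptime_seconds", "Seconds since the process started.",
			nil, nil,
		),
		start: time.Now(),
	})
}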
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 00000000..ee37949a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,172 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+ Metric
+ Collector
+
+ // Set is used to set the Counter to an arbitrary value. It is only used
+ // if you have to transfer a value from an external counter into this
+ // Prometheus metric. Do not use it for regular handling of a
+ // Prometheus counter (as it can be used to break the contract of
+ // monotonically increasing values).
+ //
+ // Deprecated: Use NewConstMetric to create a counter for an external
+ // value. A Counter should never be set.
+ Set(float64)
+ // Inc increments the counter by 1.
+ Inc()
+ // Add adds the given value to the counter. It panics if the value is <
+ // 0.
+ Add(float64)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+func NewCounter(opts CounterOpts) Counter {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ )
+ result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+ result.init(result) // Init self-collection.
+ return result
+}
+
+type counter struct {
+ value
+}
+
+func (c *counter) Add(v float64) {
+ if v < 0 {
+ panic(errors.New("counter cannot decrease in value"))
+ }
+ c.value.Add(v)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+//
+// CounterVec embeds MetricVec. See there for a full list of methods with
+// detailed documentation.
+type CounterVec struct {
+ *MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &CounterVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ result := &counter{value: value{
+ desc: desc,
+ valType: CounterValue,
+ labelPairs: makeLabelPairs(desc, lvs),
+ }}
+ result.init(result) // Init self-collection.
+ return result
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Counter and not a
+// Metric so that no type conversion is required.
+func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Counter and not a Metric so that no
+// type conversion is required.
+func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Counter), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
+ return m.MetricVec.WithLabelValues(lvs...).(Counter)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *CounterVec) With(labels Labels) Counter {
+ return m.MetricVec.With(labels).(Counter)
+}
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+ Metric
+ Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), CounterValue, function)
+}
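
A hedged usage sketch for the Counter API above; the metric names and label values are made up:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// A plain Counter for a single series...
	tasks := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "tasks_completed_total",
		Help: "Total completed tasks.",
	})
	// ...and a CounterVec partitioned by labels.
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total HTTP requests served.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(tasks, requests)

	tasks.Inc()
	requests.WithLabelValues("404", "GET").Add(42)
}

WithLabelValues trades the error return of GetMetricWithLabelValues for a panic, which keeps instrumentation call sites to a single expression.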
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 00000000..0593be50
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,205 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+var (
+ metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+ labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+)
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka
+// label dimensions) in both constLabels and variableLabels, but they must
+// differ in the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+ // fqName has been built from Namespace, Subsystem, and Name.
+ fqName string
+ // help provides some helpful information about this metric.
+ help string
+ // constLabelPairs contains precalculated DTO label pairs based on
+ // the constant labels.
+ constLabelPairs []*dto.LabelPair
+ // VariableLabels contains names of labels for which the metric
+ // maintains variable values.
+ variableLabels []string
+ // id is a hash of the values of the ConstLabels and fqName. This
+ // must be unique among all registered descriptors and can therefore be
+ // used as an identifier of the descriptor.
+ id uint64
+ // dimHash is a hash of the label names (preset and variable) and the
+ // Help string. Each Desc with the same fqName must have the same
+ // dimHash.
+ dimHash uint64
+ // err is an error that occurred during construction. It is reported at
+ // registration time.
+ err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported at registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName and help must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Opts documentation for the implications of
+// constant labels.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ d := &Desc{
+ fqName: fqName,
+ help: help,
+ variableLabels: variableLabels,
+ }
+ if help == "" {
+ d.err = errors.New("empty help string")
+ return d
+ }
+ if !metricNameRE.MatchString(fqName) {
+ d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+ return d
+ }
+ // labelValues contains the label values of const labels (in order of
+ // their sorted label names) plus the fqName (at position 0).
+ labelValues := make([]string, 1, len(constLabels)+1)
+ labelValues[0] = fqName
+ labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNameSet := map[string]struct{}{}
+ // First add only the const label names and sort them...
+ for labelName := range constLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ sort.Strings(labelNames)
+ // ... so that we can now add const label values in the order of their names.
+ for _, labelName := range labelNames {
+ labelValues = append(labelValues, constLabels[labelName])
+ }
+ // Now add the variable label names, but prefix them with something that
+ // cannot be in a regular label name. That prevents matching the label
+ // dimension with a different mix between preset and variable labels.
+ for _, labelName := range variableLabels {
+ if !checkLabelName(labelName) {
+ d.err = fmt.Errorf("%q is not a valid label name", labelName)
+ return d
+ }
+ labelNames = append(labelNames, "$"+labelName)
+ labelNameSet[labelName] = struct{}{}
+ }
+ if len(labelNames) != len(labelNameSet) {
+ d.err = errors.New("duplicate label names")
+ return d
+ }
+ vh := hashNew()
+ for _, val := range labelValues {
+ vh = hashAdd(vh, val)
+ vh = hashAddByte(vh, separatorByte)
+ }
+ d.id = vh
+ // Sort labelNames so that order doesn't matter for the hash.
+ sort.Strings(labelNames)
+ // Now hash together (in this order) the help string and the sorted
+ // label names.
+ lh := hashNew()
+ lh = hashAdd(lh, help)
+ lh = hashAddByte(lh, separatorByte)
+ for _, labelName := range labelNames {
+ lh = hashAdd(lh, labelName)
+ lh = hashAddByte(lh, separatorByte)
+ }
+ d.dimHash = lh
+
+ d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+ for n, v := range constLabels {
+ d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(v),
+ })
+ }
+ sort.Sort(LabelPairSorter(d.constLabelPairs))
+ return d
+}
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+ return &Desc{
+ err: err,
+ }
+}
+
+func (d *Desc) String() string {
+ lpStrings := make([]string, 0, len(d.constLabelPairs))
+ for _, lp := range d.constLabelPairs {
+ lpStrings = append(
+ lpStrings,
+ fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+ )
+ }
+ return fmt.Sprintf(
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ d.fqName,
+ d.help,
+ strings.Join(lpStrings, ","),
+ d.variableLabels,
+ )
+}
+
+func checkLabelName(l string) bool {
+ return labelNameRE.MatchString(l) &&
+ !strings.HasPrefix(l, reservedLabelPrefix)
+}
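
A sketch of building a Desc directly, as a custom Collector would; the namespace, subsystem, and label values are invented for illustration:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		prometheus.BuildFQName("myapp", "db", "queries_total"), // fqName
		"Total database queries.",                              // help
		[]string{"operation"},                                  // variable labels
		prometheus.Labels{"shard": "eu-1"},                     // constant labels
	)
	fmt.Println(desc) // Desc{fqName: "myapp_db_queries_total", ...}
}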
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 00000000..b15a2d3b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,181 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus provides metrics primitives to instrument code for
+// monitoring. It also offers a registry for metrics. Sub-packages allow you
+// to expose the registered metrics via HTTP (package promhttp) or push them
+// to a Pushgateway (package push).
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// A Basic Example
+//
+// As a starting point, a very basic usage example:
+//
+// package main
+//
+// import (
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+// )
+//
+// var (
+// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// })
+// hdFailures = prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// },
+// []string{"device"},
+// )
+// )
+//
+// func init() {
+// // Metrics have to be registered to be exposed:
+// prometheus.MustRegister(cpuTemp)
+// prometheus.MustRegister(hdFailures)
+// }
+//
+// func main() {
+// cpuTemp.Set(65.3)
+// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+// // The Handler function provides a default handler to expose metrics
+// // via an HTTP server. "/metrics" is the usual endpoint for that.
+// http.Handle("/metrics", promhttp.Handler())
+// http.ListenAndServe(":8080", nil)
+// }
+//
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+//
+// Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage.
+//
+// Above, you have already touched the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. A more thorough description
+// of those four metric types can be found in the Prometheus docs:
+// https://prometheus.io/docs/concepts/metric_types/
+//
+// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the
+// Prometheus server not to assume anything about its type.
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary,
+// Histogram, and Untyped, a very important part of the Prometheus data model is
+// the partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// HistogramVec, and UntypedVec.
+//
+// While only the fundamental metric types implement the Metric interface, both
+// the metrics and their vector versions implement the Collector interface. A
+// Collector manages the collection of a number of Metrics, but for convenience,
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,
+// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,
+// SummaryVec, HistogramVec, and UntypedVec are not.
+//
+// To create instances of Metrics and their vector versions, you need a suitable
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
+// HistogramOpts, or UntypedOpts.
+//
+// Custom Collectors and constant Metrics
+//
+// While you could create your own implementations of Metric, most likely you
+// will only ever implement the Collector interface on your own. At a first
+// glance, a custom Collector seems handy to bundle Metrics for common
+// registration (with the prime example of the different metric vectors above,
+// which bundle all the metrics of the same name but with different labels).
+//
+// There is a more involved use case, too: If you already have metrics
+// available, created outside of the Prometheus context, you don't need the
+// interface of the various Metric types. You essentially want to mirror the
+// existing numbers into Prometheus Metrics during collection. Implementing
+// the Collector interface yourself is perfect for that. You can create
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
+// NewConstSummary (and their respective Must… versions). That will happen in
+// the Collect method. The Describe method has to return separate Desc
+// instances, representative of the “throw-away” metrics to be created
+// later. NewDesc comes in handy to create those Desc instances.
+//
+// The Collector example illustrates the use case. You can also look at the
+// source code of the processCollector (mirroring process metrics), the
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
+// metrics) as examples that are used in this package itself.
+//
+// If you just need to call a function to get a single float value to collect as
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
+// shortcuts.
+//
+// Advanced Uses of the Registry
+//
+// While MustRegister is by far the most common way of registering a Collector,
+// sometimes you might want to handle the errors the registration might
+// cause. As suggested by the name, MustRegister panics if an error occurs. With
+// the Register function, the error is returned and can be handled.
+//
+// An error is returned if the registered Collector is incompatible or
+// inconsistent with already registered metrics. The registry aims for
+// consistency of the collected metrics according to the Prometheus data
+// model. Inconsistencies are ideally detected at registration time, not at
+// collect time. The former will usually be detected at start-up time of a
+// program, while the latter will only happen at scrape time, possibly not even
+// on the first scrape if the inconsistency only becomes relevant later. That is
+// the main reason why a Collector and a Metric have to describe themselves to
+// the registry.
+//
+// So far, everything we did operated on the so-called default registry, as it
+// can be found in the global DefaultRegistry variable. With NewRegistry, you
+// can create a custom registry, or you can even implement the Registerer or
+// Gatherer interfaces yourself. The methods Register and Unregister work in
+// the same way on a custom registry as the global functions Register and
+// Unregister on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries
+// with special properties, see NewPedanticRegistry. You can avoid global state,
+// as it is imposed by the DefaultRegistry. You can use multiple registries at
+// the same time to expose different metrics in different ways. You can use
+// separate registries for testing purposes.
+//
+// Also note that the DefaultRegistry comes registered with a Collector for Go
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
+// NewProcessCollector). With a custom registry, you are in control and decide
+// yourself about the Collectors to register.
+//
+// HTTP Exposition
+//
+// The Registry implements the Gatherer interface. The caller of the Gather
+// method can then expose the gathered metrics in some way. Usually, the metrics
+// are served via HTTP on the /metrics endpoint. That's happening in the example
+// above. The tools to expose metrics via HTTP are in the promhttp
+// sub-package. (The top-level functions in the prometheus package are
+// deprecated.)
+//
+// Pushing to the Pushgateway
+//
+// Functions for pushing to the Pushgateway can be found in the push sub-package.
+//
+// Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added. Sending metrics to
+// Graphite would be an example that will soon be implemented.
+package prometheus
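
A sketch of the custom-registry flow described under "Advanced Uses of the Registry" above. HandlerFor from the promhttp sub-package is assumed available here; the metric itself is made up.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry avoids the global DefaultRegistry state and
	// starts without the Go and process collectors.
	reg := prometheus.NewRegistry()

	poolSize := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "worker_pool_size",
		Help: "Current number of workers in the pool.",
	})
	if err := reg.Register(poolSize); err != nil {
		log.Fatal(err) // e.g. an inconsistent or duplicate descriptor
	}

	// Expose only this registry's metrics.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}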
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
new file mode 100644
index 00000000..18a99d5f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -0,0 +1,119 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "encoding/json"
+ "expvar"
+)
+
+type expvarCollector struct {
+ exports map[string]*Desc
+}
+
+// NewExpvarCollector returns a newly allocated expvar Collector that still has
+// to be registered with a Prometheus registry.
+//
+// An expvar Collector collects metrics from the expvar interface. It provides a
+// quick way to expose numeric values that are already exported via expvar as
+// Prometheus metrics. Note that the data models of expvar and Prometheus are
+// fundamentally different, and that the expvar Collector is inherently slower
+// than native Prometheus metrics. Thus, the expvar Collector is probably great
+// for experiments and prototyping, but you should seriously consider a more
+// direct implementation of Prometheus metrics for monitoring production
+// systems.
+//
+// The exports map has the following meaning:
+//
+// The keys in the map correspond to expvar keys, i.e. for every expvar key you
+// want to export as Prometheus metric, you need an entry in the exports
+// map. The descriptor mapped to each key describes how to export the expvar
+// value. It defines the name and the help string of the Prometheus metric
+// proxying the expvar value. The type will always be Untyped.
+//
+// For descriptors without variable labels, the expvar value must be a number or
+// a bool. The number is then directly exported as the Prometheus sample
+// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values
+// that are not numbers or bools are silently ignored.
+//
+// If the descriptor has one variable label, the expvar value must be an expvar
+// map. The keys in the expvar map become the various values of the one
+// Prometheus label. The values in the expvar map must be numbers or bools again
+// as above.
+//
+// For descriptors with more than one variable label, the expvar must be a
+// nested expvar map, i.e. where the values of the topmost map are maps again
+// etc. until a depth is reached that corresponds to the number of labels. The
+// leaves of that structure must be numbers or bools as above to serve as the
+// sample values.
+//
+// Anything that does not fit into the scheme above is silently ignored.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+ return &expvarCollector{
+ exports: exports,
+ }
+}
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+ for _, desc := range e.exports {
+ ch <- desc
+ }
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+ for name, desc := range e.exports {
+ var m Metric
+ expVar := expvar.Get(name)
+ if expVar == nil {
+ continue
+ }
+ var v interface{}
+ labels := make([]string, len(desc.variableLabels))
+ if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+ ch <- NewInvalidMetric(desc, err)
+ continue
+ }
+ var processValue func(v interface{}, i int)
+ processValue = func(v interface{}, i int) {
+ if i >= len(labels) {
+ copiedLabels := append(make([]string, 0, len(labels)), labels...)
+ switch v := v.(type) {
+ case float64:
+ m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+ case bool:
+ if v {
+ m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+ } else {
+ m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+ }
+ default:
+ return
+ }
+ ch <- m
+ return
+ }
+ vm, ok := v.(map[string]interface{})
+ if !ok {
+ return
+ }
+ for lv, val := range vm {
+ labels[i] = lv
+ processValue(val, i+1)
+ }
+ }
+ processValue(v, 0)
+ }
+}
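
A hedged sketch wiring an existing expvar value through NewExpvarCollector; the variable name and metric are illustrative:

package main

import (
	"expvar"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// An existing expvar value, e.g. maintained elsewhere in the program.
	open := expvar.NewInt("open_connections")
	open.Set(7)

	// Proxy it as an (untyped) Prometheus metric.
	c := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		"open_connections": prometheus.NewDesc(
			"open_connections",
			"Currently open connections, mirrored from expvar.",
			nil, nil,
		),
	})
	prometheus.MustRegister(c)
}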
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 00000000..e3b67df8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,29 @@
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
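
These helpers are an inlined FNV-64a. The unexported functions cannot be called from outside the package, but the standard library's hash/fnv computes the same function over the same input, which this sketch uses to illustrate the algorithm:

package main

import (
	"fmt"
	"hash/fnv"
)

func main() {
	// Equivalent to hashAdd(hashNew(), "http_requests_total") above:
	// start at offset64, then XOR each byte and multiply by prime64.
	h := fnv.New64a()
	h.Write([]byte("http_requests_total"))
	fmt.Println(h.Sum64())
}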
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 00000000..8b70e514
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,140 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+ Metric
+ Collector
+
+ // Set sets the Gauge to an arbitrary value.
+ Set(float64)
+ // Inc increments the Gauge by 1.
+ Inc()
+ // Dec decrements the Gauge by 1.
+ Dec()
+ // Add adds the given value to the Gauge. (The value can be
+ // negative, resulting in a decrease of the Gauge.)
+ Add(float64)
+ // Sub subtracts the given value from the Gauge. (The value can be
+ // negative, resulting in an increase of the Gauge.)
+ Sub(float64)
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+func NewGauge(opts GaugeOpts) Gauge {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, 0)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+ *MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &GaugeVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newValue(desc, GaugeValue, 0, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Gauge and not a
+// Metric so that no type conversion is required.
+func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Gauge and not a Metric so that no
+// type conversion is required.
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Gauge), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *GaugeVec) With(labels Labels) Gauge {
+ return m.MetricVec.With(labels).(Gauge)
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+ Metric
+ Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. If that results in concurrent calls to Write, like in the case
+// where a GaugeFunc is directly registered with Prometheus, the provided
+// function must be concurrency-safe.
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), GaugeValue, function)
+}
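
A hedged sketch of the Gauge API above, contrasting a Gauge that is set explicitly with a GaugeFunc evaluated at scrape time; the names are made up:

package main

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A Gauge that the program updates itself...
	queue := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "job_queue_depth",
		Help: "Current number of queued jobs.",
	})
	// ...and a GaugeFunc whose value is read on every scrape.
	goroutines := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "app_goroutines",
		Help: "Number of goroutines, read at collect time.",
	}, func() float64 { return float64(runtime.NumGoroutine()) })

	prometheus.MustRegister(queue, goroutines)
	queue.Set(3)
	queue.Dec() // gauges may go down, unlike counters
}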
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 00000000..abc9d4ec
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,263 @@
+package prometheus
+
+import (
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "time"
+)
+
+type goCollector struct {
+ goroutines Gauge
+ gcDesc *Desc
+
+ // metrics to describe and collect
+ metrics memStatsMetrics
+}
+
+// NewGoCollector returns a collector which exports metrics about the current
+// Go process.
+func NewGoCollector() Collector {
+ return &goCollector{
+ goroutines: NewGauge(GaugeOpts{
+ Namespace: "go",
+ Name: "goroutines",
+ Help: "Number of goroutines that currently exist.",
+ }),
+ gcDesc: NewDesc(
+ "go_gc_duration_seconds",
+ "A summary of the GC invocation durations.",
+ nil, nil),
+ metrics: memStatsMetrics{
+ {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes"),
+ "Number of bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes_total"),
+ "Total number of bytes allocated, even if freed.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("sys_bytes"),
+ "Number of bytes obtained by system. Sum of all system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("lookups_total"),
+ "Total number of pointer lookups.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mallocs_total"),
+ "Total number of mallocs.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("frees_total"),
+ "Total number of frees.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_alloc_bytes"),
+ "Number of heap bytes allocated and still in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_sys_bytes"),
+ "Number of heap bytes obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_idle_bytes"),
+ "Number of heap bytes waiting to be used.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_inuse_bytes"),
+ "Number of heap bytes that are in use.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_released_bytes_total"),
+ "Total number of heap bytes released to OS.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_objects"),
+ "Number of allocated objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_inuse_bytes"),
+ "Number of bytes in use by the stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_sys_bytes"),
+ "Number of bytes obtained from system for stack allocator.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_inuse_bytes"),
+ "Number of bytes in use by mspan structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_sys_bytes"),
+ "Number of bytes used for mspan structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_inuse_bytes"),
+ "Number of bytes in use by mcache structures.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_sys_bytes"),
+ "Number of bytes used for mcache structures obtained from system.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("buck_hash_sys_bytes"),
+ "Number of bytes used by the profiling bucket hash table.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_sys_bytes"),
+ "Number of bytes used for garbage collection system metadata.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("other_sys_bytes"),
+ "Number of bytes used for other system allocations.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("next_gc_bytes"),
+ "Number of heap bytes when next garbage collection will take place.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("last_gc_time_seconds"),
+ "Number of seconds since 1970 of last garbage collection.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
+ valType: GaugeValue,
+ },
+ },
+ }
+}
+
+func memstatNamespace(s string) string {
+ return fmt.Sprintf("go_memstats_%s", s)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ ch <- c.goroutines.Desc()
+ ch <- c.gcDesc
+
+ for _, i := range c.metrics {
+ ch <- i.desc
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ c.goroutines.Set(float64(runtime.NumGoroutine()))
+ ch <- c.goroutines
+
+ var stats debug.GCStats
+ stats.PauseQuantiles = make([]time.Duration, 5)
+ debug.ReadGCStats(&stats)
+
+ quantiles := make(map[float64]float64)
+ for idx, pq := range stats.PauseQuantiles[1:] {
+ quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+ }
+ quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+ ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
+
+ ms := &runtime.MemStats{}
+ runtime.ReadMemStats(ms)
+ for _, i := range c.metrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+ }
+}
+
+// memStatsMetrics provides description, value, and value type for memstat metrics.
+type memStatsMetrics []struct {
+ desc *Desc
+ eval func(*runtime.MemStats) float64
+ valType ValueType
+}
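A minimal usage sketch for the collector above (not part of the vendored sources): registering it on a fresh registry and exposing it over HTTP via the promhttp package added later in this diff.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A fresh registry, so the example does not collide with the default
	// registry's pre-registered collectors (see registry.go below).
	reg := prometheus.NewRegistry()
	// Runtime metrics from go_collector.go: goroutines, GC, memstats.
	reg.MustRegister(prometheus.NewGoCollector())
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":9090", nil)
}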
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 00000000..9719e8fa
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,444 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable buckets. Similar to a summary, it also provides a sum of
+// observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile function in the query language.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated with the
+// Prometheus query language (see the documentation for detailed
+// procedures). However, Histograms require the user to pre-define suitable
+// buckets, and they are in general less accurate. The Observe method of a
+// Histogram has a very low performance overhead in comparison with the Observe
+// method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the histogram.
+ Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+// DefBuckets are the default Histogram buckets. The default buckets are
+// tailored to broadly measure the response time (in seconds) of a network
+// service. Most likely, however, you will be required to define buckets
+// customized to your use case.
+var (
+ DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+ errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
+ )
+)
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
+func LinearBuckets(start, width float64, count int) []float64 {
+ if count < 1 {
+ panic("LinearBuckets needs a positive count")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start += width
+ }
+ return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBuckets needs a positive count")
+ }
+ if start <= 0 {
+ panic("ExponentialBuckets needs a positive start value")
+ }
+ if factor <= 1 {
+ panic("ExponentialBuckets needs a factor greater than 1")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start *= factor
+ }
+ return buckets
+}
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type HistogramOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Histogram (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Histogram must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Histogram. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Histogram. Histograms with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // HistogramVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Histograms with the same fully-qualified
+// name. In that case, those Histograms must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Buckets defines the buckets into which observations are counted. Each
+ // element in the slice is the upper inclusive bound of a bucket. The
+ // values must be sorted in strictly increasing order. There is no need
+ // to add a highest bucket with +Inf bound; it will be added
+ // implicitly. The default value is DefBuckets.
+ Buckets []float64
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+func NewHistogram(opts HistogramOpts) Histogram {
+ return newHistogram(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == bucketLabel {
+ panic(errBucketLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Buckets) == 0 {
+ opts.Buckets = DefBuckets
+ }
+
+ h := &histogram{
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ for i, upperBound := range h.upperBounds {
+ if i < len(h.upperBounds)-1 {
+ if upperBound >= h.upperBounds[i+1] {
+ panic(fmt.Errorf(
+ "histogram buckets must be in increasing order: %f >= %f",
+ upperBound, h.upperBounds[i+1],
+ ))
+ }
+ } else {
+ if math.IsInf(upperBound, +1) {
+ // The +Inf bucket is implicit. Remove it here.
+ h.upperBounds = h.upperBounds[:i]
+ }
+ }
+ }
+ // Now we know the final length of h.upperBounds and can make counts.
+ h.counts = make([]uint64, len(h.upperBounds))
+
+ h.init(h) // Init self-collection.
+ return h
+}
+
+type histogram struct {
+ // sumBits contains the bits of the float64 representing the sum of all
+ // observations. sumBits and count have to go first in the struct to
+ // guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ sumBits uint64
+ count uint64
+
+ selfCollector
+ // Note that there is no mutex required.
+
+ desc *Desc
+
+ upperBounds []float64
+ counts []uint64
+
+ labelPairs []*dto.LabelPair
+}
+
+func (h *histogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+ // TODO(beorn7): For small numbers of buckets (<30), a linear search is
+ // slightly faster than the binary search. If we really care, we could
+ // switch from one search strategy to the other depending on the number
+ // of buckets.
+ //
+ // Microbenchmarks (BenchmarkHistogramNoLabels):
+ // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
+ // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
+ // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ i := sort.SearchFloat64s(h.upperBounds, v)
+ if i < len(h.counts) {
+ atomic.AddUint64(&h.counts[i], 1)
+ }
+ atomic.AddUint64(&h.count, 1)
+ for {
+ oldBits := atomic.LoadUint64(&h.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+ break
+ }
+ }
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, len(h.upperBounds))
+
+ his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
+ his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
+ var count uint64
+ for i, upperBound := range h.upperBounds {
+ count += atomic.LoadUint64(&h.counts[i])
+ buckets[i] = &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ }
+ }
+ his.Bucket = buckets
+ out.Histogram = his
+ out.Label = h.labelPairs
+ return nil
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+ *MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &HistogramVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newHistogram(desc, opts, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Histogram and not a
+// Metric so that no type conversion is required.
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Histogram and not a Metric so that no
+// type conversion is required.
+func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Histogram), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
+ return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *HistogramVec) With(labels Labels) Histogram {
+ return m.MetricVec.With(labels).(Histogram)
+}
+
+type constHistogram struct {
+ desc *Desc
+ count uint64
+ sum float64
+ buckets map[float64]uint64
+ labelPairs []*dto.LabelPair
+}
+
+func (h *constHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+ his := &dto.Histogram{}
+ buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+ his.SampleCount = proto.Uint64(h.count)
+ his.SampleSum = proto.Float64(h.sum)
+
+ for upperBound, count := range h.buckets {
+ buckets = append(buckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(count),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+
+ if len(buckets) > 0 {
+ sort.Sort(buckSort(buckets))
+ }
+ his.Bucket = buckets
+
+ out.Histogram = his
+ out.Label = h.labelPairs
+
+ return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+ return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+ return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
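A usage sketch for the histogram API above (the metric name and label are illustrative, not taken from this repository):

package main

import "github.com/prometheus/client_golang/prometheus"

var requestDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "HTTP request latencies in seconds.",
		Buckets: prometheus.DefBuckets, // same as leaving Buckets unset
	},
	[]string{"handler"},
)

func init() {
	prometheus.MustRegister(requestDuration)
}

func main() {
	// The bucket helpers generate upper bounds as documented above:
	_ = prometheus.LinearBuckets(0.1, 0.1, 5)      // 0.1, 0.2, 0.3, 0.4, 0.5
	_ = prometheus.ExponentialBuckets(0.01, 10, 4) // 0.01, 0.1, 1, 10
	// WithLabelValues panics on a label-count mismatch;
	// GetMetricWithLabelValues returns an error instead.
	requestDuration.WithLabelValues("pages").Observe(0.042)
}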
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
new file mode 100644
index 00000000..b2b00196
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -0,0 +1,499 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/prometheus/common/expfmt"
+)
+
+// TODO(beorn7): Remove this whole file. It is a partial mirror of
+// promhttp/http.go (to avoid circular import chains) where everything HTTP
+// related should live. The functions here are just for avoiding
+// breakage. Everything is deprecated.
+
+const (
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+)
+
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+ buf := bufPool.Get()
+ if buf == nil {
+ return &bytes.Buffer{}
+ }
+ return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the DefaultGatherer. It is
+// already instrumented with InstrumentHandler (using "prometheus" as handler
+// name).
+//
+// Deprecated: Please note the issues described in the doc comment of
+// InstrumentHandler. You might want to consider using promhttp.Handler instead
+// (which is not instrumented).
+func Handler() http.Handler {
+ return InstrumentHandler("prometheus", UninstrumentedHandler())
+}
+
+// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
+//
+// Deprecated: Use promhttp.Handler instead. See there for further documentation.
+func UninstrumentedHandler() http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ mfs, err := DefaultGatherer.Gather()
+ if err != nil {
+ http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ buf := getBuf()
+ defer giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf)
+ enc := expfmt.NewEncoder(writer, contentType)
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ if lastErr != nil && buf.Len() == 0 {
+ http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+ })
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
+
+var instLabels = []string{"method", "code"}
+
+type nower interface {
+ Now() time.Time
+}
+
+type nowFunc func() time.Time
+
+func (n nowFunc) Now() time.Time {
+ return n()
+}
+
+var now nower = nowFunc(func() time.Time {
+ return time.Now()
+})
+
+func nowSeries(t ...time.Time) nower {
+ return nowFunc(func() time.Time {
+ defer func() {
+ t = t[1:]
+ }()
+
+ return t[0]
+ })
+}
+
+// InstrumentHandler wraps the given HTTP handler for instrumentation. It
+// registers four metric collectors (if not already done) and reports HTTP
+// metrics to the (newly or already) registered collectors: http_requests_total
+// (CounterVec), http_request_duration_microseconds (Summary),
+// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
+// has a constant label named "handler" with the provided handlerName as
+// value. http_requests_total is a metric vector partitioned by HTTP method
+// (label name "method") and HTTP status code (label name "code").
+//
+// Deprecated: InstrumentHandler has several issues:
+//
+// - It uses Summaries rather than Histograms. Summaries are not useful if
+// aggregation across multiple instances is required.
+//
+// - It uses microseconds as the unit, which is deprecated and should be replaced by
+// seconds.
+//
+// - The size of the request is calculated in a separate goroutine. Since this
+// calculator requires access to the request header, it creates a race with
+// any writes to the header performed during request handling.
+// httputil.ReverseProxy is a prominent example of a handler
+// performing such writes.
+//
+// Upcoming versions of this package will provide ways of instrumenting HTTP
+// handlers that are more flexible and have fewer issues. Please prefer direct
+// instrumentation in the meantime.
+func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFunc wraps the given function for instrumentation. It
+// otherwise works in the same way as InstrumentHandler (and shares the same
+// issues).
+//
+// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
+// InstrumentHandler is.
+func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(
+ SummaryOpts{
+ Subsystem: "http",
+ ConstLabels: Labels{"handler": handlerName},
+ },
+ handlerFunc,
+ )
+}
+
+// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
+// issues) but provides more flexibility (at the cost of a more complex call
+// syntax). As InstrumentHandler, this function registers four metric
+// collectors, but it uses the provided SummaryOpts to create them. However, the
+// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
+// by "requests_total", "request_duration_microseconds", "request_size_bytes",
+// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
+// help string. The names of the variable labels of the http_requests_total
+// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
+//
+// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
+// behavior of InstrumentHandler:
+//
+// prometheus.InstrumentHandlerWithOpts(
+// prometheus.SummaryOpts{
+// Subsystem: "http",
+// ConstLabels: prometheus.Labels{"handler": handlerName},
+// },
+// handler,
+// )
+//
+// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
+// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
+// and all its fields are set to the equally named fields in the provided
+// SummaryOpts.
+//
+// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
+// InstrumentHandler is.
+func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
+ return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
+}
+
+// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
+// the same issues) but provides more flexibility (at the cost of a more complex
+// call syntax). See InstrumentHandlerWithOpts for details on how the provided
+// SummaryOpts are used.
+//
+// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
+// as InstrumentHandler is.
+func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
+ reqCnt := NewCounterVec(
+ CounterOpts{
+ Namespace: opts.Namespace,
+ Subsystem: opts.Subsystem,
+ Name: "requests_total",
+ Help: "Total number of HTTP requests made.",
+ ConstLabels: opts.ConstLabels,
+ },
+ instLabels,
+ )
+
+ opts.Name = "request_duration_microseconds"
+ opts.Help = "The HTTP request latencies in microseconds."
+ reqDur := NewSummary(opts)
+
+ opts.Name = "request_size_bytes"
+ opts.Help = "The HTTP request sizes in bytes."
+ reqSz := NewSummary(opts)
+
+ opts.Name = "response_size_bytes"
+ opts.Help = "The HTTP response sizes in bytes."
+ resSz := NewSummary(opts)
+
+ regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
+ regReqDur := MustRegisterOrGet(reqDur).(Summary)
+ regReqSz := MustRegisterOrGet(reqSz).(Summary)
+ regResSz := MustRegisterOrGet(resSz).(Summary)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+
+ delegate := &responseWriterDelegator{ResponseWriter: w}
+ out := computeApproximateRequestSize(r)
+
+ _, cn := w.(http.CloseNotifier)
+ _, fl := w.(http.Flusher)
+ _, hj := w.(http.Hijacker)
+ _, rf := w.(io.ReaderFrom)
+ var rw http.ResponseWriter
+ if cn && fl && hj && rf {
+ rw = &fancyResponseWriterDelegator{delegate}
+ } else {
+ rw = delegate
+ }
+ handlerFunc(rw, r)
+
+ elapsed := float64(time.Since(now)) / float64(time.Microsecond)
+
+ method := sanitizeMethod(r.Method)
+ code := sanitizeCode(delegate.status)
+ regReqCnt.WithLabelValues(method, code).Inc()
+ regReqDur.Observe(elapsed)
+ regResSz.Observe(float64(delegate.written))
+ regReqSz.Observe(float64(<-out))
+ })
+}
+
+func computeApproximateRequestSize(r *http.Request) <-chan int {
+ // Get the URL length in the current goroutine to avoid a race condition:
+ // a HandlerFunc running in parallel may modify the URL.
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
+ }
+
+ out := make(chan int, 1)
+
+ go func() {
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ out <- s
+ close(out)
+ }()
+
+ return out
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type fancyResponseWriterDelegator struct {
+ *responseWriterDelegator
+}
+
+func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
+ return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+func (f *fancyResponseWriterDelegator) Flush() {
+ f.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return f.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
+ if !f.wroteHeader {
+ f.WriteHeader(http.StatusOK)
+ }
+ n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
+ f.written += n
+ return n, err
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
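A sketch of the (deprecated) wrapper above in use; per its doc comment, direct instrumentation is preferred, but this is the call shape:

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	pages := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	// Registers and feeds the four metrics described above, each with
	// the constant label handler="pages".
	http.Handle("/", prometheus.InstrumentHandler("pages", pages))
	http.Handle("/metrics", prometheus.Handler())
	http.ListenAndServe(":8080", nil)
}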
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 00000000..d4063d98
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,166 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const separatorByte byte = 255
+
+// A Metric models a single sample value with its meta data being exported to
+// Prometheus. Implementations of Metric in this package are Gauge, Counter,
+// Histogram, Summary, and Untyped.
+type Metric interface {
+ // Desc returns the descriptor for the Metric. This method idempotently
+ // returns the same descriptor throughout the lifetime of the
+ // Metric. The returned descriptor is immutable by contract. A Metric
+ // unable to describe itself must return an invalid descriptor (created
+ // with NewInvalidDesc).
+ Desc() *Desc
+ // Write encodes the Metric into a "Metric" Protocol Buffer data
+ // transmission object.
+ //
+ // Metric implementations must observe concurrency safety as reads of
+ // this metric may occur at any time, and any blocking occurs at the
+ // expense of total performance of rendering all registered
+ // metrics. Ideally, Metric implementations should support concurrent
+ // readers.
+ //
+ // While populating dto.Metric, it is the responsibility of the
+ // implementation to ensure validity of the Metric protobuf (like valid
+ // UTF-8 strings or syntactically valid metric and label names). It is
+ // recommended to sort labels lexicographically. (Implementers may find
+ // LabelPairSorter useful for that.) Callers of Write should still make
+ // sure of sorting if they depend on it.
+ Write(*dto.Metric) error
+ // TODO(beorn7): The original rationale of passing in a pre-allocated
+ // dto.Metric protobuf to save allocations has disappeared. The
+ // signature of this method should be changed to "Write() (*dto.Metric,
+ // error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name and Help to a non-empty string. All other fields
+// are optional and can safely be left at their zero value.
+type Opts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Metric (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the metric must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this metric. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a metric
+ // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
+ // serve only special purposes. One is for the special case where the
+ // value of a label does not change during the lifetime of a process,
+ // e.g. if the revision of the running binary is put into a
+ // label. Another, more advanced purpose is if more than one Collector
+ // needs to collect Metrics with the same fully-qualified name. In that
+ // case, those Metrics must differ in the values of their
+ // ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+ if name == "" {
+ return ""
+ }
+ switch {
+ case namespace != "" && subsystem != "":
+ return strings.Join([]string{namespace, subsystem, name}, "_")
+ case namespace != "":
+ return strings.Join([]string{namespace, name}, "_")
+ case subsystem != "":
+ return strings.Join([]string{subsystem, name}, "_")
+ }
+ return name
+}
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers. This is useful for implementing the Write method of
+// custom metrics.
+type LabelPairSorter []*dto.LabelPair
+
+func (s LabelPairSorter) Len() int {
+ return len(s)
+}
+
+func (s LabelPairSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s LabelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+type hashSorter []uint64
+
+func (s hashSorter) Len() int {
+ return len(s)
+}
+
+func (s hashSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s hashSorter) Less(i, j int) bool {
+ return s[i] < s[j]
+}
+
+type invalidMetric struct {
+ desc *Desc
+ err error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+ return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
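To illustrate the joining rules in BuildFQName's switch above (a sketch assuming the usual fmt and prometheus imports):

fmt.Println(prometheus.BuildFQName("go", "memstats", "alloc_bytes")) // go_memstats_alloc_bytes
fmt.Println(prometheus.BuildFQName("", "http", "requests_total"))    // http_requests_total
fmt.Println(prometheus.BuildFQName("go", "", "goroutines"))          // go_goroutines
fmt.Println(prometheus.BuildFQName("go", "memstats", ""))            // "" (empty name wins)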
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 00000000..e31e62e7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "github.com/prometheus/procfs"
+
+type processCollector struct {
+ pid int
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ cpuTotal Counter
+ openFDs, maxFDs Gauge
+ vsize, rss Gauge
+ startTime Gauge
+}
+
+// NewProcessCollector returns a collector which exports the current state of
+// process metrics including CPU, memory, and file descriptor usage as well as
+// the process start time for the given process ID under the given namespace.
+func NewProcessCollector(pid int, namespace string) Collector {
+ return NewProcessCollectorPIDFn(
+ func() (int, error) { return pid, nil },
+ namespace,
+ )
+}
+
+// NewProcessCollectorPIDFn returns a collector which exports the current state
+// of process metrics including cpu, memory and file descriptor usage as well
+// as the process start time under the given namespace. The given pidFn is
+// called on each collect and is used to determine the process to export
+// metrics for.
+func NewProcessCollectorPIDFn(
+ pidFn func() (int, error),
+ namespace string,
+) Collector {
+ c := processCollector{
+ pidFn: pidFn,
+ collectFn: func(chan<- Metric) {},
+
+ cpuTotal: NewCounter(CounterOpts{
+ Namespace: namespace,
+ Name: "process_cpu_seconds_total",
+ Help: "Total user and system CPU time spent in seconds.",
+ }),
+ openFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_open_fds",
+ Help: "Number of open file descriptors.",
+ }),
+ maxFDs: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_max_fds",
+ Help: "Maximum number of open file descriptors.",
+ }),
+ vsize: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_virtual_memory_bytes",
+ Help: "Virtual memory size in bytes.",
+ }),
+ rss: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_resident_memory_bytes",
+ Help: "Resident memory size in bytes.",
+ }),
+ startTime: NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Name: "process_start_time_seconds",
+ Help: "Start time of the process since unix epoch in seconds.",
+ }),
+ }
+
+ // Set up process metric collection if supported by the runtime.
+ if _, err := procfs.NewStat(); err == nil {
+ c.collectFn = c.processCollect
+ }
+
+ return &c
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal.Desc()
+ ch <- c.openFDs.Desc()
+ ch <- c.maxFDs.Desc()
+ ch <- c.vsize.Desc()
+ ch <- c.rss.Desc()
+ ch <- c.startTime.Desc()
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+ c.collectFn(ch)
+}
+
+// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
+// client allows users to configure the error behavior.
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ return
+ }
+
+ if stat, err := p.NewStat(); err == nil {
+ c.cpuTotal.Set(stat.CPUTime())
+ ch <- c.cpuTotal
+ c.vsize.Set(float64(stat.VirtualMemory()))
+ ch <- c.vsize
+ c.rss.Set(float64(stat.ResidentMemory()))
+ ch <- c.rss
+
+ if startTime, err := stat.StartTime(); err == nil {
+ c.startTime.Set(startTime)
+ ch <- c.startTime
+ }
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ c.openFDs.Set(float64(fds))
+ ch <- c.openFDs
+ }
+
+ if limits, err := p.NewLimits(); err == nil {
+ c.maxFDs.Set(float64(limits.OpenFiles))
+ ch <- c.maxFDs
+ }
+}
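A registration sketch (assuming os and prometheus imports); a custom registry is used so it does not collide with the default registry's init() shown in registry.go below:

reg := prometheus.NewRegistry()
// The empty namespace yields the canonical process_* metric names.
reg.MustRegister(prometheus.NewProcessCollector(os.Getpid(), ""))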
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
new file mode 100644
index 00000000..b6dd5a26
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -0,0 +1,201 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright (c) 2013, The Prometheus Authors
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file.
+
+// Package promhttp contains functions to create http.Handler instances to
+// expose Prometheus metrics via HTTP. In later versions of this package, it
+// will also contain tooling to instrument instances of http.Handler and
+// http.RoundTripper.
+//
+// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
+// you can create a handler for a custom registry or anything that implements
+// the Gatherer interface. It also allows creating handlers that act
+// differently on errors or that log errors.
+package promhttp
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "sync"
+
+ "github.com/prometheus/common/expfmt"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ contentTypeHeader = "Content-Type"
+ contentLengthHeader = "Content-Length"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+)
+
+var bufPool sync.Pool
+
+func getBuf() *bytes.Buffer {
+ buf := bufPool.Get()
+ if buf == nil {
+ return &bytes.Buffer{}
+ }
+ return buf.(*bytes.Buffer)
+}
+
+func giveBuf(buf *bytes.Buffer) {
+ buf.Reset()
+ bufPool.Put(buf)
+}
+
+// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
+// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
+// error, no error logging, and compression if requested by the client.
+//
+// If you want to create a Handler for the DefaultGatherer with different
+// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
+// your desired HandlerOpts.
+func Handler() http.Handler {
+ return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
+}
+
+// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
+// of the Handler is defined by the provided HandlerOpts.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ mfs, err := reg.Gather()
+ if err != nil {
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error gathering metrics:", err)
+ }
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ if len(mfs) == 0 {
+ http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ case HTTPErrorOnError:
+ http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+
+ contentType := expfmt.Negotiate(req.Header)
+ buf := getBuf()
+ defer giveBuf(buf)
+ writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
+ enc := expfmt.NewEncoder(writer, contentType)
+ var lastErr error
+ for _, mf := range mfs {
+ if err := enc.Encode(mf); err != nil {
+ lastErr = err
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error encoding metric family:", err)
+ }
+ switch opts.ErrorHandling {
+ case PanicOnError:
+ panic(err)
+ case ContinueOnError:
+ // Handled later.
+ case HTTPErrorOnError:
+ http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+ }
+ if closer, ok := writer.(io.Closer); ok {
+ closer.Close()
+ }
+ if lastErr != nil && buf.Len() == 0 {
+ http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ header := w.Header()
+ header.Set(contentTypeHeader, string(contentType))
+ header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
+ if encoding != "" {
+ header.Set(contentEncodingHeader, encoding)
+ }
+ w.Write(buf.Bytes())
+ // TODO(beorn7): Consider streaming serving of metrics.
+ })
+}
+
+// HandlerErrorHandling defines how a Handler serving metrics will handle
+// errors.
+type HandlerErrorHandling int
+
+// These constants cause handlers serving metrics to behave as described if
+// errors are encountered.
+const (
+ // Serve an HTTP status code 500 upon the first error
+ // encountered. Report the error message in the body.
+ HTTPErrorOnError HandlerErrorHandling = iota
+ // Ignore errors and try to serve as many metrics as possible. However,
+ // if no metrics can be served, serve an HTTP status code 500 and the
+ // last error message in the body. Only use this in deliberate "best
+ // effort" metrics collection scenarios. It is recommended to at least
+ // log errors (by providing an ErrorLog in HandlerOpts) to not mask
+ // errors completely.
+ ContinueOnError
+ // Panic upon the first error encountered (useful for "crash only" apps).
+ PanicOnError
+)
+
+// Logger is the minimal interface HandlerOpts needs for logging. Note that
+// log.Logger from the standard library implements this interface, and custom
+// loggers can easily implement it if they don't do so already.
+type Logger interface {
+ Println(v ...interface{})
+}
+
+// HandlerOpts specifies options for how to serve metrics via an http.Handler. The
+// zero value of HandlerOpts is a reasonable default.
+type HandlerOpts struct {
+ // ErrorLog specifies an optional logger for errors collecting and
+ // serving metrics. If nil, errors are not logged at all.
+ ErrorLog Logger
+ // ErrorHandling defines how errors are handled. Note that errors are
+ // logged regardless of the configured ErrorHandling, provided ErrorLog
+ // is not nil.
+ ErrorHandling HandlerErrorHandling
+ // If DisableCompression is true, the handler will never compress the
+ // response, even if requested by the client.
+ DisableCompression bool
+}
+
+// decorateWriter wraps a writer to handle gzip compression if requested. It
+// returns the decorated writer and the appropriate "Content-Encoding" header
+// (which is empty if no compression is enabled).
+func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
+ if compressionDisabled {
+ return writer, ""
+ }
+ header := request.Header.Get(acceptEncodingHeader)
+ parts := strings.Split(header, ",")
+ for _, part := range parts {
+ part := strings.TrimSpace(part)
+ if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+ return gzip.NewWriter(writer), "gzip"
+ }
+ }
+ return writer, ""
+}
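A sketch of HandlerFor with non-default HandlerOpts, e.g. best-effort gathering with logged errors (assuming log, os, net/http, prometheus, and promhttp imports; the logger name is illustrative):

logger := log.New(os.Stderr, "metrics: ", log.LstdFlags)
http.Handle("/metrics", promhttp.HandlerFor(prometheus.DefaultGatherer,
	promhttp.HandlerOpts{
		// *log.Logger satisfies the promhttp.Logger interface.
		ErrorLog: logger,
		// Serve whatever could be gathered instead of failing outright.
		ErrorHandling: promhttp.ContinueOnError,
	}))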
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 00000000..405d6ddf
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,806 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "sort"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+const (
+ // Capacity for the channel to collect metrics and descriptors.
+ capMetricChan = 1000
+ capDescChan = 10
+)
+
+// DefaultRegisterer and DefaultGatherer are the implementations of the
+// Registerer and Gatherer interface a number of convenience functions in this
+// package act on. Initially, both variables point to the same Registry, which
+// has a process collector (see NewProcessCollector) and a Go collector (see
+// NewGoCollector) already registered. This approach of keeping default instances
+// as global state mirrors the approach of other packages in the Go standard
+// library. Note that there are caveats. Change the variables with caution and
+// only if you understand the consequences. Users who want to avoid global state
+// altogether should not use the convenience functions and should act on custom
+// instances instead.
+var (
+ defaultRegistry = NewRegistry()
+ DefaultRegisterer Registerer = defaultRegistry
+ DefaultGatherer Gatherer = defaultRegistry
+)
+
+func init() {
+ MustRegister(NewProcessCollector(os.Getpid(), ""))
+ MustRegister(NewGoCollector())
+}
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+ return &Registry{
+ collectorsByID: map[uint64]Collector{},
+ descIDs: map[uint64]struct{}{},
+ dimHashesByName: map[string]uint64{},
+ }
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
+func NewPedanticRegistry() *Registry {
+ r := NewRegistry()
+ r.pedanticChecksEnabled = true
+ return r
+}
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as the type for registration purposes (rather than the Registry type
+// directly). In that way, they are free to use a custom Registerer implementation
+// (e.g. for testing purposes).
+type Registerer interface {
+ // Register registers a new Collector to be included in metrics
+ // collection. It returns an error if the descriptors provided by the
+ // Collector are invalid or if they — in combination with descriptors of
+ // already registered Collectors — do not fulfill the consistency and
+ // uniqueness criteria described in the documentation of metric.Desc.
+ //
+ // If the provided Collector is equal to a Collector already registered
+ // (which includes the case of re-registering the same Collector), the
+ // returned error is an instance of AlreadyRegisteredError, which
+ // contains the previously registered Collector.
+ //
+ // It is in general not safe to register the same Collector multiple
+ // times concurrently.
+ Register(Collector) error
+ // MustRegister works like Register but registers any number of
+ // Collectors and panics upon the first registration that causes an
+ // error.
+ MustRegister(...Collector)
+ // Unregister unregisters the Collector that equals the Collector passed
+ // in as an argument. (Two Collectors are considered equal if their
+ // Describe method yields the same set of descriptors.) The function
+ // returns whether a Collector was unregistered.
+ //
+ // Note that even after unregistering, it will not be possible to
+ // register a new Collector that is inconsistent with the unregistered
+ // Collector, e.g. a Collector collecting metrics with the same name but
+ // a different help string. The rationale here is that the same registry
+ // instance must only collect consistent metrics throughout its
+ // lifetime.
+ Unregister(Collector) bool
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies. The Gatherer interface
+// comes with the same general implication as described for the Registerer
+// interface.
+type Gatherer interface {
+ // Gather calls the Collect method of the registered Collectors and then
+ // gathers the collected metrics into a lexicographically sorted slice
+ // of MetricFamily protobufs. Even if an error occurs, Gather attempts
+ // to gather as many metrics as possible. Hence, if a non-nil error is
+ // returned, the returned MetricFamily slice could be nil (in case of a
+ // fatal error that prevented any meaningful metric collection) or
+ // contain a number of MetricFamily protobufs, some of which might be
+ // incomplete, and some might be missing altogether. The returned error
+ // (which might be a MultiError) explains the details. In scenarios
+ // where complete collection is critical, the returned MetricFamily
+ // protobufs should be disregarded if the returned error is non-nil.
+ Gather() ([]*dto.MetricFamily, error)
+}
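+
+// A gathering sketch (illustrative only; reg is assumed to be the *Registry
+// created above). Note that the returned error may be a MultiError wrapping
+// several collection failures:
+//
+//    mfs, err := reg.Gather()
+//    if err != nil {
+//        fmt.Println("partial gather:", err)
+//    }
+//    for _, mf := range mfs {
+//        fmt.Println(mf.GetName(), mf.GetType())
+//    }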
+
+// Register registers the provided Collector with the DefaultRegisterer.
+//
+// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
+// details.
+func Register(c Collector) error {
+ return DefaultRegisterer.Register(c)
+}
+
+// MustRegister registers the provided Collectors with the DefaultRegisterer and
+// panics if any error occurs.
+//
+// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
+// there for more details.
+func MustRegister(cs ...Collector) {
+ DefaultRegisterer.MustRegister(cs...)
+}
+
+// RegisterOrGet registers the provided Collector with the DefaultRegisterer and
+// returns the Collector, unless an equal Collector was registered before, in
+// which case that Collector is returned.
+//
+// Deprecated: RegisterOrGet is merely a convenience function for the
+// implementation as described in the documentation for
+// AlreadyRegisteredError. As the use case is relatively rare, this function
+// will be removed in a future version of this package to clean up the
+// namespace.
+func RegisterOrGet(c Collector) (Collector, error) {
+ if err := Register(c); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ return are.ExistingCollector, nil
+ }
+ return nil, err
+ }
+ return c, nil
+}
+
+// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning
+// an error.
+//
+// Deprecated: This is deprecated for the same reason RegisterOrGet is. See
+// there for details.
+func MustRegisterOrGet(c Collector) Collector {
+ c, err := RegisterOrGet(c)
+ if err != nil {
+ panic(err)
+ }
+ return c
+}
+
+// Unregister removes the registration of the provided Collector from the
+// DefaultRegisterer.
+//
+// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
+// more details.
+func Unregister(c Collector) bool {
+ return DefaultRegisterer.Unregister(c)
+}
+
+// GathererFunc turns a function into a Gatherer.
+type GathererFunc func() ([]*dto.MetricFamily, error)
+
+// Gather implements Gatherer.
+func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
+ return gf()
+}
+
+// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that
+// gathers from the previous DefaultGatherer and then merges the MetricFamily
+// protobufs returned from the provided hook function with the MetricFamily
+// protobufs returned from the original DefaultGatherer.
+//
+// Deprecated: This function manipulates the DefaultGatherer variable. Consider
+// the implications, i.e. don't do this concurrently with any uses of the
+// DefaultGatherer. In the rare cases where you need to inject MetricFamily
+// protobufs directly, it is recommended to use a custom Registry and combine it
+// with a custom Gatherer using the Gatherers type (see
+// there). SetMetricFamilyInjectionHook only exists for compatibility reasons
+// with previous versions of this package.
+func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
+ DefaultGatherer = Gatherers{
+ DefaultGatherer,
+ GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }),
+ }
+}
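+
+// A sketch of the recommended replacement for the injection hook (illustrative
+// only; staticFamilies is an assumed, pre-built []*dto.MetricFamily):
+//
+//    g := prometheus.Gatherers{
+//        prometheus.DefaultGatherer,
+//        prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {
+//            return staticFamilies, nil
+//        }),
+//    }
+//    mfs, err := g.Gather()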
+
+// AlreadyRegisteredError is returned by the Register method if the Collector to
+// be registered has already been registered before, or a different Collector
+// that collects the same metrics has been registered before. Registration fails
+// in that case, but you can detect from the kind of error what has
+// happened. The error contains fields for the existing Collector and the
+// (rejected) new Collector that equals the existing one. This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+ ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+ return "duplicate metrics collector registration attempted"
+}
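+
+// The switch-over idiom mentioned above, as a sketch (illustrative only; opts
+// is an assumed CounterOpts value):
+//
+//    c := prometheus.NewCounter(opts)
+//    if err := prometheus.Register(c); err != nil {
+//        if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+//            // Use the Counter that was registered first.
+//            c = are.ExistingCollector.(prometheus.Counter)
+//        } else {
+//            panic(err)
+//        }
+//    }
+//    c.Inc()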
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+ if len(errs) == 0 {
+ return ""
+ }
+ buf := &bytes.Buffer{}
+ fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+ for _, err := range errs {
+ fmt.Fprintf(buf, "\n* %s", err)
+ }
+ return buf.String()
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+ switch len(errs) {
+ case 0:
+ return nil
+ case 1:
+ return errs[0]
+ default:
+ return errs
+ }
+}
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+ mtx sync.RWMutex
+ collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
+ descIDs map[uint64]struct{}
+ dimHashesByName map[string]uint64
+ pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ newDescIDs = map[uint64]struct{}{}
+ newDimHashesByName = map[string]uint64{}
+ collectorID uint64 // Just a sum of all desc IDs.
+ duplicateDescErr error
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+ // Conduct various tests...
+ for desc := range descChan {
+
+ // Is the descriptor valid at all?
+ if desc.err != nil {
+ return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ }
+
+ // Is the descID unique?
+ // (In other words: Is the fqName + constLabel combination unique?)
+ if _, exists := r.descIDs[desc.id]; exists {
+ duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+ }
+ // If it is not a duplicate desc in this collector, add it to
+ // the collectorID. (We allow duplicate descs within the same
+ // collector, but their existence must be a no-op.)
+ if _, exists := newDescIDs[desc.id]; !exists {
+ newDescIDs[desc.id] = struct{}{}
+ collectorID += desc.id
+ }
+
+ // Are all the label names and the help string consistent with
+ // previous descriptors of the same name?
+ // First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+ }
+ } else {
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ }
+ } else {
+ newDimHashesByName[desc.fqName] = desc.dimHash
+ }
+ }
+ }
+ // Did anything happen at all?
+ if len(newDescIDs) == 0 {
+ return errors.New("collector has no descriptors")
+ }
+ if existing, exists := r.collectorsByID[collectorID]; exists {
+ return AlreadyRegisteredError{
+ ExistingCollector: existing,
+ NewCollector: c,
+ }
+ }
+ // If the collectorID is new, but at least one of the descs existed
+ // before, we are in trouble.
+ if duplicateDescErr != nil {
+ return duplicateDescErr
+ }
+
+ // Only after all tests have passed, actually register.
+ r.collectorsByID[collectorID] = c
+ for hash := range newDescIDs {
+ r.descIDs[hash] = struct{}{}
+ }
+ for name, dimHash := range newDimHashesByName {
+ r.dimHashesByName[name] = dimHash
+ }
+ return nil
+}
+
+// Unregister implements Registerer.
+func (r *Registry) Unregister(c Collector) bool {
+ var (
+ descChan = make(chan *Desc, capDescChan)
+ descIDs = map[uint64]struct{}{}
+ collectorID uint64 // Just a sum of the desc IDs.
+ )
+ go func() {
+ c.Describe(descChan)
+ close(descChan)
+ }()
+ for desc := range descChan {
+ if _, exists := descIDs[desc.id]; !exists {
+ collectorID += desc.id
+ descIDs[desc.id] = struct{}{}
+ }
+ }
+
+ r.mtx.RLock()
+ if _, exists := r.collectorsByID[collectorID]; !exists {
+ r.mtx.RUnlock()
+ return false
+ }
+ r.mtx.RUnlock()
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ delete(r.collectorsByID, collectorID)
+ for id := range descIDs {
+ delete(r.descIDs, id)
+ }
+ // dimHashesByName is left untouched as those must be consistent
+ // throughout the lifetime of a program.
+ return true
+}
+
+// MustRegister implements Registerer.
+func (r *Registry) MustRegister(cs ...Collector) {
+ for _, c := range cs {
+ if err := r.Register(c); err != nil {
+ panic(err)
+ }
+ }
+}
+
+// Gather implements Gatherer.
+func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricChan = make(chan Metric, capMetricChan)
+ metricHashes = map[uint64]struct{}{}
+ dimHashes = map[string]uint64{}
+ wg sync.WaitGroup
+ errs MultiError // The collected errors to return in the end.
+ registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+ )
+
+ r.mtx.RLock()
+ metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+
+ // Scatter.
+ // (Collectors could be complex and slow, so we call them all at once.)
+ wg.Add(len(r.collectorsByID))
+ go func() {
+ wg.Wait()
+ close(metricChan)
+ }()
+ for _, collector := range r.collectorsByID {
+ go func(collector Collector) {
+ defer wg.Done()
+ collector.Collect(metricChan)
+ }(collector)
+ }
+
+ // In case pedantic checks are enabled, we have to copy the map before
+ // giving up the RLock.
+ if r.pedanticChecksEnabled {
+ registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+ for id := range r.descIDs {
+ registeredDescIDs[id] = struct{}{}
+ }
+ }
+
+ r.mtx.RUnlock()
+
+ // Drain metricChan in case of premature return.
+ defer func() {
+ for range metricChan {
+ }
+ }()
+
+ // Gather.
+ for metric := range metricChan {
+ // This could be done concurrently, too, but it would require locking
+ // of metricFamiliesByName (and of metricHashes if checks are
+ // enabled). Most likely not worth it.
+ desc := metric.Desc()
+ dtoMetric := &dto.Metric{}
+ if err := metric.Write(dtoMetric); err != nil {
+ errs = append(errs, fmt.Errorf(
+ "error collecting metric %v: %s", desc, err,
+ ))
+ continue
+ }
+ metricFamily, ok := metricFamiliesByName[desc.fqName]
+ if ok {
+ if metricFamily.GetHelp() != desc.help {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+ ))
+ continue
+ }
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch metricFamily.GetType() {
+ case dto.MetricType_COUNTER:
+ if dtoMetric.Counter == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Counter",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_GAUGE:
+ if dtoMetric.Gauge == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Gauge",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_SUMMARY:
+ if dtoMetric.Summary == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Summary",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_UNTYPED:
+ if dtoMetric.Untyped == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be Untyped",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ case dto.MetricType_HISTOGRAM:
+ if dtoMetric.Histogram == nil {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s should be a Histogram",
+ desc.fqName, dtoMetric,
+ ))
+ continue
+ }
+ default:
+ panic("encountered MetricFamily with invalid type")
+ }
+ } else {
+ metricFamily = &dto.MetricFamily{}
+ metricFamily.Name = proto.String(desc.fqName)
+ metricFamily.Help = proto.String(desc.help)
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch {
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ errs = append(errs, fmt.Errorf(
+ "empty metric collected: %s", dtoMetric,
+ ))
+ continue
+ }
+ metricFamiliesByName[desc.fqName] = metricFamily
+ }
+ if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ if r.pedanticChecksEnabled {
+ // Is the desc registered at all?
+ if _, exist := registeredDescIDs[desc.id]; !exist {
+ errs = append(errs, fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ ))
+ continue
+ }
+ if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ }
+ return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calls are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
+//
+// Gatherers can be used to merge the Gather results from multiple
+// Registries. It also provides a way to directly inject existing MetricFamily
+// protobufs into the gathering by creating a custom Gatherer with a Gather
+// method that simply returns the existing MetricFamily protobufs. Note that no
+// registration is involved (in contrast to Collector registration), so
+// obviously registration-time checks cannot happen. Any inconsistencies between
+// the gathered MetricFamilies are reported as errors by the Gather method, and
+// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
+// (e.g. syntactically invalid metric or label names) will go undetected.
+type Gatherers []Gatherer
+
+// Gather implements Gatherer.
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
+ var (
+ metricFamiliesByName = map[string]*dto.MetricFamily{}
+ metricHashes = map[uint64]struct{}{}
+ dimHashes = map[string]uint64{}
+ errs MultiError // The collected errors to return in the end.
+ )
+
+ for i, g := range gs {
+ mfs, err := g.Gather()
+ if err != nil {
+ if multiErr, ok := err.(MultiError); ok {
+ for _, err := range multiErr {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ } else {
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ }
+ }
+ for _, mf := range mfs {
+ existingMF, exists := metricFamiliesByName[mf.GetName()]
+ if exists {
+ if existingMF.GetHelp() != mf.GetHelp() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has help %q but should have %q",
+ mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
+ ))
+ continue
+ }
+ if existingMF.GetType() != mf.GetType() {
+ errs = append(errs, fmt.Errorf(
+ "gathered metric family %s has type %s but should have %s",
+ mf.GetName(), mf.GetType(), existingMF.GetType(),
+ ))
+ continue
+ }
+ } else {
+ existingMF = &dto.MetricFamily{}
+ existingMF.Name = mf.Name
+ existingMF.Help = mf.Help
+ existingMF.Type = mf.Type
+ metricFamiliesByName[mf.GetName()] = existingMF
+ }
+ for _, m := range mf.Metric {
+ if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ existingMF.Metric = append(existingMF.Metric, m)
+ }
+ }
+ }
+ return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+ // inconsistent. However, we have to deal with it, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+
+ // We should never arrive here. Multiple metrics with the same
+ // label set in the same scrape will lead to undefined ingestion
+ // behavior. However, as above, we have to provide stable sorting
+ // here, even for inconsistent metrics. So sort equal metrics
+ // by their timestamp, with missing timestamps (implying "now")
+ // coming last.
+ if s[i].TimestampMs == nil {
+ return false
+ }
+ if s[j].TimestampMs == nil {
+ return true
+ }
+ return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// normalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name, mf := range metricFamiliesByName {
+ if len(mf.Metric) > 0 {
+ names = append(names, name)
+ }
+ }
+ sort.Strings(names)
+ result := make([]*dto.MetricFamily, 0, len(names))
+ for _, name := range names {
+ result = append(result, metricFamiliesByName[name])
+ }
+ return result
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes. The provided dimHashes maps
+// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
+// doesn't yet contain a hash for the provided MetricFamily, it is
+// added. Otherwise, an error is returned if the existing dimHash is not equal
+// to the calculated dimHash.
+func checkMetricConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ metricHashes map[uint64]struct{},
+ dimHashes map[string]uint64,
+) error {
+ // Type consistency with metric family.
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+ metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+ metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+ metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+ metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s is not a %s",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ )
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same label values)?
+ h := hashNew()
+ h = hashAdd(h, metricFamily.GetName())
+ h = hashAddByte(h, separatorByte)
+ dh := hashNew()
+ // Make sure label pairs are sorted. We depend on it for the consistency
+ // check.
+ sort.Sort(LabelPairSorter(dtoMetric.Label))
+ for _, lp := range dtoMetric.Label {
+ h = hashAdd(h, lp.GetValue())
+ h = hashAddByte(h, separatorByte)
+ dh = hashAdd(dh, lp.GetName())
+ dh = hashAddByte(dh, separatorByte)
+ }
+ if _, exists := metricHashes[h]; exists {
+ return fmt.Errorf(
+ "collected metric %s %s was collected before with the same name and label values",
+ metricFamily.GetName(), dtoMetric,
+ )
+ }
+ if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
+ if dimHash != dh {
+ return fmt.Errorf(
+ "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
+ metricFamily.GetName(), dtoMetric,
+ )
+ }
+ } else {
+ dimHashes[metricFamily.GetName()] = dh
+ }
+ metricHashes[h] = struct{}{}
+ return nil
+}
+
+func checkDescConsistency(
+ metricFamily *dto.MetricFamily,
+ dtoMetric *dto.Metric,
+ desc *Desc,
+) error {
+ // Desc help consistency with metric family help.
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+ )
+ }
+
+ // Is the desc consistent with the content of the metric?
+ lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))
+ lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)
+ for _, l := range desc.variableLabels {
+ lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+ Name: proto.String(l),
+ })
+ }
+ if len(lpsFromDesc) != len(dtoMetric.Label) {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ sort.Sort(LabelPairSorter(lpsFromDesc))
+ for i, lpFromDesc := range lpsFromDesc {
+ lpFromMetric := dtoMetric.Label[i]
+ if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+ lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+ return fmt.Errorf(
+ "labels in collected metric %s %s are inconsistent with descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 00000000..bce05bf9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,534 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/beorn7/perks/quantile"
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+ Metric
+ Collector
+
+ // Observe adds a single observation to the summary.
+ Observe(float64)
+}
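+
+// A minimal Summary sketch (illustrative only; the metric name and help text
+// are made up):
+//
+//    latency := prometheus.NewSummary(prometheus.SummaryOpts{
+//        Name: "myapp_request_duration_seconds",
+//        Help: "Request latency distribution.",
+//    })
+//    prometheus.MustRegister(latency)
+//    latency.Observe(0.42) // seconds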
+
+// DefObjectives are the default Summary quantile values.
+var (
+ DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
+
+ errQuantileLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in summaries", quantileLabel,
+ )
+)
+
+// Default values for SummaryOpts.
+const (
+ // DefMaxAge is the default duration for which observations stay
+ // relevant.
+ DefMaxAge time.Duration = 10 * time.Minute
+ // DefAgeBuckets is the default number of buckets used to calculate the
+ // age of observations.
+ DefAgeBuckets = 5
+ // DefBufCap is the standard buffer size for collecting Summary observations.
+ DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name and Help to a non-empty string. All other fields are
+// optional and can safely be left at their zero value.
+type SummaryOpts struct {
+ // Namespace, Subsystem, and Name are components of the fully-qualified
+ // name of the Summary (created by joining these components with
+ // "_"). Only Name is mandatory, the others merely help structuring the
+ // name. Note that the fully-qualified name of the Summary must be a
+ // valid Prometheus metric name.
+ Namespace string
+ Subsystem string
+ Name string
+
+ // Help provides information about this Summary. Mandatory!
+ //
+ // Metrics with the same fully-qualified name must have the same Help
+ // string.
+ Help string
+
+ // ConstLabels are used to attach fixed labels to this
+ // Summary. Summaries with the same fully-qualified name must have the
+ // same label names in their ConstLabels.
+ //
+ // Note that in most cases, labels have a value that varies during the
+ // lifetime of a process. Those labels are usually managed with a
+ // SummaryVec. ConstLabels serve only special purposes. One is for the
+ // special case where the value of a label does not change during the
+ // lifetime of a process, e.g. if the revision of the running binary is
+ // put into a label. Another, more advanced purpose is if more than one
+ // Collector needs to collect Summaries with the same fully-qualified
+ // name. In that case, those Summaries must differ in the values of
+ // their ConstLabels. See the Collector examples.
+ //
+ // If the value of a label never changes (not even between binaries),
+ // that label most likely should not be a label at all (but part of the
+ // metric name).
+ ConstLabels Labels
+
+ // Objectives defines the quantile rank estimates with their respective
+ // absolute error. If Objectives[q] = e, then the value reported
+ // for q will be the φ-quantile value for some φ between q-e and q+e.
+ // The default value is DefObjectives.
+ Objectives map[float64]float64
+
+ // MaxAge defines the duration for which an observation stays relevant
+ // for the summary. Must be positive. The default value is DefMaxAge.
+ MaxAge time.Duration
+
+ // AgeBuckets is the number of buckets used to exclude observations that
+ // are older than MaxAge from the summary. A higher number has a
+ // resource penalty, so only increase it if the higher resolution is
+ // really required. For very high observation rates, you might want to
+ // reduce the number of age buckets. With only one age bucket, you will
+ // effectively see a complete reset of the summary each time MaxAge has
+ // passed. The default value is DefAgeBuckets.
+ AgeBuckets uint32
+
+ // BufCap defines the default sample stream buffer size. The default
+ // value of DefBufCap should suffice for most uses. If there is a need
+ // to increase the value, a multiple of 500 is recommended (because that
+ // is the internal buffer size of the underlying package
+ // "github.com/bmizerany/perks/quantile").
+ BufCap uint32
+}
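+
+// A sketch of customized SummaryOpts (illustrative only; requires the standard
+// library "time" package; the objectives shown request the median within 5%
+// and the 99th percentile within 0.1% rank error, over a 5-minute window):
+//
+//    opts := prometheus.SummaryOpts{
+//        Name:       "myapp_db_query_duration_seconds",
+//        Help:       "Database query latencies.",
+//        Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
+//        MaxAge:     5 * time.Minute,
+//    }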
+
+// Problem with the sliding-window decay algorithm: The Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+ return newSummary(
+ NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ),
+ opts,
+ )
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+ if len(desc.variableLabels) != len(labelValues) {
+ panic(errInconsistentCardinality)
+ }
+
+ for _, n := range desc.variableLabels {
+ if n == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+ for _, lp := range desc.constLabelPairs {
+ if lp.GetName() == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
+
+ if len(opts.Objectives) == 0 {
+ opts.Objectives = DefObjectives
+ }
+
+ if opts.MaxAge < 0 {
+ panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+ }
+ if opts.MaxAge == 0 {
+ opts.MaxAge = DefMaxAge
+ }
+
+ if opts.AgeBuckets == 0 {
+ opts.AgeBuckets = DefAgeBuckets
+ }
+
+ if opts.BufCap == 0 {
+ opts.BufCap = DefBufCap
+ }
+
+ s := &summary{
+ desc: desc,
+
+ objectives: opts.Objectives,
+ sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+ labelPairs: makeLabelPairs(desc, labelValues),
+
+ hotBuf: make([]float64, 0, opts.BufCap),
+ coldBuf: make([]float64, 0, opts.BufCap),
+ streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+ }
+ s.headStreamExpTime = time.Now().Add(s.streamDuration)
+ s.hotBufExpTime = s.headStreamExpTime
+
+ for i := uint32(0); i < opts.AgeBuckets; i++ {
+ s.streams = append(s.streams, s.newStream())
+ }
+ s.headStream = s.streams[0]
+
+ for qu := range s.objectives {
+ s.sortedObjectives = append(s.sortedObjectives, qu)
+ }
+ sort.Float64s(s.sortedObjectives)
+
+ s.init(s) // Init self-collection.
+ return s
+}
+
+type summary struct {
+ selfCollector
+
+ bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+ mtx sync.Mutex // Protects every other moving part.
+ // Lock bufMtx before mtx if both are needed.
+
+ desc *Desc
+
+ objectives map[float64]float64
+ sortedObjectives []float64
+
+ labelPairs []*dto.LabelPair
+
+ sum float64
+ cnt uint64
+
+ hotBuf, coldBuf []float64
+
+ streams []*quantile.Stream
+ streamDuration time.Duration
+ headStream *quantile.Stream
+ headStreamIdx int
+ headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+ s.bufMtx.Lock()
+ defer s.bufMtx.Unlock()
+
+ now := time.Now()
+ if now.After(s.hotBufExpTime) {
+ s.asyncFlush(now)
+ }
+ s.hotBuf = append(s.hotBuf, v)
+ if len(s.hotBuf) == cap(s.hotBuf) {
+ s.asyncFlush(now)
+ }
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+ s.bufMtx.Lock()
+ s.mtx.Lock()
+ // Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+ s.swapBufs(time.Now())
+ s.bufMtx.Unlock()
+
+ s.flushColdBuf()
+ sum.SampleCount = proto.Uint64(s.cnt)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for _, rank := range s.sortedObjectives {
+ var q float64
+ if s.headStream.Count() == 0 {
+ q = math.NaN()
+ } else {
+ q = s.headStream.Query(rank)
+ }
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ s.mtx.Unlock()
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+ return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+ return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+ s.mtx.Lock()
+ s.swapBufs(now)
+
+ // Unblock the original goroutine that was responsible for the mutation
+ // that triggered the compaction. But hold onto the global non-buffer
+ // state mutex until the operation finishes.
+ go func() {
+ s.flushColdBuf()
+ s.mtx.Unlock()
+ }()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+ for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+ s.headStream.Reset()
+ s.headStreamIdx++
+ if s.headStreamIdx >= len(s.streams) {
+ s.headStreamIdx = 0
+ }
+ s.headStream = s.streams[s.headStreamIdx]
+ s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+ }
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+ for _, v := range s.coldBuf {
+ for _, stream := range s.streams {
+ stream.Insert(v)
+ }
+ s.cnt++
+ s.sum += v
+ }
+ s.coldBuf = s.coldBuf[0:0]
+ s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+ if len(s.coldBuf) != 0 {
+ panic("coldBuf is not empty")
+ }
+ s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+ // hotBuf is now empty and gets new expiration set.
+ for now.After(s.hotBufExpTime) {
+ s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+ }
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+ return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+ return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+ *MetricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &SummaryVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newSummary(desc, opts, lvs...)
+ }),
+ }
+}
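+
+// A SummaryVec sketch (illustrative only; label names and values are made
+// up):
+//
+//    latencies := prometheus.NewSummaryVec(
+//        prometheus.SummaryOpts{
+//            Name: "http_request_duration_seconds",
+//            Help: "HTTP request latencies by method and code.",
+//        },
+//        []string{"method", "code"},
+//    )
+//    prometheus.MustRegister(latencies)
+//    latencies.WithLabelValues("GET", "200").Observe(0.21)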
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns a Summary and not a
+// Metric so that no type conversion is required.
+func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns a Summary and not a Metric so that no
+// type conversion is required.
+func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Summary), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
+ return m.MetricVec.WithLabelValues(lvs...).(Summary)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (m *SummaryVec) With(labels Labels) Summary {
+ return m.MetricVec.With(labels).(Summary)
+}
+
+type constSummary struct {
+ desc *Desc
+ count uint64
+ sum float64
+ quantiles map[float64]float64
+ labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+ return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+ sum := &dto.Summary{}
+ qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+ sum.SampleCount = proto.Uint64(s.count)
+ sum.SampleSum = proto.Float64(s.sum)
+
+ for rank, q := range s.quantiles {
+ qs = append(qs, &dto.Quantile{
+ Quantile: proto.Float64(rank),
+ Value: proto.Float64(q),
+ })
+ }
+
+ if len(qs) > 0 {
+ sort.Sort(quantSort(qs))
+ }
+ sum.Quantile = qs
+
+ out.Summary = sum
+ out.Label = s.labelPairs
+
+ return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc.
+func NewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstSummary(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
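+
+// A sketch of emitting a const summary from a custom Collector's Collect
+// method (illustrative only; desc is an assumed *Desc without variable
+// labels, ch is the Collect channel):
+//
+//    ch <- prometheus.MustNewConstSummary(
+//        desc,
+//        4711,   // observation count
+//        403.42, // sum of observations
+//        map[float64]float64{0.5: 0.23, 0.99: 0.56},
+//    )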
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 00000000..5faf7e6e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,138 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Untyped is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// An Untyped metric works the same as a Gauge. The only difference is that no
+// type information is implied.
+//
+// To create Untyped instances, use NewUntyped.
+type Untyped interface {
+ Metric
+ Collector
+
+ // Set sets the Untyped metric to an arbitrary value.
+ Set(float64)
+ // Inc increments the Untyped metric by 1.
+ Inc()
+ // Dec decrements the Untyped metric by 1.
+ Dec()
+ // Add adds the given value to the Untyped metric. (The value can be
+ // negative, resulting in a decrease.)
+ Add(float64)
+ // Sub subtracts the given value from the Untyped metric. (The value can
+ // be negative, resulting in an increase.)
+ Sub(float64)
+}
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
+func NewUntyped(opts UntypedOpts) Untyped {
+ return newValue(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, 0)
+}
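+
+// A minimal Untyped sketch (illustrative only; the metric name and help text
+// are made up):
+//
+//    queueDepth := prometheus.NewUntyped(prometheus.UntypedOpts{
+//        Name: "myapp_queue_depth",
+//        Help: "Current depth of the work queue.",
+//    })
+//    prometheus.MustRegister(queueDepth)
+//    queueDepth.Set(17)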
+
+// UntypedVec is a Collector that bundles a set of Untyped metrics that all
+// share the same Desc, but have different values for their variable
+// labels. This is used if you want to count the same thing partitioned by
+// various dimensions. Create instances with NewUntypedVec.
+type UntypedVec struct {
+ *MetricVec
+}
+
+// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
+// partitioned by the given label names. At least one label name must be
+// provided.
+func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
+ desc := NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ labelNames,
+ opts.ConstLabels,
+ )
+ return &UntypedVec{
+ MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ return newValue(desc, UntypedValue, 0, lvs...)
+ }),
+ }
+}
+
+// GetMetricWithLabelValues replaces the method of the same name in
+// MetricVec. The difference is that this method returns an Untyped and not a
+// Metric so that no type conversion is required.
+func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// GetMetricWith replaces the method of the same name in MetricVec. The
+// difference is that this method returns an Untyped and not a Metric so that no
+// type conversion is required.
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
+ metric, err := m.MetricVec.GetMetricWith(labels)
+ if metric != nil {
+ return metric.(Untyped), err
+ }
+ return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. By not returning an
+// error, WithLabelValues allows shortcuts like
+// myVec.WithLabelValues("404", "GET").Add(42)
+func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
+ return m.MetricVec.WithLabelValues(lvs...).(Untyped)
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. By not returning an error, With allows shortcuts like
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+func (m *UntypedVec) With(labels Labels) Untyped {
+ return m.MetricVec.With(labels).(Untyped)
+}
+
+// UntypedFunc is an Untyped whose value is determined at collect time by
+// calling a provided function.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+ Metric
+ Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+ return newValueFunc(NewDesc(
+ BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+ opts.Help,
+ nil,
+ opts.ConstLabels,
+ ), UntypedValue, function)
+}
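+
+// An UntypedFunc sketch (illustrative only; requires the standard library
+// "runtime" package):
+//
+//    gf := prometheus.NewUntypedFunc(
+//        prometheus.UntypedOpts{
+//            Name: "myapp_goroutines",
+//            Help: "Current number of goroutines.",
+//        },
+//        func() float64 { return float64(runtime.NumGoroutine()) },
+//    )
+//    prometheus.MustRegister(gf)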
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 00000000..a944c377
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,234 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "sort"
+ "sync/atomic"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+ _ ValueType = iota
+ CounterValue
+ GaugeValue
+ UntypedValue
+)
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+// value is a generic metric for simple values. It implements Metric, Collector,
+// Counter, Gauge, and Untyped. Its effective type is determined by
+// ValueType. This is a low-level building block used by the library to back the
+// implementations of Counter, Gauge, and Untyped.
+type value struct {
+ // valBits contains the bits of the represented float64 value. It has
+ // to go first in the struct to guarantee alignment for atomic
+ // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+
+ selfCollector
+
+ desc *Desc
+ valType ValueType
+ labelPairs []*dto.LabelPair
+}
+
+// newValue returns a newly allocated value with the given Desc, ValueType,
+// sample value and label values. It panics if the number of label
+// values is different from the number of variable labels in Desc.
+func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
+ if len(labelValues) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &value{
+ desc: desc,
+ valType: valueType,
+ valBits: math.Float64bits(val),
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }
+ result.init(result)
+ return result
+}
+
+func (v *value) Desc() *Desc {
+ return v.desc
+}
+
+func (v *value) Set(val float64) {
+ atomic.StoreUint64(&v.valBits, math.Float64bits(val))
+}
+
+func (v *value) Inc() {
+ v.Add(1)
+}
+
+func (v *value) Dec() {
+ v.Add(-1)
+}
+
+func (v *value) Add(val float64) {
+ for {
+ oldBits := atomic.LoadUint64(&v.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+ if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (v *value) Sub(val float64) {
+ v.Add(val * -1)
+}
+
+func (v *value) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
+ return populateMetric(v.valType, val, v.labelPairs, out)
+}
+
+// valueFunc is a generic metric for simple values retrieved at collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+ selfCollector
+
+ desc *Desc
+ valType ValueType
+ function func() float64
+ labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+ result := &valueFunc{
+ desc: desc,
+ valType: valueType,
+ function: function,
+ labelPairs: makeLabelPairs(desc, nil),
+ }
+ result.init(result)
+ return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+ return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+ return populateMetric(v.valType, v.function(), v.labelPairs, out)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send it to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if len(desc.variableLabels) != len(labelValues) {
+ return nil, errInconsistentCardinality
+ }
+ return &constMetric{
+ desc: desc,
+ valType: valueType,
+ val: value,
+ labelPairs: makeLabelPairs(desc, labelValues),
+ }, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+ m, err := NewConstMetric(desc, valueType, value, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
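+
+// A sketch of emitting a const metric from a custom Collector's Collect
+// method (illustrative only; desc is an assumed *Desc with one variable
+// label, ch is the Collect channel):
+//
+//    ch <- prometheus.MustNewConstMetric(
+//        desc,
+//        prometheus.GaugeValue,
+//        42,
+//        "some-label-value",
+//    )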
+
+type constMetric struct {
+ desc *Desc
+ valType ValueType
+ val float64
+ labelPairs []*dto.LabelPair
+}
+
+func (m *constMetric) Desc() *Desc {
+ return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+ return populateMetric(m.valType, m.val, m.labelPairs, out)
+}
+
+func populateMetric(
+ t ValueType,
+ v float64,
+ labelPairs []*dto.LabelPair,
+ m *dto.Metric,
+) error {
+ m.Label = labelPairs
+ switch t {
+ case CounterValue:
+ m.Counter = &dto.Counter{Value: proto.Float64(v)}
+ case GaugeValue:
+ m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+ case UntypedValue:
+ m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+ default:
+ return fmt.Errorf("encountered unknown type %v", t)
+ }
+ return nil
+}
+
+func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+ totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+ if totalLen == 0 {
+ // Super fast path.
+ return nil
+ }
+ if len(desc.variableLabels) == 0 {
+ // Moderately fast path.
+ return desc.constLabelPairs
+ }
+ labelPairs := make([]*dto.LabelPair, 0, totalLen)
+ for i, n := range desc.variableLabels {
+ labelPairs = append(labelPairs, &dto.LabelPair{
+ Name: proto.String(n),
+ Value: proto.String(labelValues[i]),
+ })
+ }
+ for _, lp := range desc.constLabelPairs {
+ labelPairs = append(labelPairs, lp)
+ }
+ sort.Sort(LabelPairSorter(labelPairs))
+ return labelPairs
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 00000000..7f3eef9a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,404 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sync"
+
+ "github.com/prometheus/common/model"
+)
+
+// MetricVec is a Collector to bundle metrics of the same name that
+// differ in their label values. MetricVec is usually not used directly but as a
+// building block for implementations of vectors of a given metric
+// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
+// provided in this package.
+type MetricVec struct {
+ mtx sync.RWMutex // Protects the children.
+ children map[uint64][]metricWithLabelValues
+ desc *Desc
+
+ newMetric func(labelValues ...string) Metric
+ hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
+ hashAddByte func(h uint64, b byte) uint64
+}
+
+// newMetricVec returns an initialized MetricVec. The concrete value is
+// returned for embedding into another struct.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
+ return &MetricVec{
+ children: map[uint64][]metricWithLabelValues{},
+ desc: desc,
+ newMetric: newMetric,
+ hashAdd: hashAdd,
+ hashAddByte: hashAddByte,
+ }
+}
+
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+ values []string
+ metric Metric
+}
+
+// Describe implements Collector. It always sends exactly one Desc, the one
+// shared by all metrics in this vector, to the provided channel.
+func (m *MetricVec) Describe(ch chan<- *Desc) {
+ ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *MetricVec) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metrics := range m.children {
+ for _, metric := range metrics {
+ ch <- metric.metric
+ }
+ }
+}
+
+// GetMetricWithLabelValues returns the Metric for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Metric is created.
+//
+// It is possible to call this method without using the returned Metric to only
+// create the new Metric but leave it at its start value (e.g. a Summary or
+// Histogram without any observations). See also the SummaryVec example.
+//
+// Keeping the Metric for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
+// Metric will still exist, but it will not be exported anymore, even if a
+// Metric with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.getOrCreateMetricWithLabelValues(h, lvs), nil
+}
+
+// GetMetricWith returns the Metric for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Metric is created. Implications of
+// creating a Metric without using it and keeping the Metric for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.getOrCreateMetricWithLabels(h, labels), nil
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
+// occurs. The method allows neat syntax like:
+// httpReqs.WithLabelValues("404", "POST").Inc()
+func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
+ metric, err := m.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// With works as GetMetricWith, but panics if an error occurs. The method allows
+// neat syntax like:
+// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
+func (m *MetricVec) With(labels Labels) Metric {
+ metric, err := m.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return metric
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such an inconsistent label count
+// can never match an actual Metric, so the method will always return false in
+// that case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
+ return m.deleteByHashWithLabelValues(h, lvs)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in the Desc of the MetricVec. However, such
+// inconsistent Labels can never match an actual Metric, so the method will
+// always return false in that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
+
+ return m.deleteByHashWithLabels(h, labels)
+}
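+
+// Illustrative sketch (not part of the upstream source): assuming the
+// httpReqs CounterVec from the examples above, these two calls delete the
+// same child metric:
+//
+//	httpReqs.DeleteLabelValues("404", "POST")
+//	httpReqs.Delete(Labels{"status": "404", "method": "POST"})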
+
+// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
+// there are multiple matches in the bucket, use lvs to select a metric and
+// remove only that metric.
+func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
+ metrics, ok := m.children[h]
+ if !ok {
+ return false
+ }
+
+ i := m.findMetricWithLabelValues(metrics, lvs)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ m.children[h] = append(metrics[:i], metrics[i+1:]...)
+ } else {
+ delete(m.children, h)
+ }
+ return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use labels to select a metric and remove
+// only that metric.
+func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
+ metrics, ok := m.children[h]
+ if !ok {
+ return false
+ }
+ i := m.findMetricWithLabels(metrics, labels)
+ if i >= len(metrics) {
+ return false
+ }
+
+ if len(metrics) > 1 {
+ m.children[h] = append(metrics[:i], metrics[i+1:]...)
+ } else {
+ delete(m.children, h)
+ }
+ return true
+}
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ for h := range m.children {
+ delete(m.children, h)
+ }
+}
+
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+ if len(vals) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ h := hashNew()
+ for _, val := range vals {
+ h = m.hashAdd(h, val)
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
+
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+ if len(labels) != len(m.desc.variableLabels) {
+ return 0, errInconsistentCardinality
+ }
+ h := hashNew()
+ for _, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", label)
+ }
+ h = m.hashAdd(h, val)
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
+}
+
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// or creates it and returns the new one.
+//
+// This function acquires and releases the mutex itself.
+func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithLabelValues(hash, lvs)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ metric, ok = m.getMetricWithLabelValues(hash, lvs)
+ if !ok {
+		// Copy to avoid allocation in case we don't go down this code path.
+ copiedLVs := make([]string, len(lvs))
+ copy(copiedLVs, lvs)
+ metric = m.newMetric(copiedLVs...)
+ m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
+ }
+ return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and label value
+// or creates it and returns the new one.
+//
+// This function acquires and releases the mutex itself.
+func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
+ m.mtx.RLock()
+ metric, ok := m.getMetricWithLabels(hash, labels)
+ m.mtx.RUnlock()
+ if ok {
+ return metric
+ }
+
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+ metric, ok = m.getMetricWithLabels(hash, labels)
+ if !ok {
+ lvs := m.extractLabelValues(labels)
+ metric = m.newMetric(lvs...)
+ m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
+ }
+ return metric
+}
+
+// getMetricWithLabelValues gets a metric while handling possible collisions in
+// the hash space. Must be called while holding the read mutex.
+func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
+ metrics, ok := m.children[h]
+ if ok {
+ if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// getMetricWithLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding the read mutex.
+func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
+ metrics, ok := m.children[h]
+ if ok {
+ if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
+ return metrics[i].metric, true
+ }
+ }
+ return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
+func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
+ for i, metric := range metrics {
+ if m.matchLabelValues(metric.values, lvs) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
+// if not found.
+func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
+ for i, metric := range metrics {
+ if m.matchLabels(metric.values, labels) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
+ if len(values) != len(lvs) {
+ return false
+ }
+ for i, v := range values {
+ if v != lvs[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
+ if len(labels) != len(values) {
+ return false
+ }
+ for i, k := range m.desc.variableLabels {
+ if values[i] != labels[k] {
+ return false
+ }
+ }
+ return true
+}
+
+func (m *MetricVec) extractLabelValues(labels Labels) []string {
+ labelValues := make([]string, len(labels))
+ for i, k := range m.desc.variableLabels {
+ labelValues[i] = labels[k]
+ }
+ return labelValues
+}
diff --git a/vendor/github.com/prometheus/client_model/AUTHORS.md b/vendor/github.com/prometheus/client_model/AUTHORS.md
new file mode 100644
index 00000000..e8b3efa6
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/AUTHORS.md
@@ -0,0 +1,13 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Björn Rabenstein <beorn@soundcloud.com>
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Björn Rabenstein <beorn@soundcloud.com>
+* Matt T. Proud <matt.proud@gmail.com>
+* Tobias Schmidt <ts@soundcloud.com>
diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE
new file mode 100644
index 00000000..20110e41
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/NOTICE
@@ -0,0 +1,5 @@
+Data model artifacts for Prometheus.
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 00000000..b065f868
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,364 @@
+// Code generated by protoc-gen-go.
+// source: metrics.proto
+// DO NOT EDIT!
+
+/*
+Package io_prometheus_client is a generated protocol buffer package.
+
+It is generated from these files:
+ metrics.proto
+
+It has these top-level messages:
+ LabelPair
+ Gauge
+ Counter
+ Quantile
+ Summary
+ Untyped
+ Histogram
+ Bucket
+ Metric
+ MetricFamily
+*/
+package io_prometheus_client
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type MetricType int32
+
+const (
+ MetricType_COUNTER MetricType = 0
+ MetricType_GAUGE MetricType = 1
+ MetricType_SUMMARY MetricType = 2
+ MetricType_UNTYPED MetricType = 3
+ MetricType_HISTOGRAM MetricType = 4
+)
+
+var MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+}
+var MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+}
+
+func (x MetricType) Enum() *MetricType {
+ p := new(MetricType)
+ *p = x
+ return p
+}
+func (x MetricType) String() string {
+ return proto.EnumName(MetricType_name, int32(x))
+}
+func (x *MetricType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+ if err != nil {
+ return err
+ }
+ *x = MetricType(value)
+ return nil
+}
+
+type LabelPair struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LabelPair) Reset() { *m = LabelPair{} }
+func (m *LabelPair) String() string { return proto.CompactTextString(m) }
+func (*LabelPair) ProtoMessage() {}
+
+func (m *LabelPair) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *LabelPair) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Gauge struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Gauge) Reset() { *m = Gauge{} }
+func (m *Gauge) String() string { return proto.CompactTextString(m) }
+func (*Gauge) ProtoMessage() {}
+
+func (m *Gauge) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Counter struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Counter) Reset() { *m = Counter{} }
+func (m *Counter) String() string { return proto.CompactTextString(m) }
+func (*Counter) ProtoMessage() {}
+
+func (m *Counter) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Quantile struct {
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Quantile) Reset() { *m = Quantile{} }
+func (m *Quantile) String() string { return proto.CompactTextString(m) }
+func (*Quantile) ProtoMessage() {}
+
+func (m *Quantile) GetQuantile() float64 {
+ if m != nil && m.Quantile != nil {
+ return *m.Quantile
+ }
+ return 0
+}
+
+func (m *Quantile) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Summary struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Summary) Reset() { *m = Summary{} }
+func (m *Summary) String() string { return proto.CompactTextString(m) }
+func (*Summary) ProtoMessage() {}
+
+func (m *Summary) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Summary) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Summary) GetQuantile() []*Quantile {
+ if m != nil {
+ return m.Quantile
+ }
+ return nil
+}
+
+type Untyped struct {
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Untyped) Reset() { *m = Untyped{} }
+func (m *Untyped) String() string { return proto.CompactTextString(m) }
+func (*Untyped) ProtoMessage() {}
+
+func (m *Untyped) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Histogram struct {
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (m *Histogram) String() string { return proto.CompactTextString(m) }
+func (*Histogram) ProtoMessage() {}
+
+func (m *Histogram) GetSampleCount() uint64 {
+ if m != nil && m.SampleCount != nil {
+ return *m.SampleCount
+ }
+ return 0
+}
+
+func (m *Histogram) GetSampleSum() float64 {
+ if m != nil && m.SampleSum != nil {
+ return *m.SampleSum
+ }
+ return 0
+}
+
+func (m *Histogram) GetBucket() []*Bucket {
+ if m != nil {
+ return m.Bucket
+ }
+ return nil
+}
+
+type Bucket struct {
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Bucket) Reset() { *m = Bucket{} }
+func (m *Bucket) String() string { return proto.CompactTextString(m) }
+func (*Bucket) ProtoMessage() {}
+
+func (m *Bucket) GetCumulativeCount() uint64 {
+ if m != nil && m.CumulativeCount != nil {
+ return *m.CumulativeCount
+ }
+ return 0
+}
+
+func (m *Bucket) GetUpperBound() float64 {
+ if m != nil && m.UpperBound != nil {
+ return *m.UpperBound
+ }
+ return 0
+}
+
+type Metric struct {
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+
+func (m *Metric) GetLabel() []*LabelPair {
+ if m != nil {
+ return m.Label
+ }
+ return nil
+}
+
+func (m *Metric) GetGauge() *Gauge {
+ if m != nil {
+ return m.Gauge
+ }
+ return nil
+}
+
+func (m *Metric) GetCounter() *Counter {
+ if m != nil {
+ return m.Counter
+ }
+ return nil
+}
+
+func (m *Metric) GetSummary() *Summary {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func (m *Metric) GetUntyped() *Untyped {
+ if m != nil {
+ return m.Untyped
+ }
+ return nil
+}
+
+func (m *Metric) GetHistogram() *Histogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+func (m *Metric) GetTimestampMs() int64 {
+ if m != nil && m.TimestampMs != nil {
+ return *m.TimestampMs
+ }
+ return 0
+}
+
+type MetricFamily struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MetricFamily) Reset() { *m = MetricFamily{} }
+func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
+func (*MetricFamily) ProtoMessage() {}
+
+func (m *MetricFamily) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetHelp() string {
+ if m != nil && m.Help != nil {
+ return *m.Help
+ }
+ return ""
+}
+
+func (m *MetricFamily) GetType() MetricType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return MetricType_COUNTER
+}
+
+func (m *MetricFamily) GetMetric() []*Metric {
+ if m != nil {
+ return m.Metric
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
+}
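+
+// Illustrative sketch (not part of the generated code): assembling a gauge
+// sample by hand with the generated types; the metric name is a made-up
+// example:
+//
+//	mf := &MetricFamily{
+//		Name: proto.String("temperature_celsius"),
+//		Type: MetricType_GAUGE.Enum(),
+//		Metric: []*Metric{{
+//			Gauge: &Gauge{Value: proto.Float64(21.5)},
+//		}},
+//	}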
diff --git a/vendor/github.com/prometheus/common/AUTHORS.md b/vendor/github.com/prometheus/common/AUTHORS.md
new file mode 100644
index 00000000..c63f4d39
--- /dev/null
+++ b/vendor/github.com/prometheus/common/AUTHORS.md
@@ -0,0 +1,11 @@
+Maintainers of this repository:
+
+* Fabian Reinartz <fabian@soundcloud.com>
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Björn Rabenstein <beorn@soundcloud.com>
+* Fabian Reinartz <fabian@soundcloud.com>
+* Julius Volz <julius.volz@gmail.com>
+* Miguel Molina <hi@mvader.me>
diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/github.com/prometheus/common/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 00000000..636a2c1a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 00000000..487fdc6c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,412 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return FmtUnknown
+ }
+
+ const textType = "text/plain"
+
+ switch mediatype {
+ case ProtoType:
+ if p, ok := params["proto"]; ok && p != ProtoProtocol {
+ return FmtUnknown
+ }
+ if e, ok := params["encoding"]; ok && e != "delimited" {
+ return FmtUnknown
+ }
+ return FmtProtoDelim
+
+ case textType:
+ if v, ok := params["version"]; ok && v != TextVersion {
+ return FmtUnknown
+ }
+ return FmtText
+ }
+
+ return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+ switch format {
+ case FmtProtoDelim:
+ return &protoDecoder{r: r}
+ }
+ return &textDecoder{r: r}
+}
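+
+// Illustrative sketch (not part of the upstream file): draining all metric
+// families from a scrape response body, where resp is a hypothetical
+// *http.Response:
+//
+//	dec := NewDecoder(resp.Body, ResponseFormat(resp.Header))
+//	for {
+//		var mf dto.MetricFamily
+//		if err := dec.Decode(&mf); err == io.EOF {
+//			break
+//		} else if err != nil {
+//			return err
+//		}
+//		// Use mf here.
+//	}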
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ _, err := pbutil.ReadDelimited(d.r, v)
+ if err != nil {
+ return err
+ }
+ if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ return fmt.Errorf("invalid metric name %q", v.GetName())
+ }
+ for _, m := range v.GetMetric() {
+ if m == nil {
+ continue
+ }
+ for _, l := range m.GetLabel() {
+ if l == nil {
+ continue
+ }
+ if !model.LabelValue(l.GetValue()).IsValid() {
+ return fmt.Errorf("invalid label value %q", l.GetValue())
+ }
+ if !model.LabelName(l.GetName()).IsValid() {
+ return fmt.Errorf("invalid label name %q", l.GetName())
+ }
+ }
+ }
+ return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ p TextParser
+ fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ // TODO(fabxc): Wrap this as a line reader to make streaming safer.
+ if len(d.fams) == 0 {
+ // No cached metric families, read everything and parse metrics.
+ fams, err := d.p.TextToMetricFamilies(d.r)
+ if err != nil {
+ return err
+ }
+ if len(fams) == 0 {
+ return io.EOF
+ }
+ d.fams = make([]*dto.MetricFamily, 0, len(fams))
+ for _, f := range fams {
+ d.fams = append(d.fams, f)
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ if err := sd.Dec.Decode(&sd.f); err != nil {
+ return err
+ }
+ *s = extractSamples(&sd.f, sd.Opts)
+ return nil
+}
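+
+// Illustrative sketch (not part of the upstream file): decoding straight into
+// a model.Vector, stamping samples that carry no explicit timestamp; resp is
+// a hypothetical *http.Response:
+//
+//	sd := &SampleDecoder{
+//		Dec:  NewDecoder(resp.Body, ResponseFormat(resp.Header)),
+//		Opts: &DecodeOptions{Timestamp: model.Now()},
+//	}
+//	var v model.Vector
+//	if err := sd.Decode(&v); err != nil {
+//		return err
+//	}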
+
+// ExtractSamples builds a slice of samples from the provided metric families.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
+ var all model.Vector
+ for _, f := range fams {
+ all = append(all, extractSamples(f, o)...)
+ }
+ return all
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
+ switch f.GetType() {
+ case dto.MetricType_COUNTER:
+ return extractCounter(o, f)
+ case dto.MetricType_GAUGE:
+ return extractGauge(o, f)
+ case dto.MetricType_SUMMARY:
+ return extractSummary(o, f)
+ case dto.MetricType_UNTYPED:
+ return extractUntyped(o, f)
+ case dto.MetricType_HISTOGRAM:
+ return extractHistogram(o, f)
+ }
+ panic("expfmt.extractSamples: unknown metric family type")
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Counter == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Counter.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Gauge == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Gauge.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Untyped == nil {
+ continue
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ smpl := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Untyped.GetValue()),
+ }
+
+ if m.TimestampMs != nil {
+ smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ } else {
+ smpl.Timestamp = o.Timestamp
+ }
+
+ samples = append(samples, smpl)
+ }
+
+ return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Summary == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ for _, q := range m.Summary.Quantile {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ // BUG(matt): Update other names to "quantile".
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetValue()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Summary.GetSampleCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+ samples := make(model.Vector, 0, len(f.Metric))
+
+ for _, m := range f.Metric {
+ if m.Histogram == nil {
+ continue
+ }
+
+ timestamp := o.Timestamp
+ if m.TimestampMs != nil {
+ timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+ }
+
+ infSeen := false
+
+ for _, q := range m.Histogram.Bucket {
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(q.GetCumulativeCount()),
+ Timestamp: timestamp,
+ })
+ }
+
+ lset := make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleSum()),
+ Timestamp: timestamp,
+ })
+
+ lset = make(model.LabelSet, len(m.Label)+1)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+ count := &model.Sample{
+ Metric: model.Metric(lset),
+ Value: model.SampleValue(m.Histogram.GetSampleCount()),
+ Timestamp: timestamp,
+ }
+ samples = append(samples, count)
+
+ if !infSeen {
+ // Append an infinity bucket sample.
+ lset := make(model.LabelSet, len(m.Label)+2)
+ for _, p := range m.Label {
+ lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+ }
+ lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+ lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+ samples = append(samples, &model.Sample{
+ Metric: model.Metric(lset),
+ Value: count.Value,
+ Timestamp: timestamp,
+ })
+ }
+ }
+
+ return samples
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 00000000..11839ed6
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+ Encode(*dto.MetricFamily) error
+}
+
+type encoder func(*dto.MetricFamily) error
+
+func (e encoder) Encode(v *dto.MetricFamily) error {
+ return e(v)
+}
+
+// Negotiate returns the Content-Type based on the given Accept header.
+// If no appropriate accepted type is found, FmtText is returned.
+func Negotiate(h http.Header) Format {
+ for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ // Check for protocol buffer
+ if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+ switch ac.Params["encoding"] {
+ case "delimited":
+ return FmtProtoDelim
+ case "text":
+ return FmtProtoText
+ case "compact-text":
+ return FmtProtoCompact
+ }
+ }
+ // Check for text format.
+ ver := ac.Params["version"]
+ if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+ return FmtText
+ }
+ }
+ return FmtText
+}
+
+// NewEncoder returns a new encoder based on content type negotiation.
+func NewEncoder(w io.Writer, format Format) Encoder {
+ switch format {
+ case FmtProtoDelim:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := pbutil.WriteDelimited(w, v)
+ return err
+ })
+ case FmtProtoCompact:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, v.String())
+ return err
+ })
+ case FmtProtoText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ return err
+ })
+ case FmtText:
+ return encoder(func(v *dto.MetricFamily) error {
+ _, err := MetricFamilyToText(w, v)
+ return err
+ })
+ }
+ panic("expfmt.NewEncoder: unknown format")
+	panic("expfmt.NewEncoder: unknown format")
+}
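+
+// Illustrative sketch (not part of the upstream file): serving a scrape
+// response, where w is a hypothetical http.ResponseWriter, req an
+// *http.Request, and families a []*dto.MetricFamily:
+//
+//	format := Negotiate(req.Header)
+//	w.Header().Set("Content-Type", string(format))
+//	enc := NewEncoder(w, format)
+//	for _, mf := range families {
+//		if err := enc.Encode(mf); err != nil {
+//			break
+//		}
+//	}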
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 00000000..366fbde9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+// Format specifies the HTTP content type of the different wire protocols.
+type Format string
+
+const (
+ TextVersion = "0.0.4"
+
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+
+ // The Content-Type values for the different wire protocols.
+ FmtUnknown Format = `<unknown>`
+ FmtText Format = `text/plain; version=` + TextVersion
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+
+ // fmtJSON2 is hidden as it is deprecated.
+ fmtJSON2 Format = `application/json; version=0.0.2`
+)
+
+const (
+ hdrContentType = "Content-Type"
+ hdrAccept = "Accept"
+)
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 00000000..dc2eedee
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+ parser := TextParser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 00000000..1e060879
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,300 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. This function does not perform checks on the
+// content of the metric and label names, i.e. invalid metric or label names
+// will result in invalid text format output.
+// This function fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
+ var written int
+
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Comments, first HELP, then TYPE.
+ if in.Help != nil {
+ n, err := fmt.Fprintf(
+ out, "# HELP %s %s\n",
+ name, escapeString(*in.Help, false),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ metricType := in.GetType()
+ n, err := fmt.Fprintf(
+ out, "# TYPE %s %s\n",
+ name, strings.ToLower(metricType.String()),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ // Finally the samples, one line for each.
+ for _, metric := range in.Metric {
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ if metric.Counter == nil {
+ return written, fmt.Errorf(
+ "expected counter in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Counter.GetValue(),
+ out,
+ )
+ case dto.MetricType_GAUGE:
+ if metric.Gauge == nil {
+ return written, fmt.Errorf(
+ "expected gauge in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Gauge.GetValue(),
+ out,
+ )
+ case dto.MetricType_UNTYPED:
+ if metric.Untyped == nil {
+ return written, fmt.Errorf(
+ "expected untyped in metric %s %s", name, metric,
+ )
+ }
+ n, err = writeSample(
+ name, metric, "", "",
+ metric.Untyped.GetValue(),
+ out,
+ )
+ case dto.MetricType_SUMMARY:
+ if metric.Summary == nil {
+ return written, fmt.Errorf(
+ "expected summary in metric %s %s", name, metric,
+ )
+ }
+ for _, q := range metric.Summary.Quantile {
+ n, err = writeSample(
+ name, metric,
+ model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
+ q.GetValue(),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Summary.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Summary.GetSampleCount()),
+ out,
+ )
+ case dto.MetricType_HISTOGRAM:
+ if metric.Histogram == nil {
+ return written, fmt.Errorf(
+ "expected histogram in metric %s %s", name, metric,
+ )
+ }
+ infSeen := false
+ for _, q := range metric.Histogram.Bucket {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
+ float64(q.GetCumulativeCount()),
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if math.IsInf(q.GetUpperBound(), +1) {
+ infSeen = true
+ }
+ }
+ if !infSeen {
+ n, err = writeSample(
+ name+"_bucket", metric,
+ model.BucketLabel, "+Inf",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ }
+ n, err = writeSample(
+ name+"_sum", metric, "", "",
+ metric.Histogram.GetSampleSum(),
+ out,
+ )
+ if err != nil {
+ return written, err
+ }
+ written += n
+ n, err = writeSample(
+ name+"_count", metric, "", "",
+ float64(metric.Histogram.GetSampleCount()),
+ out,
+ )
+ default:
+ return written, fmt.Errorf(
+ "unexpected type in metric %s %s", name, metric,
+ )
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+}
+
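+// For illustration (not part of the upstream file), a counter family named
+// "http_requests_total" with help text and a single labeled sample renders
+// through MetricFamilyToText as:
+//
+//	# HELP http_requests_total Total number of HTTP requests.
+//	# TYPE http_requests_total counter
+//	http_requests_total{code="200"} 1027
+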
+// writeSample writes a single sample in text format to out, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// and value (use empty strings if not required), and the value. The function
+// returns the number of bytes written and any error encountered.
+func writeSample(
+ name string,
+ metric *dto.Metric,
+ additionalLabelName, additionalLabelValue string,
+ value float64,
+ out io.Writer,
+) (int, error) {
+ var written int
+ n, err := fmt.Fprint(out, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = labelPairsToText(
+ metric.Label,
+ additionalLabelName, additionalLabelValue,
+ out,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = fmt.Fprintf(out, " %v", value)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ if metric.TimestampMs != nil {
+ n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = out.Write([]byte{'\n'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+// labelPairsToText converts a slice of LabelPair proto messages plus the
+// explicitly given additional label pair into text formatted as required by the
+// text format and writes it to 'out'. An empty slice in combination with an
+// empty string 'additionalLabelName' results in nothing being
+// written. Otherwise, the label pairs are written, escaped as required by the
+// text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered.
+func labelPairsToText(
+ in []*dto.LabelPair,
+ additionalLabelName, additionalLabelValue string,
+ out io.Writer,
+) (int, error) {
+ if len(in) == 0 && additionalLabelName == "" {
+ return 0, nil
+ }
+ var written int
+ separator := '{'
+ for _, lp := range in {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, lp.GetName(), escapeString(lp.GetValue(), true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ if additionalLabelName != "" {
+ n, err := fmt.Fprintf(
+ out, `%c%s="%s"`,
+ separator, additionalLabelName,
+ escapeString(additionalLabelValue, true),
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err := out.Write([]byte{'}'})
+ written += n
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
+var (
+ escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
+ escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
+)
+
+// escapeString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+func escapeString(v string, includeDoubleQuote bool) string {
+ if includeDoubleQuote {
+ return escapeWithDoubleQuote.Replace(v)
+ }
+
+ return escape.Replace(v)
+}
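+
+// For example (illustrative): escapeString("say \"hi\"\n", true) yields
+// `say \"hi\"\n`, while with includeDoubleQuote set to false the double
+// quotes are left as-is, which is the behavior needed for HELP docstrings.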
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 00000000..bd170b16
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,753 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+ Line int
+ Msg string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+ return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format. Its
+// zero value is ready to use.
+type TextParser struct {
+ metricFamiliesByName map[string]*dto.MetricFamily
+ buf *bufio.Reader // Where the parsed input is read through.
+ err error // Most recent error.
+ lineCount int // Tracks the line count for error messages.
+ currentByte byte // The most recent byte read.
+ currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes.
+ currentMF *dto.MetricFamily
+ currentMetric *dto.Metric
+ currentLabelPair *dto.LabelPair
+
+ // The remaining member variables are only used for summaries/histograms.
+ currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+ // Summary specific.
+ summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentQuantile float64
+ // Histogram specific.
+ histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+ currentBucket float64
+ // These tell us if the currently processed line ends on '_count' or
+// '_sum' respectively and belongs to a summary/histogram, representing the sample
+ // count and sum of that summary/histogram.
+ currentIsSummaryCount, currentIsSummarySum bool
+ currentIsHistogramCount, currentIsHistogramSum bool
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. The same is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that neither the metrics within each MetricFamily are sorted nor
+// the label pairs within each Metric. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// MetricFamilyToText function in this package creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate Parser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+ p.reset(in)
+ for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+ // Magic happens here...
+ }
+ // Get rid of empty metric families.
+ for k, mf := range p.metricFamiliesByName {
+ if len(mf.GetMetric()) == 0 {
+ delete(p.metricFamiliesByName, k)
+ }
+ }
+ // If p.err is io.EOF now, we have run into a premature end of the input
+ // stream. Turn this error into something nicer and more
+ // meaningful. (io.EOF is often used as a signal for the legitimate end
+ // of an input stream.)
+ if p.err == io.EOF {
+ p.parseError("unexpected end of input stream")
+ }
+ return p.metricFamiliesByName, p.err
+}
+
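+// A minimal parsing sketch (hypothetical caller, not part of this file):
+//
+//	var parser TextParser
+//	families, err := parser.TextToMetricFamilies(strings.NewReader(
+//		"# TYPE http_requests_total counter\nhttp_requests_total 1027\n"))
+//	if err == nil {
+//		fmt.Println(families["http_requests_total"].GetType()) // COUNTER
+//	}
+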
+func (p *TextParser) reset(in io.Reader) {
+ p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ if p.buf == nil {
+ p.buf = bufio.NewReader(in)
+ } else {
+ p.buf.Reset(in)
+ }
+ p.err = nil
+ p.lineCount = 0
+ if p.summaries == nil || len(p.summaries) > 0 {
+ p.summaries = map[uint64]*dto.Metric{}
+ }
+ if p.histograms == nil || len(p.histograms) > 0 {
+ p.histograms = map[uint64]*dto.Metric{}
+ }
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+ p.lineCount++
+ if p.skipBlankTab(); p.err != nil {
+ // End of input reached. This is the only case where
+ // that is not an error but a signal that we are done.
+ p.err = nil
+ return nil
+ }
+ switch p.currentByte {
+ case '#':
+ return p.startComment
+ case '\n':
+ return p.startOfLine // Empty line, start the next one.
+ }
+ return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ // If we have hit the end of line already, there is nothing left
+ // to do. This is not considered a syntax error.
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ keyword := p.currentToken.String()
+ if keyword != "HELP" && keyword != "TYPE" {
+ // Generic comment, ignore by fast forwarding to end of line.
+ for p.currentByte != '\n' {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ }
+ return p.startOfLine
+ }
+ // There is something. Next has to be a metric name.
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ if !isBlankOrTab(p.currentByte) {
+ p.parseError("invalid metric name in comment")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '\n' {
+ // At the end of the line already.
+ // Again, this is not considered a syntax error.
+ return p.startOfLine
+ }
+ switch keyword {
+ case "HELP":
+ return p.readingHelp
+ case "TYPE":
+ return p.readingType
+ }
+ panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+ if p.readTokenAsMetricName(); p.err != nil {
+ return nil
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.setOrCreateCurrentMF()
+ // Now is the time to fix the type if it hasn't happened yet.
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ // Do not append the newly created currentMetric to
+ // currentMF.Metric right now. First wait if this is a summary,
+ // and the metric exists already, which we can only know after
+ // having read all the labels.
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+ // Summaries/histograms are special. We have to reset the
+ // currentLabels map, currentQuantile and currentBucket before starting to
+ // read labels.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ p.currentLabels = map[string]string{}
+ p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+ p.currentQuantile = math.NaN()
+ p.currentBucket = math.NaN()
+ }
+ if p.currentByte != '{' {
+ return p.readingValue
+ }
+ return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte == '}' {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ }
+ if p.readTokenAsLabelName(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() == 0 {
+ p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
+ !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
+ }
+ if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '=' {
+ p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ return nil
+ }
+ return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentByte != '"' {
+ p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+ return nil
+ }
+ if p.readTokenAsLabelValue(); p.err != nil {
+ return nil
+ }
+ p.currentLabelPair.Value = proto.String(p.currentToken.String())
+ // Special treatment of summaries:
+ // - Quantile labels are special, will result in dto.Quantile later.
+ // - Other labels have to be added to currentLabels for signature calculation.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if p.currentLabelPair.GetName() == model.QuantileLabel {
+ if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ // Similar special treatment of histograms.
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if p.currentLabelPair.GetName() == model.BucketLabel {
+ if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+ return nil
+ }
+ } else {
+ p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+ }
+ }
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ switch p.currentByte {
+ case ',':
+ return p.startLabelName
+
+ case '}':
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
+ return nil
+ }
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+ // When we are here, we have read all the labels, so for the
+ // special case of a summary/histogram, we can finally find out
+ // if the metric already exists.
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if summary := p.summaries[signature]; summary != nil {
+ p.currentMetric = summary
+ } else {
+ p.summaries[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ signature := model.LabelsToSignature(p.currentLabels)
+ if histogram := p.histograms[signature]; histogram != nil {
+ p.currentMetric = histogram
+ } else {
+ p.histograms[signature] = p.currentMetric
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ } else {
+ p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ value, err := strconv.ParseFloat(p.currentToken.String(), 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+ return nil
+ }
+ switch p.currentMF.GetType() {
+ case dto.MetricType_COUNTER:
+ p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+ case dto.MetricType_GAUGE:
+ p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+ case dto.MetricType_UNTYPED:
+ p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+ case dto.MetricType_SUMMARY:
+ // *sigh*
+ if p.currentMetric.Summary == nil {
+ p.currentMetric.Summary = &dto.Summary{}
+ }
+ switch {
+ case p.currentIsSummaryCount:
+ p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsSummarySum:
+ p.currentMetric.Summary.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentQuantile):
+ p.currentMetric.Summary.Quantile = append(
+ p.currentMetric.Summary.Quantile,
+ &dto.Quantile{
+ Quantile: proto.Float64(p.currentQuantile),
+ Value: proto.Float64(value),
+ },
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ // *sigh*
+ if p.currentMetric.Histogram == nil {
+ p.currentMetric.Histogram = &dto.Histogram{}
+ }
+ switch {
+ case p.currentIsHistogramCount:
+ p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+ case p.currentIsHistogramSum:
+ p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+ case !math.IsNaN(p.currentBucket):
+ p.currentMetric.Histogram.Bucket = append(
+ p.currentMetric.Histogram.Bucket,
+ &dto.Bucket{
+ UpperBound: proto.Float64(p.currentBucket),
+ CumulativeCount: proto.Uint64(uint64(value)),
+ },
+ )
+ }
+ default:
+ p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+ }
+ if p.currentByte == '\n' {
+ return p.startOfLine
+ }
+ return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
+// other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+ p.currentToken.Reset()
+ escaped := false
+ for p.err == nil {
+ if recognizeEscapeSequence && escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '\n':
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+ p.currentToken.Reset()
+ if !isValidMetricNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+ p.currentToken.Reset()
+ if !isValidLabelNameStart(p.currentByte) {
+ return
+ }
+ for {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ return
+ }
+ }
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+ p.currentToken.Reset()
+ escaped := false
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+ return
+ }
+ if escaped {
+ switch p.currentByte {
+ case '"', '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ continue
+ }
+ switch p.currentByte {
+ case '"':
+ return
+ case '\n':
+ p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+ p.currentIsSummaryCount = false
+ p.currentIsSummarySum = false
+ p.currentIsHistogramCount = false
+ p.currentIsHistogramSum = false
+ name := p.currentToken.String()
+ if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+ return
+ }
+ // Check whether this is a _sum or _count for a summary/histogram.
+ summaryName := summaryMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ if isCount(name) {
+ p.currentIsSummaryCount = true
+ }
+ if isSum(name) {
+ p.currentIsSummarySum = true
+ }
+ return
+ }
+ }
+ histogramName := histogramMetricName(name)
+ if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+ if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ if isCount(name) {
+ p.currentIsHistogramCount = true
+ }
+ if isSum(name) {
+ p.currentIsHistogramSum = true
+ }
+ return
+ }
+ }
+ p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+ p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+}
+
+func isValidLabelNameContinuation(b byte) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+}
+
+func isValidMetricNameStart(b byte) bool {
+ return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte) bool {
+ return isValidLabelNameContinuation(b) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+ return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+ return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+ return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+ return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ default:
+ return name
+ }
+}
+
+func histogramMetricName(name string) string {
+ switch {
+ case isCount(name):
+ return name[:len(name)-6]
+ case isSum(name):
+ return name[:len(name)-4]
+ case isBucket(name):
+ return name[:len(name)-7]
+ default:
+ return name
+ }
+}
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
new file mode 100644
index 00000000..7723656d
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+ .hg
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
new file mode 100644
index 00000000..648b38cb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
@@ -0,0 +1,162 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+*/
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// For internal use, so that we can use the sort interface
+type accept_slice []Accept
+
+func (accept accept_slice) Len() int {
+ slice := []Accept(accept)
+ return len(slice)
+}
+
+func (accept accept_slice) Less(i, j int) bool {
+ slice := []Accept(accept)
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (accept accept_slice) Swap(i, j int) {
+ slice := []Accept(accept)
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) (accept []Accept) {
+ parts := strings.Split(header, ",")
+ accept = make([]Accept, 0, len(parts))
+ for _, part := range parts {
+ part := strings.Trim(part, " ")
+
+ a := Accept{}
+ a.Params = make(map[string]string)
+ a.Q = 1.0
+
+ mrp := strings.Split(part, ";")
+
+ media_range := mrp[0]
+ sp := strings.Split(media_range, "/")
+ a.Type = strings.Trim(sp[0], " ")
+
+ switch {
+ case len(sp) == 1 && a.Type == "*":
+ a.SubType = "*"
+ case len(sp) == 2:
+ a.SubType = strings.Trim(sp[1], " ")
+ default:
+ continue
+ }
+
+ if len(mrp) == 1 {
+ accept = append(accept, a)
+ continue
+ }
+
+ for _, param := range mrp[1:] {
+ sp := strings.SplitN(param, "=", 2)
+ if len(sp) != 2 {
+ continue
+ }
+ token := strings.Trim(sp[0], " ")
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp[1], 32)
+ } else {
+ a.Params[token] = strings.Trim(sp[1], " ")
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ slice := accept_slice(accept)
+ sort.Sort(slice)
+
+ return
+}
+
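+// For example (illustrative):
+//
+//	clauses := ParseAccept("text/plain;q=0.5, text/html")
+//	// clauses[0] is {Type: "text", SubType: "html", Q: 1},
+//	// clauses[1] is {Type: "text", SubType: "plain", Q: 0.5}.
+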
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
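+
+// For example (illustrative):
+//
+//	Negotiate("text/html, application/json;q=0.9",
+//		[]string{"application/json", "text/plain"})
+//
+// returns "application/json": the text/html clause matches no alternative,
+// so the lower-priority application/json clause wins.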
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
new file mode 100644
index 00000000..35e739c7
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,136 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "time"
+)
+
+type AlertStatus string
+
+const (
+ AlertFiring AlertStatus = "firing"
+ AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+ // Label value pairs for purpose of aggregation, matching, and disposition
+ // dispatching. This must minimally include an "alertname" label.
+ Labels LabelSet `json:"labels"`
+
+ // Extra key/value information which does not define alert identity.
+ Annotations LabelSet `json:"annotations"`
+
+ // The known time range for this alert. Both ends are optional.
+ StartsAt time.Time `json:"startsAt,omitempty"`
+ EndsAt time.Time `json:"endsAt,omitempty"`
+ GeneratorURL string `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+ return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+ return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+ s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+ if a.Resolved() {
+ return s + "[resolved]"
+ }
+ return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+ return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+ if a.EndsAt.IsZero() {
+ return false
+ }
+ return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+ if a.Resolved() {
+ return AlertResolved
+ }
+ return AlertFiring
+}
+
+// Validate checks whether the alert data is inconsistent.
+func (a *Alert) Validate() error {
+ if a.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if err := a.Labels.Validate(); err != nil {
+ return fmt.Errorf("invalid label set: %s", err)
+ }
+ if len(a.Labels) == 0 {
+ return fmt.Errorf("at least one label pair required")
+ }
+ if err := a.Annotations.Validate(); err != nil {
+ return fmt.Errorf("invalid annotations: %s", err)
+ }
+ return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+ if as[i].StartsAt.Before(as[j].StartsAt) {
+ return true
+ }
+ if as[i].EndsAt.Before(as[j].EndsAt) {
+ return true
+ }
+ return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+ for _, a := range as {
+ if !a.Resolved() {
+ return true
+ }
+ }
+ return false
+}
+
+// Status returns StatusFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+ if as.HasFiring() {
+ return AlertFiring
+ }
+ return AlertResolved
+}
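+
+// A small sketch (hypothetical values, not part of the upstream file): an
+// alert whose activity interval has already ended reports itself as resolved:
+//
+//	a := &Alert{
+//		Labels:   LabelSet{"alertname": "HighErrorRate"},
+//		StartsAt: time.Now().Add(-time.Hour),
+//		EndsAt:   time.Now().Add(-time.Minute),
+//	}
+//	a.Status() // AlertResolved, since EndsAt is non-zero and in the past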
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 00000000..fc4de410
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+ num, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, err
+ }
+ return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+ return fmt.Sprintf("%016x", uint64(f))
+}
+
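+// Round-trip sketch (illustrative): ParseFingerprint("0123456789abcdef")
+// yields a Fingerprint whose String() is "0123456789abcdef" again; the
+// value is formatted as 16 zero-padded, lower-case hex digits.
+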
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+ return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+ return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+ f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for k := range s {
+ if _, ok := o[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+ myLength, otherLength := len(s), len(o)
+ if myLength == 0 || otherLength == 0 {
+ return FingerprintSet{}
+ }
+
+ subSet := s
+ superSet := o
+
+ if otherLength < myLength {
+ subSet = o
+ superSet = s
+ }
+
+ out := FingerprintSet{}
+
+ for k := range subSet {
+ if _, ok := superSet[k]; ok {
+ out[k] = struct{}{}
+ }
+ }
+
+ return out
+}
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 00000000..038fc1c9
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
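+
+// Sketch of intended use within this package (illustrative; a separator
+// byte, 0xff in the signature helpers, keeps "ab"+"c" distinct from
+// "a"+"bc"):
+//
+//	h := hashNew()
+//	h = hashAdd(h, "name")
+//	h = hashAddByte(h, 0xff)
+//	h = hashAdd(h, "value")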
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 00000000..3b72e7ff
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,206 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+ "unicode/utf8"
+)
+
+const (
+ // AlertNameLabel is the name of the label containing an alert's name.
+ AlertNameLabel = "alertname"
+
+ // ExportedLabelPrefix is the prefix to prepend to the label names present in
+ // exported metrics if a label of the same name is added by the server.
+ ExportedLabelPrefix = "exported_"
+
+ // MetricNameLabel is the label name indicating the metric name of a
+ // timeseries.
+ MetricNameLabel = "__name__"
+
+ // SchemeLabel is the name of the label that holds the scheme on which to
+ // scrape a target.
+ SchemeLabel = "__scheme__"
+
+ // AddressLabel is the name of the label that holds the address of
+ // a scrape target.
+ AddressLabel = "__address__"
+
+ // MetricsPathLabel is the name of the label that holds the path on which to
+ // scrape a target.
+ MetricsPathLabel = "__metrics_path__"
+
+ // ReservedLabelPrefix is a prefix which is not legal in user-supplied
+ // label names.
+ ReservedLabelPrefix = "__"
+
+ // MetaLabelPrefix is a prefix for labels that provide meta information.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series.
+ MetaLabelPrefix = "__meta_"
+
+ // TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+ // Labels with this prefix are used for intermediate label processing and
+ // will not be attached to time series. This is reserved for use in
+ // Prometheus configuration files by users.
+ TmpLabelPrefix = "__tmp_"
+
+ // ParamLabelPrefix is a prefix for labels that provide URL parameters
+ // used to scrape a target.
+ ParamLabelPrefix = "__param_"
+
+ // JobLabel is the label name indicating the job from which a timeseries
+ // was scraped.
+ JobLabel = "job"
+
+ // InstanceLabel is the label name used for the instance label.
+ InstanceLabel = "instance"
+
+ // BucketLabel is used for the label that defines the upper bound of a
+ // bucket of a histogram ("le" -> "less or equal").
+ BucketLabel = "le"
+
+ // QuantileLabel is used for the label that defines the quantile in a
+ // summary.
+ QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE.
+func (ln LabelName) IsValid() bool {
+ if len(ln) == 0 {
+ return false
+ }
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
+
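+// For example, "http_requests_total" and "_private" are valid label names,
+// while "0count" (leading digit) and "label-name" (hyphen) are not.
+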
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ if !LabelNameRE.MatchString(s) {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !LabelNameRE.MatchString(s) {
+ return fmt.Errorf("%q is not a valid label name", s)
+ }
+ *ln = LabelName(s)
+ return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+ return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+ return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+ labelStrings := make([]string, 0, len(l))
+ for _, label := range l {
+ labelStrings = append(labelStrings, string(label))
+ }
+ return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+ return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+ return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+ return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+ Name LabelName
+ Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+ return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+ switch {
+ case l[i].Name > l[j].Name:
+ return false
+ case l[i].Name < l[j].Name:
+ return true
+ case l[i].Value > l[j].Value:
+ return false
+ case l[i].Value < l[j].Value:
+ return true
+ default:
+ return false
+ }
+}
+
+func (l LabelPairs) Swap(i, j int) {
+ l[i], l[j] = l[j], l[i]
+}
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 00000000..5f931cdb
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,169 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. A LabelSet
+// may be fully qualified, in which case it resolves to a single Metric in the
+// data store, or partial, in which case operations on it may match a whole
+// vector of Metric entities.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+ for ln, lv := range ls {
+ if !ln.IsValid() {
+ return fmt.Errorf("invalid name %q", ln)
+ }
+ if !lv.IsValid() {
+ return fmt.Errorf("invalid value %q", lv)
+ }
+ }
+ return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+ if len(ls) != len(o) {
+ return false
+ }
+ for ln, lv := range ls {
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if olv != lv {
+ return false
+ }
+ }
+ return true
+}
+
+// Before compares the label sets, using the following criteria:
+//
+// If ls has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: if the label does not exist at all in ls, then ls
+// is before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If ls and o are equal, the method returns false.
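+//
+// For example (illustrative values), LabelSet{"b": "1"} is before
+// LabelSet{"a": "1", "c": "1"} because it has fewer labels, and
+// LabelSet{"b": "1"} is before LabelSet{"a": "1"} because the label
+// name "a" is missing from the former.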
+func (ls LabelSet) Before(o LabelSet) bool {
+ if len(ls) < len(o) {
+ return true
+ }
+ if len(ls) > len(o) {
+ return false
+ }
+
+ lns := make(LabelNames, 0, len(ls)+len(o))
+ for ln := range ls {
+ lns = append(lns, ln)
+ }
+ for ln := range o {
+ lns = append(lns, ln)
+ }
+ // It's probably not worth it to de-dup lns.
+ sort.Sort(lns)
+ for _, ln := range lns {
+ mlv, ok := ls[ln]
+ if !ok {
+ return true
+ }
+ olv, ok := o[ln]
+ if !ok {
+ return false
+ }
+ if mlv < olv {
+ return true
+ }
+ if mlv > olv {
+ return false
+ }
+ }
+ return false
+}
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet {
+ lsn := make(LabelSet, len(ls))
+ for ln, lv := range ls {
+ lsn[ln] = lv
+ }
+ return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (l LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(l))
+
+ for k, v := range l {
+ result[k] = v
+ }
+
+ for k, v := range other {
+ result[k] = v
+ }
+
+ return result
+}
+
+func (l LabelSet) String() string {
+ lstrs := make([]string, 0, len(l))
+	for name, value := range l {
+		lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, value))
+ }
+
+ sort.Strings(lstrs)
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+ return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+ return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (l *LabelSet) UnmarshalJSON(b []byte) error {
+ var m map[LabelName]LabelValue
+ if err := json.Unmarshal(b, &m); err != nil {
+ return err
+ }
+ // encoding/json only unmarshals maps of the form map[string]T. It treats
+ // LabelName as a string and does not call its UnmarshalJSON method.
+ // Thus, we have to replicate the behavior here.
+ for ln := range m {
+ if !LabelNameRE.MatchString(string(ln)) {
+ return fmt.Errorf("%q is not a valid label name", ln)
+ }
+ }
+ *l = LabelSet(m)
+ return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
new file mode 100644
index 00000000..a5da59a5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,98 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+var (
+ separator = []byte{0}
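+	// MetricNameRE is a regular expression matching valid metric names.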
+ MetricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+)
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+ return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+ return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+ clone := Metric{}
+ for k, v := range m {
+ clone[k] = v
+ }
+ return clone
+}
+
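+// String implements fmt.Stringer. For example (illustrative values), a
+// Metric{"__name__": "requests", "code": "200"} renders as
+// requests{code="200"}, and the same Metric without the __name__ label
+// renders as {code="200"}.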
+func (m Metric) String() string {
+ metricName, hasName := m[MetricNameLabel]
+ numLabels := len(m) - 1
+ if !hasName {
+ numLabels = len(m)
+ }
+ labelStrings := make([]string, 0, numLabels)
+ for label, value := range m {
+ if label != MetricNameLabel {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+ }
+ }
+
+ switch numLabels {
+ case 0:
+ if hasName {
+ return string(metricName)
+ }
+ return "{}"
+ default:
+ sort.Strings(labelStrings)
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+ }
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+ return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+ return LabelSet(m).FastFingerprint()
+}
+
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+func IsValidMetricName(n LabelValue) bool {
+ if len(n) == 0 {
+ return false
+ }
+ for i, b := range n {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 00000000..a7b96917
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 00000000..8762b13c
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+var (
+ // cache the signature of an empty label set.
+ emptyLabelSignature = hashNew()
+)
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make([]string, 0, len(labels))
+ for labelName := range labels {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Strings(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, labelName)
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, labels[labelName])
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
+// parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ labelNames := make(LabelNames, 0, len(ls))
+ for labelName := range ls {
+ labelNames = append(labelNames, labelName)
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(ls[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses
+// a faster and less allocation-heavy hash function, which is more susceptible
+// to hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+ if len(ls) == 0 {
+ return Fingerprint(emptyLabelSignature)
+ }
+
+ var result uint64
+ for labelName, labelValue := range ls {
+ sum := hashNew()
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(labelValue))
+ result ^= sum
+ }
+ return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+ if len(labels) == 0 {
+ return emptyLabelSignature
+ }
+
+ sort.Sort(LabelNames(labels))
+
+ sum := hashNew()
+ for _, label := range labels {
+ sum = hashAdd(sum, string(label))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[label]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+ if len(m) == 0 {
+ return emptyLabelSignature
+ }
+
+ labelNames := make(LabelNames, 0, len(m))
+ for labelName := range m {
+ if _, exclude := labels[labelName]; !exclude {
+ labelNames = append(labelNames, labelName)
+ }
+ }
+ if len(labelNames) == 0 {
+ return emptyLabelSignature
+ }
+ sort.Sort(labelNames)
+
+ sum := hashNew()
+ for _, labelName := range labelNames {
+ sum = hashAdd(sum, string(labelName))
+ sum = hashAddByte(sum, SeparatorByte)
+ sum = hashAdd(sum, string(m[labelName]))
+ sum = hashAddByte(sum, SeparatorByte)
+ }
+ return sum
+}
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 00000000..7538e299
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "time"
+)
+
+// Matcher describes how the value of a given label is matched: either as
+// an exact string or, if IsRegex is set, as a regular expression.
+type Matcher struct {
+ Name LabelName `json:"name"`
+ Value string `json:"value"`
+ IsRegex bool `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+ type plain Matcher
+ if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+ return err
+ }
+
+ if len(m.Name) == 0 {
+ return fmt.Errorf("label name in matcher must not be empty")
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Validate returns nil iff all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+ if !m.Name.IsValid() {
+ return fmt.Errorf("invalid name %q", m.Name)
+ }
+ if m.IsRegex {
+ if _, err := regexp.Compile(m.Value); err != nil {
+ return fmt.Errorf("invalid regular expression %q", m.Value)
+ }
+ } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+ return fmt.Errorf("invalid value %q", m.Value)
+ }
+ return nil
+}
+
+// Silence defines the representation of a silence definition
+// in the Prometheus ecosystem.
+type Silence struct {
+ ID uint64 `json:"id,omitempty"`
+
+ Matchers []*Matcher `json:"matchers"`
+
+ StartsAt time.Time `json:"startsAt"`
+ EndsAt time.Time `json:"endsAt"`
+
+ CreatedAt time.Time `json:"createdAt,omitempty"`
+ CreatedBy string `json:"createdBy"`
+ Comment string `json:"comment,omitempty"`
+}
+
+// Validate returns nil iff all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+ if len(s.Matchers) == 0 {
+ return fmt.Errorf("at least one matcher required")
+ }
+ for _, m := range s.Matchers {
+ if err := m.Validate(); err != nil {
+ return fmt.Errorf("invalid matcher: %s", err)
+ }
+ }
+ if s.StartsAt.IsZero() {
+ return fmt.Errorf("start time missing")
+ }
+ if s.EndsAt.IsZero() {
+ return fmt.Errorf("end time missing")
+ }
+ if s.EndsAt.Before(s.StartsAt) {
+ return fmt.Errorf("start time must be before end time")
+ }
+ if s.CreatedBy == "" {
+ return fmt.Errorf("creator information missing")
+ }
+ if s.Comment == "" {
+ return fmt.Errorf("comment missing")
+ }
+ if s.CreatedAt.IsZero() {
+ return fmt.Errorf("creation timestamp missing")
+ }
+ return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 00000000..548968ae
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,249 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "fmt"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+	// minimumTick is the minimum supported time resolution. This has to be
+	// at most time.Second in order for the code below to work.
+ minimumTick = time.Millisecond
+	// second is the number of minimum ticks per second.
+ second = int64(time.Second / minimumTick)
+ // The number of nanoseconds per minimum tick.
+ nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+ // Earliest is the earliest Time representable. Handy for
+ // initializing a high watermark.
+ Earliest = Time(math.MinInt64)
+ // Latest is the latest Time representable. Handy for initializing
+ // a low watermark.
+ Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
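+//
+// For example (illustrative), the instant 2017-01-01T00:00:00 UTC
+// corresponds to Time(1483228800000).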
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+ Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+ return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+ return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+ return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+ return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+ return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+ return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+ return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+ return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+ return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+ return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+ return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+ return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+ return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
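+// Timestamps are decoded as decimal seconds since the epoch; for example
+// (illustrative), "1.5" decodes to Time(1500).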
+func (t *Time) UnmarshalJSON(b []byte) error {
+ p := strings.Split(string(b), ".")
+ switch len(p) {
+ case 1:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ *t = Time(v * second)
+
+ case 2:
+ v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ if err != nil {
+ return err
+ }
+ v *= second
+
+ prec := dotPrecision - len(p[1])
+ if prec < 0 {
+ p[1] = p[1][:dotPrecision]
+ } else if prec > 0 {
+ p[1] = p[1] + strings.Repeat("0", prec)
+ }
+
+ va, err := strconv.ParseInt(p[1], 10, 32)
+ if err != nil {
+ return err
+ }
+
+ *t = Time(v + va)
+
+ default:
+ return fmt.Errorf("invalid time %q", string(b))
+ }
+ return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
+
+// ParseDuration parses a string into a Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
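+// For example (illustrative), "30s" parses to Duration(30 * time.Second)
+// and "2h" to Duration(2 * time.Hour).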
+func ParseDuration(durationStr string) (Duration, error) {
+ matches := durationRE.FindStringSubmatch(durationStr)
+ if len(matches) != 3 {
+ return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+ }
+ var (
+ n, _ = strconv.Atoi(matches[1])
+ dur = time.Duration(n) * time.Millisecond
+ )
+ switch unit := matches[2]; unit {
+ case "y":
+ dur *= 1000 * 60 * 60 * 24 * 365
+ case "w":
+ dur *= 1000 * 60 * 60 * 24 * 7
+ case "d":
+ dur *= 1000 * 60 * 60 * 24
+ case "h":
+ dur *= 1000 * 60 * 60
+ case "m":
+ dur *= 1000 * 60
+ case "s":
+ dur *= 1000
+ case "ms":
+ // Value already correct
+ default:
+ return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
+ }
+ return Duration(dur), nil
+}
+
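+// String renders the duration with the largest unit that divides it evenly.
+// For example (illustrative), 90 minutes renders as "90m" and 2 hours as "2h".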
+func (d Duration) String() string {
+ var (
+ ms = int64(time.Duration(d) / time.Millisecond)
+ unit = "ms"
+ )
+ factors := map[string]int64{
+ "y": 1000 * 60 * 60 * 24 * 365,
+ "w": 1000 * 60 * 60 * 24 * 7,
+ "d": 1000 * 60 * 60 * 24,
+ "h": 1000 * 60 * 60,
+ "m": 1000 * 60,
+ "s": 1000,
+ "ms": 1,
+ }
+
+ switch int64(0) {
+ case ms % factors["y"]:
+ unit = "y"
+ case ms % factors["w"]:
+ unit = "w"
+ case ms % factors["d"]:
+ unit = "d"
+ case ms % factors["h"]:
+ unit = "h"
+ case ms % factors["m"]:
+ unit = "m"
+ case ms % factors["s"]:
+ unit = "s"
+ }
+ return fmt.Sprintf("%v%v", ms/factors[unit], unit)
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+ return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ dur, err := ParseDuration(s)
+ if err != nil {
+ return err
+ }
+ *d = dur
+ return nil
+}
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 00000000..dbf5d10e
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,403 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the values of v and o are equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
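+// A SamplePair encodes as a two-element JSON array of the form
+// [<timestamp>,"<value>"], e.g. [1.5,"3.14"] (illustrative values).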
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality are defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality are defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+ if s == o {
+ return true
+ }
+
+ if !s.Metric.Equal(o.Metric) {
+ return false
+ }
+ if !s.Timestamp.Equal(o.Timestamp) {
+ return false
+ }
+	if !s.Value.Equal(o.Value) {
+		return false
+	}
+
+ return true
+}
+
+func (s Sample) String() string {
+ return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ })
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ }{
+ Metric: s.Metric,
+ Value: SamplePair{
+ Timestamp: s.Timestamp,
+ Value: s.Value,
+ },
+ }
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ s.Metric = v.Metric
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+
+ return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+ return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+ switch {
+ case s[i].Metric.Before(s[j].Metric):
+ return true
+ case s[j].Metric.Before(s[i].Metric):
+ return false
+ case s[i].Timestamp.Before(s[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+func (s Samples) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, sample := range s {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+}
+
+func (ss SampleStream) String() string {
+ vals := make([]string, len(ss.Values))
+ for i, v := range ss.Values {
+ vals[i] = v.String()
+ }
+ return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
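+// ValueType enumerates the kinds of Value a query evaluation can produce.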
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
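+// UnmarshalJSON implements json.Unmarshaler.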
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "<ValNone>":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return "<ValNone>"
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+ return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
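+// A Scalar encodes like a SamplePair, e.g. [1.5,"42"] (illustrative values).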
+func (s Scalar) MarshalJSON() ([]byte, error) {
+	v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+	return json.Marshal([...]interface{}{s.Timestamp, v})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+ var f string
+ v := [...]interface{}{&s.Timestamp, &f}
+
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+
+ value, err := strconv.ParseFloat(f, 64)
+ if err != nil {
+ return fmt.Errorf("error parsing sample value: %s", err)
+ }
+ s.Value = SampleValue(value)
+ return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+ Value string `json:"value"`
+ Timestamp Time `json:"timestamp"`
+}
+
+func (s *String) String() string {
+ return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+ return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+ v := [...]interface{}{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Vector is essentially an alias for Samples, with the added contract that
+// all Samples in a Vector share the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+ entries := make([]string, len(vec))
+ for i, s := range vec {
+ entries[i] = s.String()
+ }
+ return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+ switch {
+ case vec[i].Metric.Before(vec[j].Metric):
+ return true
+ case vec[j].Metric.Before(vec[i].Metric):
+ return false
+ case vec[i].Timestamp.Before(vec[j].Timestamp):
+ return true
+ default:
+ return false
+ }
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+ if len(vec) != len(o) {
+ return false
+ }
+
+ for i, sample := range vec {
+ if !sample.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
+
+func (mat Matrix) String() string {
+ matCp := make(Matrix, len(mat))
+ copy(matCp, mat)
+ sort.Sort(matCp)
+
+ strs := make([]string, len(matCp))
+
+ for i, ss := range matCp {
+ strs[i] = ss.String()
+ }
+
+ return strings.Join(strs, "\n")
+}
diff --git a/vendor/github.com/prometheus/procfs/.travis.yml b/vendor/github.com/prometheus/procfs/.travis.yml
new file mode 100644
index 00000000..2b4554da
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/.travis.yml
@@ -0,0 +1,5 @@
+sudo: false
+language: go
+go:
+ - 1.5
+ - 1.6
diff --git a/vendor/github.com/prometheus/procfs/AUTHORS.md b/vendor/github.com/prometheus/procfs/AUTHORS.md
new file mode 100644
index 00000000..0c802dd8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/AUTHORS.md
@@ -0,0 +1,20 @@
+The Prometheus project was started by Matt T. Proud (emeritus) and
+Julius Volz in 2012.
+
+Maintainers of this repository:
+
+* Tobias Schmidt <ts@soundcloud.com>
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Armen Baghumian <abaghumian@noggin.com.au>
+* Bjoern Rabenstein <beorn@soundcloud.com>
+* David Cournapeau <cournape@gmail.com>
+* Ji-Hoon, Seol <jihoon.seol@gmail.com>
+* Jonas Große Sundrup <cherti@letopolis.de>
+* Julius Volz <julius.volz@gmail.com>
+* Matthias Rampke <mr@soundcloud.com>
+* Nicky Gerritsen <nicky@streamone.nl>
+* Rémi Audebert <contact@halfr.net>
+* Tobias Schmidt <tobidt@gmail.com>
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 00000000..5705f0fb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,18 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
+
+* Relevant coding style guidelines are the [Go Code Review
+ Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+ and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+ Practices for Production
+ Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
new file mode 100644
index 00000000..c264a49d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/Makefile
@@ -0,0 +1,6 @@
+ci:
+ ! gofmt -l *.go | read nothing
+ go vet
+ go test -v ./...
+ go get github.com/golang/lint/golint
+ golint *.go
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 00000000..53c5e9aa
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
new file mode 100644
index 00000000..6e7ee6b8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -0,0 +1,10 @@
+# procfs
+
+This procfs package provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+*WARNING*: This package is a work in progress. Its API may still break in
+backwards-incompatible ways without warnings. Use it at your own risk.
+
+[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
+[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
new file mode 100644
index 00000000..e2acd6d4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.NewStat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
+//
+package procfs
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
new file mode 100644
index 00000000..49aaab05
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,33 @@
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "path"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS string
+
+// DefaultMountPoint is the common mount point of the proc filesystem.
+const DefaultMountPoint = "/proc"
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+ info, err := os.Stat(mountPoint)
+ if err != nil {
+ return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
+ }
+ if !info.IsDir() {
+ return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
+ }
+
+ return FS(mountPoint), nil
+}
+
+// Path joins the given path elements under the filesystem's mount point.
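+// For example (illustrative), FS("/proc").Path("net", "ip_vs") yields
+// "/proc/net/ip_vs".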
+func (fs FS) Path(p ...string) string {
+ return path.Join(append([]string{string(fs)}, p...)...)
+}
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 00000000..e7012f73
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,224 @@
+package procfs
+
+import (
+ "bufio"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+ // Total count of connections.
+ Connections uint64
+	// Total incoming packets processed.
+ IncomingPackets uint64
+	// Total outgoing packets processed.
+ OutgoingPackets uint64
+ // Total incoming traffic.
+ IncomingBytes uint64
+ // Total outgoing traffic.
+ OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+ // The local (virtual) IP address.
+ LocalAddress net.IP
+ // The local (virtual) port.
+ LocalPort uint16
+ // The transport protocol (TCP, UDP).
+ Proto string
+ // The remote (real) IP address.
+ RemoteAddress net.IP
+ // The remote (real) port.
+ RemotePort uint16
+ // The current number of active connections for this virtual/real address pair.
+ ActiveConn uint64
+ // The current number of inactive connections for this virtual/real address pair.
+ InactConn uint64
+ // The current weight of this virtual/real address pair.
+ Weight uint64
+}
+
+// NewIPVSStats reads the IPVS statistics.
+func NewIPVSStats() (IPVSStats, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return fs.NewIPVSStats()
+}
+
+// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) NewIPVSStats() (IPVSStats, error) {
+ file, err := os.Open(fs.Path("net/ip_vs_stats"))
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ defer file.Close()
+
+ return parseIPVSStats(file)
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
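+// The kernel exposes all five counters on the totals line in hexadecimal,
+// hence base 16 in the ParseUint calls below.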
+func parseIPVSStats(file io.Reader) (IPVSStats, error) {
+ var (
+ statContent []byte
+ statLines []string
+ statFields []string
+ stats IPVSStats
+ )
+
+ statContent, err := ioutil.ReadAll(file)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ statLines = strings.SplitN(string(statContent), "\n", 4)
+ if len(statLines) != 4 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
+ }
+
+ statFields = strings.Fields(statLines[2])
+ if len(statFields) != 5 {
+ return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
+ }
+
+ stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+ stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
+ if err != nil {
+ return IPVSStats{}, err
+ }
+
+ return stats, nil
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.
+func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return []IPVSBackendStatus{}, err
+ }
+
+ return fs.NewIPVSBackendStatus()
+}
+
+// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
+ file, err := os.Open(fs.Path("net/ip_vs"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseIPVSBackendStatus(file)
+}
+
+func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
+ var (
+ status []IPVSBackendStatus
+ scanner = bufio.NewScanner(file)
+ proto string
+ localAddress net.IP
+ localPort uint16
+ err error
+ )
+
+ for scanner.Scan() {
+		fields := strings.Fields(scanner.Text())
+ if len(fields) == 0 {
+ continue
+ }
+ switch {
+		case fields[0] == "IP" || fields[0] == "Prot" || (len(fields) > 1 && fields[1] == "RemoteAddress:Port"):
+ continue
+ case fields[0] == "TCP" || fields[0] == "UDP":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localAddress, localPort, err = parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ case fields[0] == "->":
+ if len(fields) < 6 {
+ continue
+ }
+ remoteAddress, remotePort, err := parseIPPort(fields[1])
+ if err != nil {
+ return nil, err
+ }
+ weight, err := strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ activeConn, err := strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ inactConn, err := strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ status = append(status, IPVSBackendStatus{
+ LocalAddress: localAddress,
+ LocalPort: localPort,
+ RemoteAddress: remoteAddress,
+ RemotePort: remotePort,
+ Proto: proto,
+ Weight: weight,
+ ActiveConn: activeConn,
+ InactConn: inactConn,
+ })
+ }
+ }
+ return status, nil
+}
+
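+// parseIPPort parses the kernel's hex-encoded "address:port" notation.
+// For example (illustrative), "C0A80001:0050" is 192.168.0.1 port 80.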
+func parseIPPort(s string) (net.IP, uint16, error) {
+ tmp := strings.SplitN(s, ":", 2)
+
+ if len(tmp) != 2 {
+ return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
+ }
+
+ if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
+ return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
+ }
+
+ ip, err := hex.DecodeString(tmp[0])
+ if err != nil {
+ return nil, 0, err
+ }
+
+ port, err := strconv.ParseUint(tmp[1], 16, 16)
+ if err != nil {
+ return nil, 0, err
+ }
+
+ return ip, uint16(port), nil
+}
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
new file mode 100644
index 00000000..d7a248c0
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -0,0 +1,138 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
+ buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+)
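+
+// Illustrative matches: statuslineRE matches a fragment such as
+// "312319552 blocks [2/2] [UU]", and buildlineRE extracts the synced block
+// count from a progress fragment such as "(25821/312319552)".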
+
+// MDStat holds info parsed from /proc/mdstat.
+type MDStat struct {
+ // Name of the device.
+ Name string
+ // activity-state of the device.
+ ActivityState string
+ // Number of active disks.
+ DisksActive int64
+ // Total number of disks the device consists of.
+ DisksTotal int64
+ // Number of blocks the device holds.
+ BlocksTotal int64
+ // Number of blocks on the device that are in sync.
+ BlocksSynced int64
+}
+
+// ParseMDStat parses an mdstat file and returns a slice of structs with the relevant info.
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) {
+ mdStatusFilePath := fs.Path("mdstat")
+ content, err := ioutil.ReadFile(mdStatusFilePath)
+ if err != nil {
+ return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+ mdStates := []MDStat{}
+ lines := strings.Split(string(content), "\n")
+ for i, l := range lines {
+ if l == "" {
+ continue
+ }
+ if l[0] == ' ' {
+ continue
+ }
+ if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") {
+ continue
+ }
+
+ mainLine := strings.Split(l, " ")
+ if len(mainLine) < 3 {
+ return mdStates, fmt.Errorf("error parsing mdline: %s", l)
+ }
+ mdName := mainLine[0]
+ activityState := mainLine[2]
+
+ if len(lines) <= i+3 {
+ return mdStates, fmt.Errorf(
+ "error parsing %s: too few lines for md device %s",
+ mdStatusFilePath,
+ mdName,
+ )
+ }
+
+ active, total, size, err := evalStatusline(lines[i+1])
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+
+ // j is the line number of the syncing-line.
+ j := i + 2
+ if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
+ j = i + 3
+ }
+
+ // If the device is syncing at the moment, get the number of currently
+ // synced blocks; otherwise that number equals the size of the device.
+ syncedBlocks := size
+ if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") {
+ syncedBlocks, err = evalBuildline(lines[j])
+ if err != nil {
+ return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err)
+ }
+ }
+
+ mdStates = append(mdStates, MDStat{
+ Name: mdName,
+ ActivityState: activityState,
+ DisksActive: active,
+ DisksTotal: total,
+ BlocksTotal: size,
+ BlocksSynced: syncedBlocks,
+ })
+ }
+
+ return mdStates, nil
+}
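+
+// For reference, a typical /proc/mdstat excerpt the code above walks
+// (sample values assumed; see the regexes for the exact shape matched):
+//
+//	md0 : active raid1 sdb1[1] sda1[0]
+//	      104320 blocks [2/2] [UU]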
+
+func evalStatusline(statusline string) (active, total, size int64, err error) {
+ matches := statuslineRE.FindStringSubmatch(statusline)
+ if len(matches) != 4 {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
+ }
+
+ size, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ total, err = strconv.ParseInt(matches[2], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ active, err = strconv.ParseInt(matches[3], 10, 64)
+ if err != nil {
+ return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err)
+ }
+
+ return active, total, size, nil
+}
+
+func evalBuildline(buildline string) (syncedBlocks int64, err error) {
+ matches := buildlineRE.FindStringSubmatch(buildline)
+ if len(matches) != 2 {
+ return 0, fmt.Errorf("unexpected buildline: %s", buildline)
+ }
+
+ syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("%s in buildline: %s", err, buildline)
+ }
+
+ return syncedBlocks, nil
+}
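+
+// A sync-in-progress line, for reference (sample values assumed), from which
+// evalBuildline extracts the first number of the "(done/total)" pair:
+//
+//	[==>..................]  recovery = 12.6% (21169/167264) finish=0.2min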
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 00000000..0d0a6a90
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,212 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+ // The process ID.
+ PID int
+
+ fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+func (p Procs) Len() int { return len(p) }
+func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process read via /proc/self.
+func Self() (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.Self()
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllProcs()
+}
+
+// Self returns a process for the current process.
+func (fs FS) Self() (Proc, error) {
+ p, err := os.Readlink(fs.Path("self"))
+ if err != nil {
+ return Proc{}, err
+ }
+ pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1))
+ if err != nil {
+ return Proc{}, err
+ }
+ return fs.NewProc(pid)
+}
+
+// NewProc returns a process for the given pid.
+func (fs FS) NewProc(pid int) (Proc, error) {
+ if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+ d, err := os.Open(fs.Path())
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ p := Procs{}
+ for _, n := range names {
+ pid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+ p = append(p, Proc{PID: int(pid), fs: fs})
+ }
+
+ return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+ f, err := os.Open(p.path("cmdline"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(data) < 1 {
+ return []string{}, nil
+ }
+
+ return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
+}
+
+// Comm returns the command name of a process.
+func (p Proc) Comm() (string, error) {
+ f, err := os.Open(p.path("comm"))
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return "", err
+ }
+
+ return strings.TrimSpace(string(data)), nil
+}
+
+// Executable returns the absolute path of the executable command of a process.
+func (p Proc) Executable() (string, error) {
+ exe, err := os.Readlink(p.path("exe"))
+ if os.IsNotExist(err) {
+ return "", nil
+ }
+
+ return exe, err
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ fds := make([]uintptr, len(names))
+ for i, n := range names {
+ fd, err := strconv.ParseInt(n, 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse fd %s: %s", n, err)
+ }
+ fds[i] = uintptr(fd)
+ }
+
+ return fds, nil
+}
+
+// FileDescriptorTargets returns the targets of all file descriptors of a process.
+// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+ names, err := p.fileDescriptors()
+ if err != nil {
+ return nil, err
+ }
+
+ targets := make([]string, len(names))
+
+ for i, name := range names {
+ target, err := os.Readlink(p.path("fd", name))
+ if err == nil {
+ targets[i] = target
+ }
+ }
+
+ return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+ fds, err := p.fileDescriptors()
+ if err != nil {
+ return 0, err
+ }
+
+ return len(fds), nil
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+ d, err := os.Open(p.path("fd"))
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("could not read %s: %s", d.Name(), err)
+ }
+
+ return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+ return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
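+
+// Usage sketch (illustrative, not part of the package): enumerate processes
+// and print each command line, skipping PIDs that vanish mid-iteration:
+//
+//	procs, _ := procfs.AllProcs()
+//	for _, p := range procs {
+//		if argv, err := p.CmdLine(); err == nil {
+//			fmt.Println(p.PID, argv)
+//		}
+//	}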
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 00000000..b4e31d7b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,55 @@
+package procfs
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+ // Chars read.
+ RChar uint64
+ // Chars written.
+ WChar uint64
+ // Read syscalls.
+ SyscR uint64
+ // Write syscalls.
+ SyscW uint64
+ // Bytes read.
+ ReadBytes uint64
+ // Bytes written.
+ WriteBytes uint64
+ // Bytes written, but taking into account truncation. See
+ // Documentation/filesystems/proc.txt in the kernel sources for
+ // detailed explanation.
+ CancelledWriteBytes int64
+}
+
+// NewIO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) NewIO() (ProcIO, error) {
+ pio := ProcIO{}
+
+ f, err := os.Open(p.path("io"))
+ if err != nil {
+ return pio, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return pio, err
+ }
+
+ ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+ "read_bytes: %d\nwrite_bytes: %d\n" +
+ "cancelled_write_bytes: %d\n"
+
+ _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+ &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+ if err != nil {
+ return pio, err
+ }
+
+ return pio, nil
+}
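+
+// Usage sketch (illustrative): read the I/O counters of the current process:
+//
+//	p, _ := procfs.Self()
+//	pio, _ := p.NewIO()
+//	fmt.Println(pio.ReadBytes, pio.WriteBytes)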
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 00000000..2df997ce
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,137 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "regexp"
+ "strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
+type ProcLimits struct {
+ // CPU time limit in seconds.
+ CPUTime int
+ // Maximum size of files that the process may create.
+ FileSize int
+ // Maximum size of the process's data segment (initialized data,
+ // uninitialized data, and heap).
+ DataSize int
+ // Maximum size of the process stack in bytes.
+ StackSize int
+ // Maximum size of a core file.
+ CoreFileSize int
+ // Limit of the process's resident set in pages.
+ ResidentSet int
+ // Maximum number of processes that can be created for the real user ID of
+ // the calling process.
+ Processes int
+ // Value one greater than the maximum file descriptor number that can be
+ // opened by this process.
+ OpenFiles int
+ // Maximum number of bytes of memory that may be locked into RAM.
+ LockedMemory int
+ // Maximum size of the process's virtual memory address space in bytes.
+ AddressSpace int
+ // Limit on the combined number of flock(2) locks and fcntl(2) leases that
+ // this process may establish.
+ FileLocks int
+ // Limit of signals that may be queued for the real user ID of the calling
+ // process.
+ PendingSignals int
+ // Limit on the number of bytes that can be allocated for POSIX message
+ // queues for the real user ID of the calling process.
+ MsqqueueSize int
+ // Limit of the nice priority set using setpriority(2) or nice(2).
+ NicePriority int
+ // Limit of the real-time priority set using sched_setscheduler(2) or
+ // sched_setparam(2).
+ RealtimePriority int
+ // Limit (in microseconds) on the amount of CPU time that a process
+ // scheduled under a real-time scheduling policy may consume without making
+ // a blocking system call.
+ RealtimeTimeout int
+}
+
+const (
+ limitsFields = 3
+ limitsUnlimited = "unlimited"
+)
+
+var (
+ limitsDelimiter = regexp.MustCompile(" +")
+)
+
+// NewLimits returns the current soft limits of the process.
+func (p Proc) NewLimits() (ProcLimits, error) {
+ f, err := os.Open(p.path("limits"))
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ defer f.Close()
+
+ var (
+ l = ProcLimits{}
+ s = bufio.NewScanner(f)
+ )
+ for s.Scan() {
+ fields := limitsDelimiter.Split(s.Text(), limitsFields)
+ if len(fields) != limitsFields {
+ return ProcLimits{}, fmt.Errorf(
+ "couldn't parse %s line %s", f.Name(), s.Text())
+ }
+
+ switch fields[0] {
+ case "Max cpu time":
+ l.CPUTime, err = parseInt(fields[1])
+ case "Max file size":
+ l.FileSize, err = parseInt(fields[1])
+ case "Max data size":
+ l.DataSize, err = parseInt(fields[1])
+ case "Max stack size":
+ l.StackSize, err = parseInt(fields[1])
+ case "Max core file size":
+ l.CoreFileSize, err = parseInt(fields[1])
+ case "Max resident set":
+ l.ResidentSet, err = parseInt(fields[1])
+ case "Max processes":
+ l.Processes, err = parseInt(fields[1])
+ case "Max open files":
+ l.OpenFiles, err = parseInt(fields[1])
+ case "Max locked memory":
+ l.LockedMemory, err = parseInt(fields[1])
+ case "Max address space":
+ l.AddressSpace, err = parseInt(fields[1])
+ case "Max file locks":
+ l.FileLocks, err = parseInt(fields[1])
+ case "Max pending signals":
+ l.PendingSignals, err = parseInt(fields[1])
+ case "Max msgqueue size":
+ l.MsqqueueSize, err = parseInt(fields[1])
+ case "Max nice priority":
+ l.NicePriority, err = parseInt(fields[1])
+ case "Max realtime priority":
+ l.RealtimePriority, err = parseInt(fields[1])
+ case "Max realtime timeout":
+ l.RealtimeTimeout, err = parseInt(fields[1])
+ }
+ if err != nil {
+ return ProcLimits{}, err
+ }
+ }
+
+ return l, s.Err()
+}
+
+func parseInt(s string) (int, error) {
+ if s == limitsUnlimited {
+ return -1, nil
+ }
+ i, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
+ }
+ return int(i), nil
+}
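+
+// Usage sketch (illustrative): soft limits reported as "unlimited" come back
+// as -1, so callers can distinguish "no limit" from a zero limit:
+//
+//	l, _ := p.NewLimits() // p is a Proc
+//	if l.OpenFiles != -1 {
+//		fmt.Println("fd soft limit:", l.OpenFiles)
+//	}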
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 00000000..724e271b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,175 @@
+package procfs
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
+// which required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic. After
+// much research it was determined that USER_HZ is actually hardcoded to 100 on
+// all Go-supported platforms as of the time of this writing. This is why we
+// decided to hardcode it here as well. It is not impossible that there could
+// be systems with exceptions, but they should be very exotic edge cases, and
+// in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+ // The process ID.
+ PID int
+ // The filename of the executable.
+ Comm string
+ // The process state.
+ State string
+ // The PID of the parent of this process.
+ PPID int
+ // The process group ID of the process.
+ PGRP int
+ // The session ID of the process.
+ Session int
+ // The controlling terminal of the process.
+ TTY int
+ // The ID of the foreground process group of the controlling terminal of
+ // the process.
+ TPGID int
+ // The kernel flags word of the process.
+ Flags uint
+ // The number of minor faults the process has made which have not required
+ // loading a memory page from disk.
+ MinFlt uint
+ // The number of minor faults that the process's waited-for children have
+ // made.
+ CMinFlt uint
+ // The number of major faults the process has made which have required
+ // loading a memory page from disk.
+ MajFlt uint
+ // The number of major faults that the process's waited-for children have
+ // made.
+ CMajFlt uint
+ // Amount of time that this process has been scheduled in user mode,
+ // measured in clock ticks.
+ UTime uint
+ // Amount of time that this process has been scheduled in kernel mode,
+ // measured in clock ticks.
+ STime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in user mode, measured in clock ticks.
+ CUTime uint
+ // Amount of time that this process's waited-for children have been
+ // scheduled in kernel mode, measured in clock ticks.
+ CSTime uint
+ // For processes running a real-time scheduling policy, this is the negated
+ // scheduling priority, minus one.
+ Priority int
+ // The nice value, a value in the range 19 (low priority) to -20 (high
+ // priority).
+ Nice int
+ // Number of threads in this process.
+ NumThreads int
+ // The time the process started after system boot, the value is expressed
+ // in clock ticks.
+ Starttime uint64
+ // Virtual memory size in bytes.
+ VSize int
+ // Resident set size in pages.
+ RSS int
+
+ fs FS
+}
+
+// NewStat returns the current status information of the process.
+func (p Proc) NewStat() (ProcStat, error) {
+ f, err := os.Open(p.path("stat"))
+ if err != nil {
+ return ProcStat{}, err
+ }
+ defer f.Close()
+
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ var (
+ ignore int
+
+ s = ProcStat{PID: p.PID, fs: p.fs}
+ l = bytes.Index(data, []byte("("))
+ r = bytes.LastIndex(data, []byte(")"))
+ )
+
+ if l < 0 || r < 0 {
+ return ProcStat{}, fmt.Errorf(
+ "unexpected format, couldn't extract comm: %s",
+ data,
+ )
+ }
+
+ s.Comm = string(data[l+1 : r])
+ _, err = fmt.Fscan(
+ bytes.NewBuffer(data[r+2:]),
+ &s.State,
+ &s.PPID,
+ &s.PGRP,
+ &s.Session,
+ &s.TTY,
+ &s.TPGID,
+ &s.Flags,
+ &s.MinFlt,
+ &s.CMinFlt,
+ &s.MajFlt,
+ &s.CMajFlt,
+ &s.UTime,
+ &s.STime,
+ &s.CUTime,
+ &s.CSTime,
+ &s.Priority,
+ &s.Nice,
+ &s.NumThreads,
+ &ignore,
+ &s.Starttime,
+ &s.VSize,
+ &s.RSS,
+ )
+ if err != nil {
+ return ProcStat{}, err
+ }
+
+ return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() int {
+ return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+ return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp of the process in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+ stat, err := s.fs.NewStat()
+ if err != nil {
+ return 0, err
+ }
+ return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+ return float64(s.UTime+s.STime) / userHZ
+}
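+
+// Usage sketch (illustrative, pid assumed): CPU seconds and resident memory:
+//
+//	p, _ := procfs.NewProc(1234)
+//	s, _ := p.NewStat()
+//	fmt.Println(s.CPUTime(), s.ResidentMemory())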
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 00000000..1ca217e8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,56 @@
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+ // Boot time in seconds since the Epoch.
+ BootTime int64
+}
+
+// NewStat returns kernel/system statistics read from /proc/stat.
+func NewStat() (Stat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Stat{}, err
+ }
+
+ return fs.NewStat()
+}
+
+// NewStat returns kernel/system statistics read from the filesystem's stat file.
+func (fs FS) NewStat() (Stat, error) {
+ f, err := os.Open(fs.Path("stat"))
+ if err != nil {
+ return Stat{}, err
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ line := s.Text()
+ if !strings.HasPrefix(line, "btime") {
+ continue
+ }
+ fields := strings.Fields(line)
+ if len(fields) != 2 {
+ return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
+ }
+ i, err := strconv.ParseInt(fields[1], 10, 32)
+ if err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
+ }
+ return Stat{BootTime: i}, nil
+ }
+ if err := s.Err(); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
+ }
+
+ return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
+}
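+
+// For reference, the single /proc/stat line consumed above (sample value
+// assumed):
+//
+//	btime 1486480167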
diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS
deleted file mode 100644
index 73309904..00000000
--- a/vendor/golang.org/x/crypto/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
deleted file mode 100644
index 741eeb13..00000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
+++ /dev/null
@@ -1,892 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package terminal
-
-import (
- "bytes"
- "io"
- "sync"
- "unicode/utf8"
-)
-
-// EscapeCodes contains escape sequences that can be written to the terminal in
-// order to achieve different styles of text.
-type EscapeCodes struct {
- // Foreground colors
- Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte
-
- // Reset all attributes
- Reset []byte
-}
-
-var vt100EscapeCodes = EscapeCodes{
- Black: []byte{keyEscape, '[', '3', '0', 'm'},
- Red: []byte{keyEscape, '[', '3', '1', 'm'},
- Green: []byte{keyEscape, '[', '3', '2', 'm'},
- Yellow: []byte{keyEscape, '[', '3', '3', 'm'},
- Blue: []byte{keyEscape, '[', '3', '4', 'm'},
- Magenta: []byte{keyEscape, '[', '3', '5', 'm'},
- Cyan: []byte{keyEscape, '[', '3', '6', 'm'},
- White: []byte{keyEscape, '[', '3', '7', 'm'},
-
- Reset: []byte{keyEscape, '[', '0', 'm'},
-}
-
-// Terminal contains the state for running a VT100 terminal that is capable of
-// reading lines of input.
-type Terminal struct {
- // AutoCompleteCallback, if non-nil, is called for each keypress with
- // the full input line and the current position of the cursor (in
- // bytes, as an index into |line|). If it returns ok=false, the key
- // press is processed normally. Otherwise it returns a replacement line
- // and the new cursor position.
- AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool)
-
- // Escape contains a pointer to the escape codes for this terminal.
- // It's always a valid pointer, although the escape codes themselves
- // may be empty if the terminal doesn't support them.
- Escape *EscapeCodes
-
- // lock protects the terminal and the state in this object from
- // concurrent processing of a key press and a Write() call.
- lock sync.Mutex
-
- c io.ReadWriter
- prompt []rune
-
- // line is the current line being entered.
- line []rune
- // pos is the logical position of the cursor in line
- pos int
- // echo is true if local echo is enabled
- echo bool
- // pasteActive is true iff there is a bracketed paste operation in
- // progress.
- pasteActive bool
-
- // cursorX contains the current X value of the cursor where the left
- // edge is 0. cursorY contains the row number where the first row of
- // the current line is 0.
- cursorX, cursorY int
- // maxLine is the greatest value of cursorY so far.
- maxLine int
-
- termWidth, termHeight int
-
- // outBuf contains the terminal data to be sent.
- outBuf []byte
- // remainder contains the remainder of any partial key sequences after
- // a read. It aliases into inBuf.
- remainder []byte
- inBuf [256]byte
-
- // history contains previously entered commands so that they can be
- // accessed with the up and down keys.
- history stRingBuffer
- // historyIndex stores the currently accessed history entry, where zero
- // means the immediately previous entry.
- historyIndex int
- // When navigating up and down the history it's possible to return to
- // the incomplete, initial line. That value is stored in
- // historyPending.
- historyPending string
-}
-
-// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is
-// a local terminal, that terminal must first have been put into raw mode.
-// prompt is a string that is written at the start of each input line (e.g.
-// "> ").
-func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
- return &Terminal{
- Escape: &vt100EscapeCodes,
- c: c,
- prompt: []rune(prompt),
- termWidth: 80,
- termHeight: 24,
- echo: true,
- historyIndex: -1,
- }
-}
-
-const (
- keyCtrlD = 4
- keyCtrlU = 21
- keyEnter = '\r'
- keyEscape = 27
- keyBackspace = 127
- keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota
- keyUp
- keyDown
- keyLeft
- keyRight
- keyAltLeft
- keyAltRight
- keyHome
- keyEnd
- keyDeleteWord
- keyDeleteLine
- keyClearScreen
- keyPasteStart
- keyPasteEnd
-)
-
-var pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'}
-var pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'}
-
-// bytesToKey tries to parse a key sequence from b. If successful, it returns
-// the key and the remainder of the input. Otherwise it returns utf8.RuneError.
-func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
- if len(b) == 0 {
- return utf8.RuneError, nil
- }
-
- if !pasteActive {
- switch b[0] {
- case 1: // ^A
- return keyHome, b[1:]
- case 5: // ^E
- return keyEnd, b[1:]
- case 8: // ^H
- return keyBackspace, b[1:]
- case 11: // ^K
- return keyDeleteLine, b[1:]
- case 12: // ^L
- return keyClearScreen, b[1:]
- case 23: // ^W
- return keyDeleteWord, b[1:]
- }
- }
-
- if b[0] != keyEscape {
- if !utf8.FullRune(b) {
- return utf8.RuneError, b
- }
- r, l := utf8.DecodeRune(b)
- return r, b[l:]
- }
-
- if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' {
- switch b[2] {
- case 'A':
- return keyUp, b[3:]
- case 'B':
- return keyDown, b[3:]
- case 'C':
- return keyRight, b[3:]
- case 'D':
- return keyLeft, b[3:]
- case 'H':
- return keyHome, b[3:]
- case 'F':
- return keyEnd, b[3:]
- }
- }
-
- if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' {
- switch b[5] {
- case 'C':
- return keyAltRight, b[6:]
- case 'D':
- return keyAltLeft, b[6:]
- }
- }
-
- if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) {
- return keyPasteStart, b[6:]
- }
-
- if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) {
- return keyPasteEnd, b[6:]
- }
-
- // If we get here then we have a key that we don't recognise, or a
- // partial sequence. It's not clear how one should find the end of a
- // sequence without knowing them all, but it seems that [a-zA-Z~] only
- // appears at the end of a sequence.
- for i, c := range b[0:] {
- if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' {
- return keyUnknown, b[i+1:]
- }
- }
-
- return utf8.RuneError, b
-}
-
-// queue appends data to the end of t.outBuf
-func (t *Terminal) queue(data []rune) {
- t.outBuf = append(t.outBuf, []byte(string(data))...)
-}
-
-var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'}
-var space = []rune{' '}
-
-func isPrintable(key rune) bool {
- isInSurrogateArea := key >= 0xd800 && key <= 0xdbff
- return key >= 32 && !isInSurrogateArea
-}
-
-// moveCursorToPos appends data to t.outBuf which will move the cursor to the
-// given, logical position in the text.
-func (t *Terminal) moveCursorToPos(pos int) {
- if !t.echo {
- return
- }
-
- x := visualLength(t.prompt) + pos
- y := x / t.termWidth
- x = x % t.termWidth
-
- up := 0
- if y < t.cursorY {
- up = t.cursorY - y
- }
-
- down := 0
- if y > t.cursorY {
- down = y - t.cursorY
- }
-
- left := 0
- if x < t.cursorX {
- left = t.cursorX - x
- }
-
- right := 0
- if x > t.cursorX {
- right = x - t.cursorX
- }
-
- t.cursorX = x
- t.cursorY = y
- t.move(up, down, left, right)
-}
-
-func (t *Terminal) move(up, down, left, right int) {
- movement := make([]rune, 3*(up+down+left+right))
- m := movement
- for i := 0; i < up; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'A'
- m = m[3:]
- }
- for i := 0; i < down; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'B'
- m = m[3:]
- }
- for i := 0; i < left; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'D'
- m = m[3:]
- }
- for i := 0; i < right; i++ {
- m[0] = keyEscape
- m[1] = '['
- m[2] = 'C'
- m = m[3:]
- }
-
- t.queue(movement)
-}
-
-func (t *Terminal) clearLineToRight() {
- op := []rune{keyEscape, '[', 'K'}
- t.queue(op)
-}
-
-const maxLineLength = 4096
-
-func (t *Terminal) setLine(newLine []rune, newPos int) {
- if t.echo {
- t.moveCursorToPos(0)
- t.writeLine(newLine)
- for i := len(newLine); i < len(t.line); i++ {
- t.writeLine(space)
- }
- t.moveCursorToPos(newPos)
- }
- t.line = newLine
- t.pos = newPos
-}
-
-func (t *Terminal) advanceCursor(places int) {
- t.cursorX += places
- t.cursorY += t.cursorX / t.termWidth
- if t.cursorY > t.maxLine {
- t.maxLine = t.cursorY
- }
- t.cursorX = t.cursorX % t.termWidth
-
- if places > 0 && t.cursorX == 0 {
- // Normally terminals will advance the current position
- // when writing a character. But that doesn't happen
- // for the last character in a line. However, when
- // writing a character (except a new line) that causes
- // a line wrap, the position will be advanced two
- // places.
- //
- // So, if we are stopping at the end of a line, we
- // need to write a newline so that our cursor can be
- // advanced to the next line.
- t.outBuf = append(t.outBuf, '\n')
- }
-}
-
-func (t *Terminal) eraseNPreviousChars(n int) {
- if n == 0 {
- return
- }
-
- if t.pos < n {
- n = t.pos
- }
- t.pos -= n
- t.moveCursorToPos(t.pos)
-
- copy(t.line[t.pos:], t.line[n+t.pos:])
- t.line = t.line[:len(t.line)-n]
- if t.echo {
- t.writeLine(t.line[t.pos:])
- for i := 0; i < n; i++ {
- t.queue(space)
- }
- t.advanceCursor(n)
- t.moveCursorToPos(t.pos)
- }
-}
-
-// countToLeftWord returns the number of characters from the cursor to the
-// start of the previous word.
-func (t *Terminal) countToLeftWord() int {
- if t.pos == 0 {
- return 0
- }
-
- pos := t.pos - 1
- for pos > 0 {
- if t.line[pos] != ' ' {
- break
- }
- pos--
- }
- for pos > 0 {
- if t.line[pos] == ' ' {
- pos++
- break
- }
- pos--
- }
-
- return t.pos - pos
-}
-
-// countToRightWord returns the number of characters from the cursor to the
-// start of the next word.
-func (t *Terminal) countToRightWord() int {
- pos := t.pos
- for pos < len(t.line) {
- if t.line[pos] == ' ' {
- break
- }
- pos++
- }
- for pos < len(t.line) {
- if t.line[pos] != ' ' {
- break
- }
- pos++
- }
- return pos - t.pos
-}
-
-// visualLength returns the number of visible glyphs in s.
-func visualLength(runes []rune) int {
- inEscapeSeq := false
- length := 0
-
- for _, r := range runes {
- switch {
- case inEscapeSeq:
- if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') {
- inEscapeSeq = false
- }
- case r == '\x1b':
- inEscapeSeq = true
- default:
- length++
- }
- }
-
- return length
-}
-
-// handleKey processes the given key and, optionally, returns a line of text
-// that the user has entered.
-func (t *Terminal) handleKey(key rune) (line string, ok bool) {
- if t.pasteActive && key != keyEnter {
- t.addKeyToLine(key)
- return
- }
-
- switch key {
- case keyBackspace:
- if t.pos == 0 {
- return
- }
- t.eraseNPreviousChars(1)
- case keyAltLeft:
- // move left by a word.
- t.pos -= t.countToLeftWord()
- t.moveCursorToPos(t.pos)
- case keyAltRight:
- // move right by a word.
- t.pos += t.countToRightWord()
- t.moveCursorToPos(t.pos)
- case keyLeft:
- if t.pos == 0 {
- return
- }
- t.pos--
- t.moveCursorToPos(t.pos)
- case keyRight:
- if t.pos == len(t.line) {
- return
- }
- t.pos++
- t.moveCursorToPos(t.pos)
- case keyHome:
- if t.pos == 0 {
- return
- }
- t.pos = 0
- t.moveCursorToPos(t.pos)
- case keyEnd:
- if t.pos == len(t.line) {
- return
- }
- t.pos = len(t.line)
- t.moveCursorToPos(t.pos)
- case keyUp:
- entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1)
- if !ok {
- return "", false
- }
- if t.historyIndex == -1 {
- t.historyPending = string(t.line)
- }
- t.historyIndex++
- runes := []rune(entry)
- t.setLine(runes, len(runes))
- case keyDown:
- switch t.historyIndex {
- case -1:
- return
- case 0:
- runes := []rune(t.historyPending)
- t.setLine(runes, len(runes))
- t.historyIndex--
- default:
- entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1)
- if ok {
- t.historyIndex--
- runes := []rune(entry)
- t.setLine(runes, len(runes))
- }
- }
- case keyEnter:
- t.moveCursorToPos(len(t.line))
- t.queue([]rune("\r\n"))
- line = string(t.line)
- ok = true
- t.line = t.line[:0]
- t.pos = 0
- t.cursorX = 0
- t.cursorY = 0
- t.maxLine = 0
- case keyDeleteWord:
- // Delete zero or more spaces and then one or more characters.
- t.eraseNPreviousChars(t.countToLeftWord())
- case keyDeleteLine:
- // Delete everything from the current cursor position to the
- // end of line.
- for i := t.pos; i < len(t.line); i++ {
- t.queue(space)
- t.advanceCursor(1)
- }
- t.line = t.line[:t.pos]
- t.moveCursorToPos(t.pos)
- case keyCtrlD:
- // Erase the character under the current position.
- // The EOF case when the line is empty is handled in
- // readLine().
- if t.pos < len(t.line) {
- t.pos++
- t.eraseNPreviousChars(1)
- }
- case keyCtrlU:
- t.eraseNPreviousChars(t.pos)
- case keyClearScreen:
- // Erases the screen and moves the cursor to the home position.
- t.queue([]rune("\x1b[2J\x1b[H"))
- t.queue(t.prompt)
- t.cursorX, t.cursorY = 0, 0
- t.advanceCursor(visualLength(t.prompt))
- t.setLine(t.line, t.pos)
- default:
- if t.AutoCompleteCallback != nil {
- prefix := string(t.line[:t.pos])
- suffix := string(t.line[t.pos:])
-
- t.lock.Unlock()
- newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key)
- t.lock.Lock()
-
- if completeOk {
- t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos]))
- return
- }
- }
- if !isPrintable(key) {
- return
- }
- if len(t.line) == maxLineLength {
- return
- }
- t.addKeyToLine(key)
- }
- return
-}
-
-// addKeyToLine inserts the given key at the current position in the current
-// line.
-func (t *Terminal) addKeyToLine(key rune) {
- if len(t.line) == cap(t.line) {
- newLine := make([]rune, len(t.line), 2*(1+len(t.line)))
- copy(newLine, t.line)
- t.line = newLine
- }
- t.line = t.line[:len(t.line)+1]
- copy(t.line[t.pos+1:], t.line[t.pos:])
- t.line[t.pos] = key
- if t.echo {
- t.writeLine(t.line[t.pos:])
- }
- t.pos++
- t.moveCursorToPos(t.pos)
-}
-
-func (t *Terminal) writeLine(line []rune) {
- for len(line) != 0 {
- remainingOnLine := t.termWidth - t.cursorX
- todo := len(line)
- if todo > remainingOnLine {
- todo = remainingOnLine
- }
- t.queue(line[:todo])
- t.advanceCursor(visualLength(line[:todo]))
- line = line[todo:]
- }
-}
-
-func (t *Terminal) Write(buf []byte) (n int, err error) {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- if t.cursorX == 0 && t.cursorY == 0 {
- // This is the easy case: there's nothing on the screen that we
- // have to move out of the way.
- return t.c.Write(buf)
- }
-
- // We have a prompt and possibly user input on the screen. We
- // have to clear it first.
- t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */)
- t.cursorX = 0
- t.clearLineToRight()
-
- for t.cursorY > 0 {
- t.move(1 /* up */, 0, 0, 0)
- t.cursorY--
- t.clearLineToRight()
- }
-
- if _, err = t.c.Write(t.outBuf); err != nil {
- return
- }
- t.outBuf = t.outBuf[:0]
-
- if n, err = t.c.Write(buf); err != nil {
- return
- }
-
- t.writeLine(t.prompt)
- if t.echo {
- t.writeLine(t.line)
- }
-
- t.moveCursorToPos(t.pos)
-
- if _, err = t.c.Write(t.outBuf); err != nil {
- return
- }
- t.outBuf = t.outBuf[:0]
- return
-}
-
-// ReadPassword temporarily changes the prompt and reads a password, without
-// echo, from the terminal.
-func (t *Terminal) ReadPassword(prompt string) (line string, err error) {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- oldPrompt := t.prompt
- t.prompt = []rune(prompt)
- t.echo = false
-
- line, err = t.readLine()
-
- t.prompt = oldPrompt
- t.echo = true
-
- return
-}
-
-// ReadLine returns a line of input from the terminal.
-func (t *Terminal) ReadLine() (line string, err error) {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- return t.readLine()
-}
-
-func (t *Terminal) readLine() (line string, err error) {
- // t.lock must be held at this point
-
- if t.cursorX == 0 && t.cursorY == 0 {
- t.writeLine(t.prompt)
- t.c.Write(t.outBuf)
- t.outBuf = t.outBuf[:0]
- }
-
- lineIsPasted := t.pasteActive
-
- for {
- rest := t.remainder
- lineOk := false
- for !lineOk {
- var key rune
- key, rest = bytesToKey(rest, t.pasteActive)
- if key == utf8.RuneError {
- break
- }
- if !t.pasteActive {
- if key == keyCtrlD {
- if len(t.line) == 0 {
- return "", io.EOF
- }
- }
- if key == keyPasteStart {
- t.pasteActive = true
- if len(t.line) == 0 {
- lineIsPasted = true
- }
- continue
- }
- } else if key == keyPasteEnd {
- t.pasteActive = false
- continue
- }
- if !t.pasteActive {
- lineIsPasted = false
- }
- line, lineOk = t.handleKey(key)
- }
- if len(rest) > 0 {
- n := copy(t.inBuf[:], rest)
- t.remainder = t.inBuf[:n]
- } else {
- t.remainder = nil
- }
- t.c.Write(t.outBuf)
- t.outBuf = t.outBuf[:0]
- if lineOk {
- if t.echo {
- t.historyIndex = -1
- t.history.Add(line)
- }
- if lineIsPasted {
- err = ErrPasteIndicator
- }
- return
- }
-
- // t.remainder is a slice at the beginning of t.inBuf
- // containing a partial key sequence
- readBuf := t.inBuf[len(t.remainder):]
- var n int
-
- t.lock.Unlock()
- n, err = t.c.Read(readBuf)
- t.lock.Lock()
-
- if err != nil {
- return
- }
-
- t.remainder = t.inBuf[:n+len(t.remainder)]
- }
-
- panic("unreachable") // for Go 1.0.
-}
-
-// SetPrompt sets the prompt to be used when reading subsequent lines.
-func (t *Terminal) SetPrompt(prompt string) {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- t.prompt = []rune(prompt)
-}
-
-func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) {
- // Move cursor to column zero at the start of the line.
- t.move(t.cursorY, 0, t.cursorX, 0)
- t.cursorX, t.cursorY = 0, 0
- t.clearLineToRight()
- for t.cursorY < numPrevLines {
- // Move down a line
- t.move(0, 1, 0, 0)
- t.cursorY++
- t.clearLineToRight()
- }
- // Move back to beginning.
- t.move(t.cursorY, 0, 0, 0)
- t.cursorX, t.cursorY = 0, 0
-
- t.queue(t.prompt)
- t.advanceCursor(visualLength(t.prompt))
- t.writeLine(t.line)
- t.moveCursorToPos(t.pos)
-}
-
-func (t *Terminal) SetSize(width, height int) error {
- t.lock.Lock()
- defer t.lock.Unlock()
-
- if width == 0 {
- width = 1
- }
-
- oldWidth := t.termWidth
- t.termWidth, t.termHeight = width, height
-
- switch {
- case width == oldWidth:
- // If the width didn't change then nothing else needs to be
- // done.
- return nil
- case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0:
- // If there is nothing on current line and no prompt printed,
- // just do nothing
- return nil
- case width < oldWidth:
- // Some terminals (e.g. xterm) will truncate lines that were
- // too long when shrinking. Others (e.g. gnome-terminal) will
- // attempt to wrap them. For the former, repainting t.maxLine
- // works great, but that behaviour goes badly wrong in the case
- // of the latter because they have doubled every full line.
-
- // We assume that we are working on a terminal that wraps lines
- // and adjust the cursor position based on every previous line
- // wrapping and turning into two. This causes the prompt on
- // xterms to move upwards, which isn't great, but it avoids a
- // huge mess with gnome-terminal.
- if t.cursorX >= t.termWidth {
- t.cursorX = t.termWidth - 1
- }
- t.cursorY *= 2
- t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2)
- case width > oldWidth:
- // If the terminal expands then our position calculations will
- // be wrong in the future because we think the cursor is
- // |t.pos| chars into the string, but there will be a gap at
- // the end of any wrapped line.
- //
- // But the position will actually be correct until we move, so
- // we can move back to the beginning and repaint everything.
- t.clearAndRepaintLinePlusNPrevious(t.maxLine)
- }
-
- _, err := t.c.Write(t.outBuf)
- t.outBuf = t.outBuf[:0]
- return err
-}
-
-type pasteIndicatorError struct{}
-
-func (pasteIndicatorError) Error() string {
- return "terminal: ErrPasteIndicator not correctly handled"
-}
-
-// ErrPasteIndicator may be returned from ReadLine as the error, in addition
-// to valid line data. It indicates that bracketed paste mode is enabled and
-// that the returned line consists only of pasted data. Programs may wish to
-// interpret pasted data more literally than typed data.
-var ErrPasteIndicator = pasteIndicatorError{}
-
-// SetBracketedPasteMode requests that the terminal bracket paste operations
-// with markers. Not all terminals support this but, if it is supported, then
-// enabling this mode will stop any autocomplete callback from running due to
-// pastes. Additionally, any lines that are completely pasted will be returned
-// from ReadLine with the error set to ErrPasteIndicator.
-func (t *Terminal) SetBracketedPasteMode(on bool) {
- if on {
- io.WriteString(t.c, "\x1b[?2004h")
- } else {
- io.WriteString(t.c, "\x1b[?2004l")
- }
-}
-
-// stRingBuffer is a ring buffer of strings.
-type stRingBuffer struct {
- // entries contains max elements.
- entries []string
- max int
- // head contains the index of the element most recently added to the ring.
- head int
- // size contains the number of elements in the ring.
- size int
-}
-
-func (s *stRingBuffer) Add(a string) {
- if s.entries == nil {
- const defaultNumEntries = 100
- s.entries = make([]string, defaultNumEntries)
- s.max = defaultNumEntries
- }
-
- s.head = (s.head + 1) % s.max
- s.entries[s.head] = a
- if s.size < s.max {
- s.size++
- }
-}
-
-// NthPreviousEntry returns the value passed to the nth previous call to Add.
-// If n is zero then the immediately prior value is returned; if n is one, the
-// next most recent, and so on. If such an element doesn't exist then ok is
-// false.
-func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
- if n >= s.size {
- return "", false
- }
- index := s.head - n
- if index < 0 {
- index += s.max
- }
- return s.entries[index], true
-}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go
deleted file mode 100644
index 0763c9a9..00000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/util.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd
-
-// Package terminal provides support functions for dealing with terminals, as
-// commonly found on UNIX systems.
-//
-// Putting a terminal into raw mode is the most common requirement:
-//
-// oldState, err := terminal.MakeRaw(0)
-// if err != nil {
-// panic(err)
-// }
-// defer terminal.Restore(0, oldState)
-package terminal
-
-import (
- "io"
- "syscall"
- "unsafe"
-)
-
-// State contains the state of a terminal.
-type State struct {
- termios syscall.Termios
-}
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd int) bool {
- var termios syscall.Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
-}
-
-// MakeRaw puts the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-func MakeRaw(fd int) (*State, error) {
- var oldState State
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
- return nil, err
- }
-
- newState := oldState.termios
- newState.Iflag &^= syscall.ISTRIP | syscall.INLCR | syscall.ICRNL | syscall.IGNCR | syscall.IXON | syscall.IXOFF
- newState.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.ISIG
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
- return nil, err
- }
-
- return &oldState, nil
-}
-
-// GetState returns the current state of a terminal which may be useful to
-// restore the terminal after a signal.
-func GetState(fd int) (*State, error) {
- var oldState State
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
- return nil, err
- }
-
- return &oldState, nil
-}
-
-// Restore restores the terminal connected to the given file descriptor to a
-// previous state.
-func Restore(fd int, state *State) error {
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0)
- return err
-}
-
-// GetSize returns the dimensions of the given terminal.
-func GetSize(fd int) (width, height int, err error) {
- var dimensions [4]uint16
-
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
- return -1, -1, err
- }
- return int(dimensions[1]), int(dimensions[0]), nil
-}
-
-// ReadPassword reads a line of input from a terminal without local echo. This
-// is commonly used for inputting passwords and other sensitive data. The slice
-// returned does not include the \n.
-func ReadPassword(fd int) ([]byte, error) {
- var oldState syscall.Termios
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 {
- return nil, err
- }
-
- newState := oldState
- newState.Lflag &^= syscall.ECHO
- newState.Lflag |= syscall.ICANON | syscall.ISIG
- newState.Iflag |= syscall.ICRNL
- if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
- return nil, err
- }
-
- defer func() {
- syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0)
- }()
-
- var buf [16]byte
- var ret []byte
- for {
- n, err := syscall.Read(fd, buf[:])
- if err != nil {
- return nil, err
- }
- if n == 0 {
- if len(ret) == 0 {
- return nil, io.EOF
- }
- break
- }
- if buf[n-1] == '\n' {
- n--
- }
- ret = append(ret, buf[:n]...)
- if n < len(buf) {
- break
- }
- }
-
- return ret, nil
-}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
deleted file mode 100644
index 9c1ffd14..00000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build darwin dragonfly freebsd netbsd openbsd
-
-package terminal
-
-import "syscall"
-
-const ioctlReadTermios = syscall.TIOCGETA
-const ioctlWriteTermios = syscall.TIOCSETA
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
deleted file mode 100644
index 5883b22d..00000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package terminal
-
-// These constants are declared here, rather than importing
-// them from the syscall package as some syscall packages, even
-// on linux, for example gccgo, do not declare them.
-const ioctlReadTermios = 0x5401 // syscall.TCGETS
-const ioctlWriteTermios = 0x5402 // syscall.TCSETS
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
deleted file mode 100644
index 2dd6c3d9..00000000
--- a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows
-
-// Package terminal provides support functions for dealing with terminals, as
-// commonly found on UNIX systems.
-//
-// Putting a terminal into raw mode is the most common requirement:
-//
-// oldState, err := terminal.MakeRaw(0)
-// if err != nil {
-// panic(err)
-// }
-// defer terminal.Restore(0, oldState)
-package terminal
-
-import (
- "io"
- "syscall"
- "unsafe"
-)
-
-const (
- enableLineInput = 2
- enableEchoInput = 4
- enableProcessedInput = 1
- enableWindowInput = 8
- enableMouseInput = 16
- enableInsertMode = 32
- enableQuickEditMode = 64
- enableExtendedFlags = 128
- enableAutoPosition = 256
- enableProcessedOutput = 1
- enableWrapAtEolOutput = 2
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
-var (
- procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
- procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
- procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
-)
-
-type (
- short int16
- word uint16
-
- coord struct {
- x short
- y short
- }
- smallRect struct {
- left short
- top short
- right short
- bottom short
- }
- consoleScreenBufferInfo struct {
- size coord
- cursorPosition coord
- attributes word
- window smallRect
- maximumWindowSize coord
- }
-)
-
-type State struct {
- mode uint32
-}
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(fd int) bool {
- var st uint32
- r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- return r != 0 && e == 0
-}
-
-// MakeRaw puts the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-func MakeRaw(fd int) (*State, error) {
- var st uint32
- _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- if e != 0 {
- return nil, error(e)
- }
- st &^= (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput)
- _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
- if e != 0 {
- return nil, error(e)
- }
- return &State{st}, nil
-}
-
-// GetState returns the current state of a terminal which may be useful to
-// restore the terminal after a signal.
-func GetState(fd int) (*State, error) {
- var st uint32
- _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- if e != 0 {
- return nil, error(e)
- }
- return &State{st}, nil
-}
-
-// Restore restores the terminal connected to the given file descriptor to a
-// previous state.
-func Restore(fd int, state *State) error {
- _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0)
- return err
-}
-
-// GetSize returns the dimensions of the given terminal.
-func GetSize(fd int) (width, height int, err error) {
- var info consoleScreenBufferInfo
- _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0)
- if e != 0 {
- return 0, 0, error(e)
- }
- return int(info.size.x), int(info.size.y), nil
-}
-
-// ReadPassword reads a line of input from a terminal without local echo. This
-// is commonly used for inputting passwords and other sensitive data. The slice
-// returned does not include the \n.
-func ReadPassword(fd int) ([]byte, error) {
- var st uint32
- _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- if e != 0 {
- return nil, error(e)
- }
- old := st
-
- st &^= (enableEchoInput)
- st |= (enableProcessedInput | enableLineInput | enableProcessedOutput)
- _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
- if e != 0 {
- return nil, error(e)
- }
-
- defer func() {
- syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)
- }()
-
- var buf [16]byte
- var ret []byte
- for {
- n, err := syscall.Read(syscall.Handle(fd), buf[:])
- if err != nil {
- return nil, err
- }
- if n == 0 {
- if len(ret) == 0 {
- return nil, io.EOF
- }
- break
- }
- if buf[n-1] == '\n' {
- n--
- }
- if n > 0 && buf[n-1] == '\r' {
- n--
- }
- ret = append(ret, buf[:n]...)
- if n < len(buf) {
- break
- }
- }
-
- return ret, nil
-}
diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS
new file mode 100644
index 00000000..15167cd7
--- /dev/null
+++ b/vendor/golang.org/x/net/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS
new file mode 100644
index 00000000..1c4577e9
--- /dev/null
+++ b/vendor/golang.org/x/net/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
new file mode 100644
index 00000000..b1394125
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -0,0 +1,256 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code's client connection pooling.
+
+package http2
+
+import (
+ "crypto/tls"
+ "net/http"
+ "sync"
+)
+
+// ClientConnPool manages a pool of HTTP/2 client connections.
+type ClientConnPool interface {
+ GetClientConn(req *http.Request, addr string) (*ClientConn, error)
+ MarkDead(*ClientConn)
+}
+
+// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
+// implementations which can close their idle connections.
+type clientConnPoolIdleCloser interface {
+ ClientConnPool
+ closeIdleConnections()
+}
+
+var (
+ _ clientConnPoolIdleCloser = (*clientConnPool)(nil)
+ _ clientConnPoolIdleCloser = noDialClientConnPool{}
+)
+
+// TODO: use singleflight for dialing and addConnCalls?
+type clientConnPool struct {
+ t *Transport
+
+ mu sync.Mutex // TODO: maybe switch to RWMutex
+ // TODO: add support for sharing conns based on cert names
+ // (e.g. share conn for googleapis.com and appspot.com)
+ conns map[string][]*ClientConn // key is host:port
+ dialing map[string]*dialCall // currently in-flight dials
+ keys map[*ClientConn][]string
+ addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
+}
+
+func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+ return p.getClientConn(req, addr, dialOnMiss)
+}
+
+const (
+ dialOnMiss = true
+ noDialOnMiss = false
+)
+
+func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
+ if isConnectionCloseRequest(req) && dialOnMiss {
+ // It gets its own connection.
+ const singleUse = true
+ cc, err := p.t.dialClientConn(addr, singleUse)
+ if err != nil {
+ return nil, err
+ }
+ return cc, nil
+ }
+ p.mu.Lock()
+ for _, cc := range p.conns[addr] {
+ if cc.CanTakeNewRequest() {
+ p.mu.Unlock()
+ return cc, nil
+ }
+ }
+ if !dialOnMiss {
+ p.mu.Unlock()
+ return nil, ErrNoCachedConn
+ }
+ call := p.getStartDialLocked(addr)
+ p.mu.Unlock()
+ <-call.done
+ return call.res, call.err
+}
+
+// dialCall is an in-flight Transport dial call to a host.
+type dialCall struct {
+ p *clientConnPool
+ done chan struct{} // closed when done
+ res *ClientConn // valid after done is closed
+ err error // valid after done is closed
+}
+
+// requires p.mu is held.
+func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
+ if call, ok := p.dialing[addr]; ok {
+ // A dial is already in-flight. Don't start another.
+ return call
+ }
+ call := &dialCall{p: p, done: make(chan struct{})}
+ if p.dialing == nil {
+ p.dialing = make(map[string]*dialCall)
+ }
+ p.dialing[addr] = call
+ go call.dial(addr)
+ return call
+}
+
+// run in its own goroutine.
+func (c *dialCall) dial(addr string) {
+ const singleUse = false // shared conn
+ c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
+ close(c.done)
+
+ c.p.mu.Lock()
+ delete(c.p.dialing, addr)
+ if c.err == nil {
+ c.p.addConnLocked(addr, c.res)
+ }
+ c.p.mu.Unlock()
+}
+
+// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
+// already exist. It coalesces concurrent calls with the same key.
+// This is used by the http1 Transport code when it creates a new connection. Because
+// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
+// the protocol), it can get into a situation where it has multiple TLS connections.
+// This code decides which ones live or die.
+// The return value reports whether c was used.
+// c is never closed.
+func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
+ p.mu.Lock()
+ for _, cc := range p.conns[key] {
+ if cc.CanTakeNewRequest() {
+ p.mu.Unlock()
+ return false, nil
+ }
+ }
+ call, dup := p.addConnCalls[key]
+ if !dup {
+ if p.addConnCalls == nil {
+ p.addConnCalls = make(map[string]*addConnCall)
+ }
+ call = &addConnCall{
+ p: p,
+ done: make(chan struct{}),
+ }
+ p.addConnCalls[key] = call
+ go call.run(t, key, c)
+ }
+ p.mu.Unlock()
+
+ <-call.done
+ if call.err != nil {
+ return false, call.err
+ }
+ return !dup, nil
+}
+
+type addConnCall struct {
+ p *clientConnPool
+ done chan struct{} // closed when done
+ err error
+}
+
+func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
+ cc, err := t.NewClientConn(tc)
+
+ p := c.p
+ p.mu.Lock()
+ if err != nil {
+ c.err = err
+ } else {
+ p.addConnLocked(key, cc)
+ }
+ delete(p.addConnCalls, key)
+ p.mu.Unlock()
+ close(c.done)
+}
+
+func (p *clientConnPool) addConn(key string, cc *ClientConn) {
+ p.mu.Lock()
+ p.addConnLocked(key, cc)
+ p.mu.Unlock()
+}
+
+// p.mu must be held
+func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
+ for _, v := range p.conns[key] {
+ if v == cc {
+ return
+ }
+ }
+ if p.conns == nil {
+ p.conns = make(map[string][]*ClientConn)
+ }
+ if p.keys == nil {
+ p.keys = make(map[*ClientConn][]string)
+ }
+ p.conns[key] = append(p.conns[key], cc)
+ p.keys[cc] = append(p.keys[cc], key)
+}
+
+func (p *clientConnPool) MarkDead(cc *ClientConn) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ for _, key := range p.keys[cc] {
+ vv, ok := p.conns[key]
+ if !ok {
+ continue
+ }
+ newList := filterOutClientConn(vv, cc)
+ if len(newList) > 0 {
+ p.conns[key] = newList
+ } else {
+ delete(p.conns, key)
+ }
+ }
+ delete(p.keys, cc)
+}
+
+func (p *clientConnPool) closeIdleConnections() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // TODO: don't close a cc if it was just added to the pool
+ // milliseconds ago and has never been used. There's currently
+ // a small race window with the HTTP/1 Transport's integration
+ // where it can add an idle conn just before using it, and
+ // somebody else can concurrently call CloseIdleConns and
+ // break some caller's RoundTrip.
+ for _, vv := range p.conns {
+ for _, cc := range vv {
+ cc.closeIfIdle()
+ }
+ }
+}
+
+func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
+ out := in[:0]
+ for _, v := range in {
+ if v != exclude {
+ out = append(out, v)
+ }
+ }
+ // If we filtered it out, zero the now-unused last slot so the
+ // backing array doesn't keep the ClientConn reachable by the GC.
+ if len(in) != len(out) {
+ in[len(in)-1] = nil
+ }
+ return out
+}
+
+// noDialClientConnPool is an implementation of http2.ClientConnPool
+// which never dials. We let the HTTP/1.1 client dial and use its TLS
+// connection instead.
+type noDialClientConnPool struct{ *clientConnPool }
+
+func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+ return p.getClientConn(req, addr, noDialOnMiss)
+}
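
The pool above coalesces dials: every goroutine that misses the cache for the
same addr waits on a single in-flight dialCall rather than opening its own
connection. A minimal standalone sketch of just that pattern — it omits the
conns cache, keys bookkeeping, and MarkDead handling, and dials plain TCP
instead of an HTTP/2 session:

    package main

    import (
        "fmt"
        "net"
        "sync"
    )

    // dialCall mirrors the shape above: one in-flight dial per address.
    type dialCall struct {
        done chan struct{} // closed when the dial finishes
        conn net.Conn      // valid after done is closed
        err  error         // valid after done is closed
    }

    type pool struct {
        mu      sync.Mutex
        dialing map[string]*dialCall // keyed by host:port
    }

    // get returns a shared connection for addr, starting at most one
    // dial no matter how many goroutines call it concurrently.
    func (p *pool) get(addr string) (net.Conn, error) {
        p.mu.Lock()
        call, ok := p.dialing[addr]
        if !ok {
            call = &dialCall{done: make(chan struct{})}
            if p.dialing == nil {
                p.dialing = make(map[string]*dialCall)
            }
            p.dialing[addr] = call
            go func() {
                call.conn, call.err = net.Dial("tcp", addr)
                close(call.done)
                p.mu.Lock()
                delete(p.dialing, addr) // a real pool would cache the conn here
                p.mu.Unlock()
            }()
        }
        p.mu.Unlock()
        <-call.done // every waiter observes the same result
        return call.conn, call.err
    }

    func main() {
        p := &pool{}
        conn, err := p.get("example.com:443")
        fmt.Println(conn, err)
    }
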
diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go
new file mode 100644
index 00000000..4f720f53
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/configure_transport.go
@@ -0,0 +1,80 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.6
+
+package http2
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+)
+
+func configureTransport(t1 *http.Transport) (*Transport, error) {
+ connPool := new(clientConnPool)
+ t2 := &Transport{
+ ConnPool: noDialClientConnPool{connPool},
+ t1: t1,
+ }
+ connPool.t = t2
+ if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
+ return nil, err
+ }
+ if t1.TLSClientConfig == nil {
+ t1.TLSClientConfig = new(tls.Config)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
+ t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
+ t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
+ }
+ upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
+ addr := authorityAddr("https", authority)
+ if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
+ go c.Close()
+ return erringRoundTripper{err}
+ } else if !used {
+ // Turns out we don't need this c.
+ // For example, two goroutines made requests to the same host
+ // at the same time, both kicking off TCP dials. (since protocol
+ // was unknown)
+ go c.Close()
+ }
+ return t2
+ }
+ if m := t1.TLSNextProto; len(m) == 0 {
+ t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
+ "h2": upgradeFn,
+ }
+ } else {
+ m["h2"] = upgradeFn
+ }
+ return t2, nil
+}
+
+// registerHTTPSProtocol calls Transport.RegisterProtocol but
+// converting panics into errors.
+func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf("%v", e)
+ }
+ }()
+ t.RegisterProtocol("https", rt)
+ return nil
+}
+
+// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
+// if there's already a cached connection to the host.
+type noDialH2RoundTripper struct{ t *Transport }
+
+func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ res, err := rt.t.RoundTrip(req)
+ if err == ErrNoCachedConn {
+ return nil, http.ErrSkipAltProtocol
+ }
+ return res, err
+}
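
configureTransport is reached through the exported http2.ConfigureTransport
wrapper, which upgrades an existing net/http Transport in place on Go 1.6+.
A usage sketch (the URL is illustrative):

    package main

    import (
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        t := &http.Transport{}
        // Adds "h2" to the TLS NextProtos and installs the upgrade
        // function shown above, reusing t's TLS connections for HTTP/2.
        if err := http2.ConfigureTransport(t); err != nil {
            log.Fatal(err)
        }
        client := &http.Client{Transport: t}
        res, err := client.Get("https://http2.golang.org/reqinfo")
        if err != nil {
            log.Fatal(err)
        }
        defer res.Body.Close()
        log.Println("protocol:", res.Proto) // "HTTP/2.0" when the server speaks h2
    }
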
diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go
index 1f8de65e..20fd7626 100644
--- a/vendor/golang.org/x/net/http2/errors.go
+++ b/vendor/golang.org/x/net/http2/errors.go
@@ -4,7 +4,10 @@
package http2
-import "fmt"
+import (
+ "errors"
+ "fmt"
+)
// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
type ErrCode uint32
@@ -61,9 +64,17 @@ func (e ConnectionError) Error() string { return fmt.Sprintf("connection error:
type StreamError struct {
StreamID uint32
Code ErrCode
+ Cause error // optional additional detail
+}
+
+func streamError(id uint32, code ErrCode) StreamError {
+ return StreamError{StreamID: id, Code: code}
}
func (e StreamError) Error() string {
+ if e.Cause != nil {
+ return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
+ }
return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
}
@@ -75,3 +86,45 @@ func (e StreamError) Error() string {
type goAwayFlowError struct{}
func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
+
+// connError represents an HTTP/2 connection-level error carrying an
+// informative reason about why it occurred.
+// Errors of this type are only returned by the frame parser functions
+// and converted into ConnectionError(ErrCodeProtocol).
+type connError struct {
+ Code ErrCode
+ Reason string
+}
+
+func (e connError) Error() string {
+ return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
+}
+
+type pseudoHeaderError string
+
+func (e pseudoHeaderError) Error() string {
+ return fmt.Sprintf("invalid pseudo-header %q", string(e))
+}
+
+type duplicatePseudoHeaderError string
+
+func (e duplicatePseudoHeaderError) Error() string {
+ return fmt.Sprintf("duplicate pseudo-header %q", string(e))
+}
+
+type headerFieldNameError string
+
+func (e headerFieldNameError) Error() string {
+ return fmt.Sprintf("invalid header field name %q", string(e))
+}
+
+type headerFieldValueError string
+
+func (e headerFieldValueError) Error() string {
+ return fmt.Sprintf("invalid header field value %q", string(e))
+}
+
+var (
+ errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
+ errPseudoAfterRegular = errors.New("pseudo header field after regular")
+)
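
The new Cause field lets the frame parser attach detail that surfaces in the
error string. A small sketch of how such an error renders (the cause text
here is hypothetical):

    package main

    import (
        "errors"
        "fmt"

        "golang.org/x/net/http2"
    )

    func main() {
        err := http2.StreamError{
            StreamID: 3,
            Code:     http2.ErrCodeProtocol,
            Cause:    errors.New("pseudo header field after regular"),
        }
        fmt.Println(err)
        // stream error: stream ID 3; PROTOCOL_ERROR; pseudo header field after regular
    }
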
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index 30c333a1..c9b09bb6 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -10,7 +10,12 @@ import (
"errors"
"fmt"
"io"
+ "log"
+ "strings"
"sync"
+
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/lex/httplex"
)
const frameHeaderLen = 9
@@ -171,6 +176,12 @@ func (h FrameHeader) Header() FrameHeader { return h }
func (h FrameHeader) String() string {
var buf bytes.Buffer
buf.WriteString("[FrameHeader ")
+ h.writeDebug(&buf)
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+func (h FrameHeader) writeDebug(buf *bytes.Buffer) {
buf.WriteString(h.Type.String())
if h.Flags != 0 {
buf.WriteString(" flags=")
@@ -187,15 +198,14 @@ func (h FrameHeader) String() string {
if name != "" {
buf.WriteString(name)
} else {
- fmt.Fprintf(&buf, "0x%x", 1<<i)
+ fmt.Fprintf(buf, "0x%x", 1<<i)
}
}
}
if h.StreamID != 0 {
- fmt.Fprintf(&buf, " stream=%d", h.StreamID)
+ fmt.Fprintf(buf, " stream=%d", h.StreamID)
}
- fmt.Fprintf(&buf, " len=%d]", h.Length)
- return buf.String()
+ fmt.Fprintf(buf, " len=%d", h.Length)
}
func (h *FrameHeader) checkValid() {
@@ -255,6 +265,11 @@ type Frame interface {
type Framer struct {
r io.Reader
lastFrame Frame
+ errDetail error
+
+ // lastHeaderStream is non-zero if the last frame was an
+ // unfinished HEADERS/CONTINUATION.
+ lastHeaderStream uint32
maxReadSize uint32
headerBuf [frameHeaderLen]byte
@@ -271,18 +286,48 @@ type Framer struct {
wbuf []byte
// AllowIllegalWrites permits the Framer's Write methods to
- // write frames that do not conform to the HTTP/2 spec. This
+ // write frames that do not conform to the HTTP/2 spec. This
// permits using the Framer to test other HTTP/2
// implementations' conformance to the spec.
// If false, the Write methods will prefer to return an error
// rather than comply.
AllowIllegalWrites bool
+ // AllowIllegalReads permits the Framer's ReadFrame method
+ // to return non-compliant frames or frame orders.
+ // This is for testing and permits using the Framer to test
+ // other HTTP/2 implementations' conformance to the spec.
+ // It is not compatible with ReadMetaHeaders.
+ AllowIllegalReads bool
+
+ // ReadMetaHeaders if non-nil causes ReadFrame to merge
+ // HEADERS and CONTINUATION frames together and return
+ // MetaHeadersFrame instead.
+ ReadMetaHeaders *hpack.Decoder
+
+ // MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.
+ // It's used only if ReadMetaHeaders is set; 0 means a sane default
+ // (currently 16MB).
+ // If the limit is hit, MetaHeadersFrame.Truncated is set true.
+ MaxHeaderListSize uint32
+
// TODO: track which type of frame & with which flags was sent
// last. Then return an error (unless AllowIllegalWrites) if
// we're in the middle of a header block and a
// non-Continuation or Continuation on a different stream is
// attempted to be written.
+
+ logReads bool
+
+ debugFramer *Framer // only used for logging written frames
+ debugFramerBuf *bytes.Buffer
+}
+
+func (fr *Framer) maxHeaderListSize() uint32 {
+ if fr.MaxHeaderListSize == 0 {
+ return 16 << 20 // sane default, per docs
+ }
+ return fr.MaxHeaderListSize
}
func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
@@ -310,6 +355,10 @@ func (f *Framer) endWrite() error {
byte(length>>16),
byte(length>>8),
byte(length))
+ if logFrameWrites {
+ f.logWrite()
+ }
+
n, err := f.w.Write(f.wbuf)
if err == nil && n != len(f.wbuf) {
err = io.ErrShortWrite
@@ -317,6 +366,24 @@ func (f *Framer) endWrite() error {
return err
}
+func (f *Framer) logWrite() {
+ if f.debugFramer == nil {
+ f.debugFramerBuf = new(bytes.Buffer)
+ f.debugFramer = NewFramer(nil, f.debugFramerBuf)
+ f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
+ // Let us read anything, even if we accidentally wrote it
+ // in the wrong order:
+ f.debugFramer.AllowIllegalReads = true
+ }
+ f.debugFramerBuf.Write(f.wbuf)
+ fr, err := f.debugFramer.ReadFrame()
+ if err != nil {
+ log.Printf("http2: Framer %p: failed to decode just-written frame", f)
+ return
+ }
+ log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
+}
+
func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
@@ -332,8 +399,9 @@ const (
// NewFramer returns a Framer that writes frames to w and reads them from r.
func NewFramer(w io.Writer, r io.Reader) *Framer {
fr := &Framer{
- w: w,
- r: r,
+ w: w,
+ r: r,
+ logReads: logFrameReads,
}
fr.getReadBuf = func(size uint32) []byte {
if cap(fr.readBuf) >= int(size) {
@@ -357,15 +425,39 @@ func (fr *Framer) SetMaxReadFrameSize(v uint32) {
fr.maxReadSize = v
}
+// ErrorDetail returns a more detailed error of the last error
+// returned by Framer.ReadFrame. For instance, if ReadFrame
+// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
+// will say exactly what was invalid. ErrorDetail is not guaranteed
+// to return a non-nil value and like the rest of the http2 package,
+// its return value is not protected by an API compatibility promise.
+// ErrorDetail is reset after the next call to ReadFrame.
+func (fr *Framer) ErrorDetail() error {
+ return fr.errDetail
+}
+
// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
// sends a frame that is larger than declared with SetMaxReadFrameSize.
var ErrFrameTooLarge = errors.New("http2: frame too large")
+// terminalReadFrameError reports whether err is an unrecoverable
+// error from ReadFrame and no other frames should be read.
+func terminalReadFrameError(err error) bool {
+ if _, ok := err.(StreamError); ok {
+ return false
+ }
+ return err != nil
+}
+
// ReadFrame reads a single frame. The returned Frame is only valid
// until the next call to ReadFrame.
-// If the frame is larger than previously set with SetMaxReadFrameSize,
-// the returned error is ErrFrameTooLarge.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
func (fr *Framer) ReadFrame() (Frame, error) {
+ fr.errDetail = nil
if fr.lastFrame != nil {
fr.lastFrame.invalidate()
}
@@ -382,12 +474,71 @@ func (fr *Framer) ReadFrame() (Frame, error) {
}
f, err := typeFrameParser(fh.Type)(fh, payload)
if err != nil {
+ if ce, ok := err.(connError); ok {
+ return nil, fr.connError(ce.Code, ce.Reason)
+ }
return nil, err
}
- fr.lastFrame = f
+ if err := fr.checkFrameOrder(f); err != nil {
+ return nil, err
+ }
+ if fr.logReads {
+ log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f))
+ }
+ if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
+ return fr.readMetaFrame(f.(*HeadersFrame))
+ }
return f, nil
}
+// connError returns ConnectionError(code) but first
+// stashes away a public reason so the caller can optionally relay it
+// to the peer before hanging up on them. This might help others debug
+// their implementations.
+func (fr *Framer) connError(code ErrCode, reason string) error {
+ fr.errDetail = errors.New(reason)
+ return ConnectionError(code)
+}
+
+// checkFrameOrder reports an error if f is an invalid frame to return
+// next from ReadFrame. Mostly it checks whether HEADERS and
+// CONTINUATION frames are contiguous.
+func (fr *Framer) checkFrameOrder(f Frame) error {
+ last := fr.lastFrame
+ fr.lastFrame = f
+ if fr.AllowIllegalReads {
+ return nil
+ }
+
+ fh := f.Header()
+ if fr.lastHeaderStream != 0 {
+ if fh.Type != FrameContinuation {
+ return fr.connError(ErrCodeProtocol,
+ fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
+ fh.Type, fh.StreamID,
+ last.Header().Type, fr.lastHeaderStream))
+ }
+ if fh.StreamID != fr.lastHeaderStream {
+ return fr.connError(ErrCodeProtocol,
+ fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
+ fh.StreamID, fr.lastHeaderStream))
+ }
+ } else if fh.Type == FrameContinuation {
+ return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
+ }
+
+ switch fh.Type {
+ case FrameHeaders, FrameContinuation:
+ if fh.Flags.Has(FlagHeadersEndHeaders) {
+ fr.lastHeaderStream = 0
+ } else {
+ fr.lastHeaderStream = fh.StreamID
+ }
+ }
+
+ return nil
+}
+
// A DataFrame conveys arbitrary, variable-length sequences of octets
// associated with a stream.
// See http://http2.github.io/http2-spec/#rfc.section.6.1
@@ -416,7 +567,7 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
// field is 0x0, the recipient MUST respond with a
// connection error (Section 5.4.1) of type
// PROTOCOL_ERROR.
- return nil, ConnectionError(ErrCodeProtocol)
+ return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
}
f := &DataFrame{
FrameHeader: fh,
@@ -434,13 +585,21 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
// length of the frame payload, the recipient MUST
// treat this as a connection error.
// Filed: https://github.com/http2/http2-spec/issues/610
- return nil, ConnectionError(ErrCodeProtocol)
+ return nil, connError{ErrCodeProtocol, "pad size larger than data payload"}
}
f.data = payload[:len(payload)-int(padSize)]
return f, nil
}
-var errStreamID = errors.New("invalid streamid")
+var (
+ errStreamID = errors.New("invalid stream ID")
+ errDepStreamID = errors.New("invalid dependent stream ID")
+ errPadLength = errors.New("pad length too large")
+)
+
+func validStreamIDOrZero(streamID uint32) bool {
+ return streamID&(1<<31) == 0
+}
func validStreamID(streamID uint32) bool {
return streamID != 0 && streamID&(1<<31) == 0
@@ -449,18 +608,40 @@ func validStreamID(streamID uint32) bool {
// WriteData writes a DATA frame.
//
// It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
- // TODO: ignoring padding for now. will add when somebody cares.
+ return f.WriteDataPadded(streamID, endStream, data, nil)
+}
+
+// WriteDataPadded writes a DATA frame with optional padding.
+//
+// If pad is nil, the padding bit is not sent.
+// The length of pad must not exceed 255 bytes.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
if !validStreamID(streamID) && !f.AllowIllegalWrites {
return errStreamID
}
+ if len(pad) > 255 {
+ return errPadLength
+ }
var flags Flags
if endStream {
flags |= FlagDataEndStream
}
+ if pad != nil {
+ flags |= FlagDataPadded
+ }
f.startWrite(FrameData, flags, streamID)
+ if pad != nil {
+ f.wbuf = append(f.wbuf, byte(len(pad)))
+ }
f.wbuf = append(f.wbuf, data...)
+ f.wbuf = append(f.wbuf, pad...)
return f.endWrite()
}
@@ -574,6 +755,8 @@ type PingFrame struct {
Data [8]byte
}
+func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
+
func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
if len(payload) != 8 {
return nil, ConnectionError(ErrCodeFrameSize)
@@ -680,7 +863,7 @@ func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
if fh.StreamID == 0 {
return nil, ConnectionError(ErrCodeProtocol)
}
- return nil, StreamError{fh.StreamID, ErrCodeProtocol}
+ return nil, streamError(fh.StreamID, ErrCodeProtocol)
}
return &WindowUpdateFrame{
FrameHeader: fh,
@@ -739,7 +922,7 @@ func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
// is received whose stream identifier field is 0x0, the recipient MUST
// respond with a connection error (Section 5.4.1) of type
// PROTOCOL_ERROR.
- return nil, ConnectionError(ErrCodeProtocol)
+ return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
}
var padLength uint8
if fh.Flags.Has(FlagHeadersPadded) {
@@ -761,7 +944,7 @@ func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
}
}
if len(p)-int(padLength) <= 0 {
- return nil, StreamError{fh.StreamID, ErrCodeProtocol}
+ return nil, streamError(fh.StreamID, ErrCodeProtocol)
}
hf.headerFragBuf = p[:len(p)-int(padLength)]
return hf, nil
@@ -825,8 +1008,8 @@ func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
}
if !p.Priority.IsZero() {
v := p.Priority.StreamDep
- if !validStreamID(v) && !f.AllowIllegalWrites {
- return errors.New("invalid dependent stream id")
+ if !validStreamIDOrZero(v) && !f.AllowIllegalWrites {
+ return errDepStreamID
}
if p.Priority.Exclusive {
v |= 1 << 31
@@ -869,10 +1052,10 @@ func (p PriorityParam) IsZero() bool {
func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
if fh.StreamID == 0 {
- return nil, ConnectionError(ErrCodeProtocol)
+ return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
}
if len(payload) != 5 {
- return nil, ConnectionError(ErrCodeFrameSize)
+ return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
}
v := binary.BigEndian.Uint32(payload[:4])
streamID := v & 0x7fffffff // mask off high bit
@@ -894,6 +1077,9 @@ func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {
if !validStreamID(streamID) && !f.AllowIllegalWrites {
return errStreamID
}
+ if !validStreamIDOrZero(p.StreamDep) {
+ return errDepStreamID
+ }
f.startWrite(FramePriority, 0, streamID)
v := p.StreamDep
if p.Exclusive {
@@ -942,13 +1128,12 @@ type ContinuationFrame struct {
}
func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
+ }
return &ContinuationFrame{fh, p}, nil
}
-func (f *ContinuationFrame) StreamEnded() bool {
- return f.FrameHeader.Flags.Has(FlagDataEndStream)
-}
-
func (f *ContinuationFrame) HeaderBlockFragment() []byte {
f.checkValid()
return f.headerFragBuf
@@ -1110,3 +1295,245 @@ type streamEnder interface {
type headersEnder interface {
HeadersEnded() bool
}
+
+type headersOrContinuation interface {
+ headersEnder
+ HeaderBlockFragment() []byte
+}
+
+// A MetaHeadersFrame is the representation of one HEADERS frame and
+// zero or more contiguous CONTINUATION frames and the decoding of
+// their HPACK-encoded contents.
+//
+// This type of frame does not appear on the wire and is only returned
+// by the Framer when Framer.ReadMetaHeaders is set.
+type MetaHeadersFrame struct {
+ *HeadersFrame
+
+ // Fields are the fields contained in the HEADERS and
+ // CONTINUATION frames. The underlying slice is owned by the
+ // Framer and must not be retained after the next call to
+ // ReadFrame.
+ //
+ // Fields are guaranteed to be in the correct http2 order and
+ // not have unknown pseudo header fields or invalid header
+ // field names or values. Required pseudo header fields may be
+ // missing, however. Use the MetaHeadersFrame.PseudoValue accessor
+ // method to access pseudo headers.
+ Fields []hpack.HeaderField
+
+ // Truncated is whether the max header list size limit was hit
+ // and Fields is incomplete. The hpack decoder state is still
+ // valid, however.
+ Truncated bool
+}
+
+// PseudoValue returns the given pseudo header field's value.
+// The provided pseudo field should not contain the leading colon.
+func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string {
+ for _, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return ""
+ }
+ if hf.Name[1:] == pseudo {
+ return hf.Value
+ }
+ }
+ return ""
+}
+
+// RegularFields returns the regular (non-pseudo) header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[i:]
+ }
+ }
+ return nil
+}
+
+// PseudoFields returns the pseudo header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[:i]
+ }
+ }
+ return mh.Fields
+}
+
+func (mh *MetaHeadersFrame) checkPseudos() error {
+ var isRequest, isResponse bool
+ pf := mh.PseudoFields()
+ for i, hf := range pf {
+ switch hf.Name {
+ case ":method", ":path", ":scheme", ":authority":
+ isRequest = true
+ case ":status":
+ isResponse = true
+ default:
+ return pseudoHeaderError(hf.Name)
+ }
+ // Check for duplicates.
+ // This would be a bad algorithm, but N is 4.
+ // And this doesn't allocate.
+ for _, hf2 := range pf[:i] {
+ if hf.Name == hf2.Name {
+ return duplicatePseudoHeaderError(hf.Name)
+ }
+ }
+ }
+ if isRequest && isResponse {
+ return errMixPseudoHeaderTypes
+ }
+ return nil
+}
+
+func (fr *Framer) maxHeaderStringLen() int {
+ v := fr.maxHeaderListSize()
+ if uint32(int(v)) == v {
+ return int(v)
+ }
+ // They had a crazy big number for MaxHeaderBytes anyway,
+ // so give them unlimited header lengths:
+ return 0
+}
+
+// readMetaFrame reads 0 or more CONTINUATION frames from fr, merges
+// them into the provided hf, and returns a MetaHeadersFrame with the
+// decoded hpack values.
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
+ if fr.AllowIllegalReads {
+ return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
+ }
+ mh := &MetaHeadersFrame{
+ HeadersFrame: hf,
+ }
+ var remainSize = fr.maxHeaderListSize()
+ var sawRegular bool
+
+ var invalid error // pseudo header field errors
+ hdec := fr.ReadMetaHeaders
+ hdec.SetEmitEnabled(true)
+ hdec.SetMaxStringLength(fr.maxHeaderStringLen())
+ hdec.SetEmitFunc(func(hf hpack.HeaderField) {
+ if VerboseLogs && logFrameReads {
+ log.Printf("http2: decoded hpack field %+v", hf)
+ }
+ if !httplex.ValidHeaderFieldValue(hf.Value) {
+ invalid = headerFieldValueError(hf.Value)
+ }
+ isPseudo := strings.HasPrefix(hf.Name, ":")
+ if isPseudo {
+ if sawRegular {
+ invalid = errPseudoAfterRegular
+ }
+ } else {
+ sawRegular = true
+ if !validWireHeaderFieldName(hf.Name) {
+ invalid = headerFieldNameError(hf.Name)
+ }
+ }
+
+ if invalid != nil {
+ hdec.SetEmitEnabled(false)
+ return
+ }
+
+ size := hf.Size()
+ if size > remainSize {
+ hdec.SetEmitEnabled(false)
+ mh.Truncated = true
+ return
+ }
+ remainSize -= size
+
+ mh.Fields = append(mh.Fields, hf)
+ })
+ // Lose reference to MetaHeadersFrame:
+ defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})
+
+ var hc headersOrContinuation = hf
+ for {
+ frag := hc.HeaderBlockFragment()
+ if _, err := hdec.Write(frag); err != nil {
+ return nil, ConnectionError(ErrCodeCompression)
+ }
+
+ if hc.HeadersEnded() {
+ break
+ }
+ if f, err := fr.ReadFrame(); err != nil {
+ return nil, err
+ } else {
+ hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder
+ }
+ }
+
+ mh.HeadersFrame.headerFragBuf = nil
+ mh.HeadersFrame.invalidate()
+
+ if err := hdec.Close(); err != nil {
+ return nil, ConnectionError(ErrCodeCompression)
+ }
+ if invalid != nil {
+ fr.errDetail = invalid
+ if VerboseLogs {
+ log.Printf("http2: invalid header: %v", invalid)
+ }
+ return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid}
+ }
+ if err := mh.checkPseudos(); err != nil {
+ fr.errDetail = err
+ if VerboseLogs {
+ log.Printf("http2: invalid pseudo headers: %v", err)
+ }
+ return nil, StreamError{mh.StreamID, ErrCodeProtocol, err}
+ }
+ return mh, nil
+}
+
+func summarizeFrame(f Frame) string {
+ var buf bytes.Buffer
+ f.Header().writeDebug(&buf)
+ switch f := f.(type) {
+ case *SettingsFrame:
+ n := 0
+ f.ForeachSetting(func(s Setting) error {
+ n++
+ if n == 1 {
+ buf.WriteString(", settings:")
+ }
+ fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
+ return nil
+ })
+ if n > 0 {
+ buf.Truncate(buf.Len() - 1) // remove trailing comma
+ }
+ case *DataFrame:
+ data := f.Data()
+ const max = 256
+ if len(data) > max {
+ data = data[:max]
+ }
+ fmt.Fprintf(&buf, " data=%q", data)
+ if len(f.Data()) > max {
+ fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
+ }
+ case *WindowUpdateFrame:
+ if f.StreamID == 0 {
+ buf.WriteString(" (conn)")
+ }
+ fmt.Fprintf(&buf, " incr=%v", f.Increment)
+ case *PingFrame:
+ fmt.Fprintf(&buf, " ping=%q", f.Data[:])
+ case *GoAwayFrame:
+ fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
+ f.LastStreamID, f.ErrCode, f.debugData)
+ case *RSTStreamFrame:
+ fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
+ }
+ return buf.String()
+}
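
With ReadMetaHeaders set, callers receive one *MetaHeadersFrame per header
block instead of raw HEADERS/CONTINUATION frames. A sketch of driving the
Framer this way, assuming conn already carries an established HTTP/2 session
(preface and SETTINGS exchanged):

    package sketch

    import (
        "log"
        "net"

        "golang.org/x/net/http2"
        "golang.org/x/net/http2/hpack"
    )

    // readHeaders assumes conn already carries an HTTP/2 session and
    // reads frames until one complete header block has been decoded.
    func readHeaders(conn net.Conn) error {
        fr := http2.NewFramer(conn, conn)
        fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)
        fr.MaxHeaderListSize = 1 << 20 // 0 would mean the 16MB default

        for {
            f, err := fr.ReadFrame()
            if err != nil {
                return err
            }
            mh, ok := f.(*http2.MetaHeadersFrame)
            if !ok {
                continue // SETTINGS, WINDOW_UPDATE, etc.
            }
            log.Println(":path =", mh.PseudoValue("path"))
            for _, hf := range mh.RegularFields() {
                log.Printf("%s: %s", hf.Name, hf.Value)
            }
            return nil
        }
    }
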
diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go
new file mode 100644
index 00000000..2b72855f
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go16.go
@@ -0,0 +1,43 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.6
+
+package http2
+
+import (
+ "crypto/tls"
+ "net/http"
+ "time"
+)
+
+func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
+ return t1.ExpectContinueTimeout
+}
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+func isBadCipher(cipher uint16) bool {
+ switch cipher {
+ case tls.TLS_RSA_WITH_RC4_128_SHA,
+ tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
+ tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
+ tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ // Reject cipher suites from Appendix A.
+ // "This list includes those cipher suites that do not
+ // offer an ephemeral key exchange and those that are
+ // based on the TLS null, stream or block cipher type"
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go
new file mode 100644
index 00000000..730319dd
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go17.go
@@ -0,0 +1,94 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package http2
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "net/http/httptrace"
+ "time"
+)
+
+type contextContext interface {
+ context.Context
+}
+
+func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
+ ctx, cancel = context.WithCancel(context.Background())
+ ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
+ if hs := opts.baseConfig(); hs != nil {
+ ctx = context.WithValue(ctx, http.ServerContextKey, hs)
+ }
+ return
+}
+
+func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
+ return context.WithCancel(ctx)
+}
+
+func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
+ return req.WithContext(ctx)
+}
+
+type clientTrace httptrace.ClientTrace
+
+func reqContext(r *http.Request) context.Context { return r.Context() }
+
+func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
+
+func traceGotConn(req *http.Request, cc *ClientConn) {
+ trace := httptrace.ContextClientTrace(req.Context())
+ if trace == nil || trace.GotConn == nil {
+ return
+ }
+ ci := httptrace.GotConnInfo{Conn: cc.tconn}
+ cc.mu.Lock()
+ ci.Reused = cc.nextStreamID > 1
+ ci.WasIdle = len(cc.streams) == 0 && ci.Reused
+ if ci.WasIdle && !cc.lastActive.IsZero() {
+ ci.IdleTime = time.Now().Sub(cc.lastActive)
+ }
+ cc.mu.Unlock()
+
+ trace.GotConn(ci)
+}
+
+func traceWroteHeaders(trace *clientTrace) {
+ if trace != nil && trace.WroteHeaders != nil {
+ trace.WroteHeaders()
+ }
+}
+
+func traceGot100Continue(trace *clientTrace) {
+ if trace != nil && trace.Got100Continue != nil {
+ trace.Got100Continue()
+ }
+}
+
+func traceWait100Continue(trace *clientTrace) {
+ if trace != nil && trace.Wait100Continue != nil {
+ trace.Wait100Continue()
+ }
+}
+
+func traceWroteRequest(trace *clientTrace, err error) {
+ if trace != nil && trace.WroteRequest != nil {
+ trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
+ }
+}
+
+func traceFirstResponseByte(trace *clientTrace) {
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ trace.GotFirstResponseByte()
+ }
+}
+
+func requestTrace(req *http.Request) *clientTrace {
+ trace := httptrace.ContextClientTrace(req.Context())
+ return (*clientTrace)(trace)
+}
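
These go1.7 shims bridge the HTTP/2 client to net/http/httptrace. From
calling code, the hooks they forward can be observed like this (a sketch;
the URL is illustrative, and the callbacks fire only when the request
actually rides HTTP/2 through these paths):

    package main

    import (
        "log"
        "net/http"
        "net/http/httptrace"
    )

    func main() {
        trace := &httptrace.ClientTrace{
            GotConn: func(ci httptrace.GotConnInfo) {
                log.Printf("got conn: reused=%v wasIdle=%v", ci.Reused, ci.WasIdle)
            },
            WroteHeaders:         func() { log.Println("wrote headers") },
            GotFirstResponseByte: func() { log.Println("first response byte") },
        }
        req, err := http.NewRequest("GET", "https://http2.golang.org/", nil)
        if err != nil {
            log.Fatal(err)
        }
        req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
        res, err := http.DefaultClient.Do(req)
        if err != nil {
            log.Fatal(err)
        }
        res.Body.Close()
        log.Println(res.Proto)
    }
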
diff --git a/vendor/golang.org/x/net/http2/h2demo/.gitignore b/vendor/golang.org/x/net/http2/h2demo/.gitignore
deleted file mode 100644
index 0de86ddb..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-h2demo
-h2demo.linux
-client-id.dat
-client-secret.dat
-token.dat
diff --git a/vendor/golang.org/x/net/http2/h2demo/Makefile b/vendor/golang.org/x/net/http2/h2demo/Makefile
deleted file mode 100644
index 3a77cf07..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-h2demo.linux: h2demo.go
- GOOS=linux go build --tags=h2demo -o h2demo.linux .
-
-upload: h2demo.linux
- cat h2demo.linux | go run launch.go --write_object=http2-demo-server-tls/h2demo --write_object_is_public
diff --git a/vendor/golang.org/x/net/http2/h2demo/README b/vendor/golang.org/x/net/http2/h2demo/README
deleted file mode 100644
index 212a96f3..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/README
+++ /dev/null
@@ -1,16 +0,0 @@
-
-Client:
- -- Firefox nightly with about:config network.http.spdy.enabled.http2draft set true
- -- Chrome: go to chrome://flags/#enable-spdy4, save and restart (button at bottom)
-
-Make CA:
-$ openssl genrsa -out rootCA.key 2048
-$ openssl req -x509 -new -nodes -key rootCA.key -days 1024 -out rootCA.pem
-... install that to Firefox
-
-Make cert:
-$ openssl genrsa -out server.key 2048
-$ openssl req -new -key server.key -out server.csr
-$ openssl x509 -req -in server.csr -CA rootCA.pem -CAkey rootCA.key -CAcreateserial -out server.crt -days 500
-
-
diff --git a/vendor/golang.org/x/net/http2/h2demo/h2demo.go b/vendor/golang.org/x/net/http2/h2demo/h2demo.go
deleted file mode 100644
index 84b89e2b..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/h2demo.go
+++ /dev/null
@@ -1,436 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build h2demo
-
-package main
-
-import (
- "bytes"
- "crypto/tls"
- "flag"
- "fmt"
- "hash/crc32"
- "image"
- "image/jpeg"
- "io"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "path"
- "regexp"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "camlistore.org/pkg/googlestorage"
- "camlistore.org/pkg/singleflight"
- "golang.org/x/net/http2"
-)
-
-var (
- prod = flag.Bool("prod", false, "Whether to configure itself to be the production http2.golang.org server.")
-
- httpsAddr = flag.String("https_addr", "localhost:4430", "TLS address to listen on ('host:port' or ':port'). Required.")
- httpAddr = flag.String("http_addr", "", "Plain HTTP address to listen on ('host:port', or ':port'). Empty means no HTTP.")
-
- hostHTTP = flag.String("http_host", "", "Optional host or host:port to use for http:// links to this service. By default, this is implied from -http_addr.")
- hostHTTPS = flag.String("https_host", "", "Optional host or host:port to use for http:// links to this service. By default, this is implied from -https_addr.")
-)
-
-func homeOldHTTP(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, `<html>
-<body>
-<h1>Go + HTTP/2</h1>
-<p>Welcome to <a href="https://golang.org/">the Go language</a>'s <a href="https://http2.github.io/">HTTP/2</a> demo & interop server.</p>
-<p>Unfortunately, you're <b>not</b> using HTTP/2 right now. To do so:</p>
-<ul>
- <li>Use Firefox Nightly or go to <b>about:config</b> and enable "network.http.spdy.enabled.http2draft"</li>
- <li>Use Google Chrome Canary and/or go to <b>chrome://flags/#enable-spdy4</b> to <i>Enable SPDY/4</i> (Chrome's name for HTTP/2)</li>
-</ul>
-<p>See code & instructions for connecting at <a href="https://github.com/golang/net/tree/master/http2">https://github.com/golang/net/tree/master/http2</a>.</p>
-
-</body></html>`)
-}
-
-func home(w http.ResponseWriter, r *http.Request) {
- if r.URL.Path != "/" {
- http.NotFound(w, r)
- return
- }
- io.WriteString(w, `<html>
-<body>
-<h1>Go + HTTP/2</h1>
-
-<p>Welcome to <a href="https://golang.org/">the Go language</a>'s <a
-href="https://http2.github.io/">HTTP/2</a> demo & interop server.</p>
-
-<p>Congratulations, <b>you're using HTTP/2 right now</b>.</p>
-
-<p>This server exists for others in the HTTP/2 community to test their HTTP/2 client implementations and point out flaws in our server.</p>
-
-<p> The code is currently at <a
-href="https://golang.org/x/net/http2">golang.org/x/net/http2</a>
-but will move to the Go standard library at some point in the future
-(enabled by default, without users needing to change their code).</p>
-
-<p>Contact info: <i>bradfitz@golang.org</i>, or <a
-href="https://golang.org/issues">file a bug</a>.</p>
-
-<h2>Handlers for testing</h2>
-<ul>
- <li>GET <a href="/reqinfo">/reqinfo</a> to dump the request + headers received</li>
- <li>GET <a href="/clockstream">/clockstream</a> streams the current time every second</li>
- <li>GET <a href="/gophertiles">/gophertiles</a> to see a page with a bunch of images</li>
- <li>GET <a href="/file/gopher.png">/file/gopher.png</a> for a small file (does If-Modified-Since, Content-Range, etc)</li>
- <li>GET <a href="/file/go.src.tar.gz">/file/go.src.tar.gz</a> for a larger file (~10 MB)</li>
- <li>GET <a href="/redirect">/redirect</a> to redirect back to / (this page)</li>
- <li>GET <a href="/goroutines">/goroutines</a> to see all active goroutines in this server</li>
- <li>PUT something to <a href="/crc32">/crc32</a> to get a count of number of bytes and its CRC-32</li>
-</ul>
-
-</body></html>`)
-}
-
-func reqInfoHandler(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "text/plain")
- fmt.Fprintf(w, "Method: %s\n", r.Method)
- fmt.Fprintf(w, "Protocol: %s\n", r.Proto)
- fmt.Fprintf(w, "Host: %s\n", r.Host)
- fmt.Fprintf(w, "RemoteAddr: %s\n", r.RemoteAddr)
- fmt.Fprintf(w, "RequestURI: %q\n", r.RequestURI)
- fmt.Fprintf(w, "URL: %#v\n", r.URL)
- fmt.Fprintf(w, "Body.ContentLength: %d (-1 means unknown)\n", r.ContentLength)
- fmt.Fprintf(w, "Close: %v (relevant for HTTP/1 only)\n", r.Close)
- fmt.Fprintf(w, "TLS: %#v\n", r.TLS)
- fmt.Fprintf(w, "\nHeaders:\n")
- r.Header.Write(w)
-}
-
-func crcHandler(w http.ResponseWriter, r *http.Request) {
- if r.Method != "PUT" {
- http.Error(w, "PUT required.", 400)
- return
- }
- crc := crc32.NewIEEE()
- n, err := io.Copy(crc, r.Body)
- if err == nil {
- w.Header().Set("Content-Type", "text/plain")
- fmt.Fprintf(w, "bytes=%d, CRC32=%x", n, crc.Sum(nil))
- }
-}
-
-var (
- fsGrp singleflight.Group
- fsMu sync.Mutex // guards fsCache
- fsCache = map[string]http.Handler{}
-)
-
-// fileServer returns a file-serving handler that proxies URL.
-// It lazily fetches URL on the first access and caches its contents forever.
-func fileServer(url string) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- hi, err := fsGrp.Do(url, func() (interface{}, error) {
- fsMu.Lock()
- if h, ok := fsCache[url]; ok {
- fsMu.Unlock()
- return h, nil
- }
- fsMu.Unlock()
-
- res, err := http.Get(url)
- if err != nil {
- return nil, err
- }
- defer res.Body.Close()
- slurp, err := ioutil.ReadAll(res.Body)
- if err != nil {
- return nil, err
- }
-
- modTime := time.Now()
- var h http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- http.ServeContent(w, r, path.Base(url), modTime, bytes.NewReader(slurp))
- })
- fsMu.Lock()
- fsCache[url] = h
- fsMu.Unlock()
- return h, nil
- })
- if err != nil {
- http.Error(w, err.Error(), 500)
- return
- }
- hi.(http.Handler).ServeHTTP(w, r)
- })
-}
-
-func clockStreamHandler(w http.ResponseWriter, r *http.Request) {
- clientGone := w.(http.CloseNotifier).CloseNotify()
- w.Header().Set("Content-Type", "text/plain")
- ticker := time.NewTicker(1 * time.Second)
- defer ticker.Stop()
- fmt.Fprintf(w, "# ~1KB of junk to force browsers to start rendering immediately: \n")
- io.WriteString(w, strings.Repeat("# xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n", 13))
-
- for {
- fmt.Fprintf(w, "%v\n", time.Now())
- w.(http.Flusher).Flush()
- select {
- case <-ticker.C:
- case <-clientGone:
- log.Printf("Client %v disconnected from the clock", r.RemoteAddr)
- return
- }
- }
-}
-
-func registerHandlers() {
- tiles := newGopherTilesHandler()
-
- mux2 := http.NewServeMux()
- http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
- if r.TLS == nil {
- if r.URL.Path == "/gophertiles" {
- tiles.ServeHTTP(w, r)
- return
- }
- http.Redirect(w, r, "https://"+httpsHost()+"/", http.StatusFound)
- return
- }
- if r.ProtoMajor == 1 {
- if r.URL.Path == "/reqinfo" {
- reqInfoHandler(w, r)
- return
- }
- homeOldHTTP(w, r)
- return
- }
- mux2.ServeHTTP(w, r)
- })
- mux2.HandleFunc("/", home)
- mux2.Handle("/file/gopher.png", fileServer("https://golang.org/doc/gopher/frontpage.png"))
- mux2.Handle("/file/go.src.tar.gz", fileServer("https://storage.googleapis.com/golang/go1.4.1.src.tar.gz"))
- mux2.HandleFunc("/reqinfo", reqInfoHandler)
- mux2.HandleFunc("/crc32", crcHandler)
- mux2.HandleFunc("/clockstream", clockStreamHandler)
- mux2.Handle("/gophertiles", tiles)
- mux2.HandleFunc("/redirect", func(w http.ResponseWriter, r *http.Request) {
- http.Redirect(w, r, "/", http.StatusFound)
- })
- stripHomedir := regexp.MustCompile(`/(Users|home)/\w+`)
- mux2.HandleFunc("/goroutines", func(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "text/plain; charset=utf-8")
- buf := make([]byte, 2<<20)
- w.Write(stripHomedir.ReplaceAll(buf[:runtime.Stack(buf, true)], nil))
- })
-}
-
-func newGopherTilesHandler() http.Handler {
- const gopherURL = "https://blog.golang.org/go-programming-language-turns-two_gophers.jpg"
- res, err := http.Get(gopherURL)
- if err != nil {
- log.Fatal(err)
- }
- if res.StatusCode != 200 {
- log.Fatalf("Error fetching %s: %v", gopherURL, res.Status)
- }
- slurp, err := ioutil.ReadAll(res.Body)
- res.Body.Close()
- if err != nil {
- log.Fatal(err)
- }
- im, err := jpeg.Decode(bytes.NewReader(slurp))
- if err != nil {
- if len(slurp) > 1024 {
- slurp = slurp[:1024]
- }
- log.Fatalf("Failed to decode gopher image: %v (got %q)", err, slurp)
- }
-
- type subImager interface {
- SubImage(image.Rectangle) image.Image
- }
- const tileSize = 32
- xt := im.Bounds().Max.X / tileSize
- yt := im.Bounds().Max.Y / tileSize
- var tile [][][]byte // y -> x -> jpeg bytes
- for yi := 0; yi < yt; yi++ {
- var row [][]byte
- for xi := 0; xi < xt; xi++ {
- si := im.(subImager).SubImage(image.Rectangle{
- Min: image.Point{xi * tileSize, yi * tileSize},
- Max: image.Point{(xi + 1) * tileSize, (yi + 1) * tileSize},
- })
- buf := new(bytes.Buffer)
- if err := jpeg.Encode(buf, si, &jpeg.Options{Quality: 90}); err != nil {
- log.Fatal(err)
- }
- row = append(row, buf.Bytes())
- }
- tile = append(tile, row)
- }
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- ms, _ := strconv.Atoi(r.FormValue("latency"))
- const nanosPerMilli = 1e6
- if r.FormValue("x") != "" {
- x, _ := strconv.Atoi(r.FormValue("x"))
- y, _ := strconv.Atoi(r.FormValue("y"))
- if ms <= 1000 {
- time.Sleep(time.Duration(ms) * nanosPerMilli)
- }
- if x >= 0 && x < xt && y >= 0 && y < yt {
- http.ServeContent(w, r, "", time.Time{}, bytes.NewReader(tile[y][x]))
- return
- }
- }
- io.WriteString(w, "<html><body onload='showtimes()'>")
- fmt.Fprintf(w, "A grid of %d tiled images is below. Compare:<p>", xt*yt)
- for _, ms := range []int{0, 30, 200, 1000} {
- d := time.Duration(ms) * nanosPerMilli
- fmt.Fprintf(w, "[<a href='https://%s/gophertiles?latency=%d'>HTTP/2, %v latency</a>] [<a href='http://%s/gophertiles?latency=%d'>HTTP/1, %v latency</a>]<br>\n",
- httpsHost(), ms, d,
- httpHost(), ms, d,
- )
- }
- io.WriteString(w, "<p>\n")
- cacheBust := time.Now().UnixNano()
- for y := 0; y < yt; y++ {
- for x := 0; x < xt; x++ {
- fmt.Fprintf(w, "<img width=%d height=%d src='/gophertiles?x=%d&y=%d&cachebust=%d&latency=%d'>",
- tileSize, tileSize, x, y, cacheBust, ms)
- }
- io.WriteString(w, "<br/>\n")
- }
- io.WriteString(w, `<p><div id='loadtimes'></div></p>
-<script>
-function showtimes() {
- var times = 'Times from connection start:<br>'
- times += 'DOM loaded: ' + (window.performance.timing.domContentLoadedEventEnd - window.performance.timing.connectStart) + 'ms<br>'
- times += 'DOM complete (images loaded): ' + (window.performance.timing.domComplete - window.performance.timing.connectStart) + 'ms<br>'
- document.getElementById('loadtimes').innerHTML = times
-}
-</script>
-<hr><a href='/'>&lt;&lt Back to Go HTTP/2 demo server</a></body></html>`)
- })
-}
-
-func httpsHost() string {
- if *hostHTTPS != "" {
- return *hostHTTPS
- }
- if v := *httpsAddr; strings.HasPrefix(v, ":") {
- return "localhost" + v
- } else {
- return v
- }
-}
-
-func httpHost() string {
- if *hostHTTP != "" {
- return *hostHTTP
- }
- if v := *httpAddr; strings.HasPrefix(v, ":") {
- return "localhost" + v
- } else {
- return v
- }
-}
-
-func serveProdTLS() error {
- c, err := googlestorage.NewServiceClient()
- if err != nil {
- return err
- }
- slurp := func(key string) ([]byte, error) {
- const bucket = "http2-demo-server-tls"
- rc, _, err := c.GetObject(&googlestorage.Object{
- Bucket: bucket,
- Key: key,
- })
- if err != nil {
- return nil, fmt.Errorf("Error fetching GCS object %q in bucket %q: %v", key, bucket, err)
- }
- defer rc.Close()
- return ioutil.ReadAll(rc)
- }
- certPem, err := slurp("http2.golang.org.chained.pem")
- if err != nil {
- return err
- }
- keyPem, err := slurp("http2.golang.org.key")
- if err != nil {
- return err
- }
- cert, err := tls.X509KeyPair(certPem, keyPem)
- if err != nil {
- return err
- }
- srv := &http.Server{
- TLSConfig: &tls.Config{
- Certificates: []tls.Certificate{cert},
- },
- }
- http2.ConfigureServer(srv, &http2.Server{})
- ln, err := net.Listen("tcp", ":443")
- if err != nil {
- return err
- }
- return srv.Serve(tls.NewListener(tcpKeepAliveListener{ln.(*net.TCPListener)}, srv.TLSConfig))
-}
-
-type tcpKeepAliveListener struct {
- *net.TCPListener
-}
-
-func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
- tc, err := ln.AcceptTCP()
- if err != nil {
- return
- }
- tc.SetKeepAlive(true)
- tc.SetKeepAlivePeriod(3 * time.Minute)
- return tc, nil
-}
-
-func serveProd() error {
- errc := make(chan error, 2)
- go func() { errc <- http.ListenAndServe(":80", nil) }()
- go func() { errc <- serveProdTLS() }()
- return <-errc
-}
-
-func main() {
- var srv http.Server
- flag.BoolVar(&http2.VerboseLogs, "verbose", false, "Verbose HTTP/2 debugging.")
- flag.Parse()
- srv.Addr = *httpsAddr
-
- registerHandlers()
-
- if *prod {
- *hostHTTP = "http2.golang.org"
- *hostHTTPS = "http2.golang.org"
- log.Fatal(serveProd())
- }
-
- url := "https://" + httpsHost() + "/"
- log.Printf("Listening on " + url)
- http2.ConfigureServer(&srv, &http2.Server{})
-
- if *httpAddr != "" {
- go func() {
- log.Printf("Listening on http://" + httpHost() + "/ (for unencrypted HTTP/1)")
- log.Fatal(http.ListenAndServe(*httpAddr, nil))
- }()
- }
-
- go func() {
- log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key"))
- }()
- select {}
-}
diff --git a/vendor/golang.org/x/net/http2/h2demo/launch.go b/vendor/golang.org/x/net/http2/h2demo/launch.go
deleted file mode 100644
index 74666154..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/launch.go
+++ /dev/null
@@ -1,302 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-import (
- "bufio"
- "bytes"
- "encoding/json"
- "flag"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net/http"
- "os"
- "strings"
- "time"
-
- "golang.org/x/oauth2"
- "golang.org/x/oauth2/google"
- compute "google.golang.org/api/compute/v1"
-)
-
-var (
- proj = flag.String("project", "symbolic-datum-552", "name of Project")
- zone = flag.String("zone", "us-central1-a", "GCE zone")
- mach = flag.String("machinetype", "n1-standard-1", "Machine type")
- instName = flag.String("instance_name", "http2-demo", "Name of VM instance.")
- sshPub = flag.String("ssh_public_key", "", "ssh public key file to authorize. Can modify later in Google's web UI anyway.")
- staticIP = flag.String("static_ip", "130.211.116.44", "Static IP to use. If empty, automatic.")
-
- writeObject = flag.String("write_object", "", "If non-empty, a VM isn't created and the flag value is Google Cloud Storage bucket/object to write. The contents from stdin.")
- publicObject = flag.Bool("write_object_is_public", false, "Whether the object created by --write_object should be public.")
-)
-
-func readFile(v string) string {
- slurp, err := ioutil.ReadFile(v)
- if err != nil {
- log.Fatalf("Error reading %s: %v", v, err)
- }
- return strings.TrimSpace(string(slurp))
-}
-
-var config = &oauth2.Config{
- // The client-id and secret should be for an "Installed Application" when using
- // the CLI. Later we'll use a web application with a callback.
- ClientID: readFile("client-id.dat"),
- ClientSecret: readFile("client-secret.dat"),
- Endpoint: google.Endpoint,
- Scopes: []string{
- compute.DevstorageFull_controlScope,
- compute.ComputeScope,
- "https://www.googleapis.com/auth/sqlservice",
- "https://www.googleapis.com/auth/sqlservice.admin",
- },
- RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
-}
-
-const baseConfig = `#cloud-config
-coreos:
- units:
- - name: h2demo.service
- command: start
- content: |
- [Unit]
- Description=HTTP2 Demo
-
- [Service]
- ExecStartPre=/bin/bash -c 'mkdir -p /opt/bin && curl -s -o /opt/bin/h2demo http://storage.googleapis.com/http2-demo-server-tls/h2demo && chmod +x /opt/bin/h2demo'
- ExecStart=/opt/bin/h2demo --prod
- RestartSec=5s
- Restart=always
- Type=simple
-
- [Install]
- WantedBy=multi-user.target
-`
-
-func main() {
- flag.Parse()
- if *proj == "" {
- log.Fatalf("Missing --project flag")
- }
- prefix := "https://www.googleapis.com/compute/v1/projects/" + *proj
- machType := prefix + "/zones/" + *zone + "/machineTypes/" + *mach
-
- const tokenFileName = "token.dat"
- tokenFile := tokenCacheFile(tokenFileName)
- tokenSource := oauth2.ReuseTokenSource(nil, tokenFile)
- token, err := tokenSource.Token()
- if err != nil {
- if *writeObject != "" {
- log.Fatalf("Can't use --write_object without a valid token.dat file already cached.")
- }
- log.Printf("Error getting token from %s: %v", tokenFileName, err)
- log.Printf("Get auth code from %v", config.AuthCodeURL("my-state"))
- fmt.Print("\nEnter auth code: ")
- sc := bufio.NewScanner(os.Stdin)
- sc.Scan()
- authCode := strings.TrimSpace(sc.Text())
- token, err = config.Exchange(oauth2.NoContext, authCode)
- if err != nil {
- log.Fatalf("Error exchanging auth code for a token: %v", err)
- }
- if err := tokenFile.WriteToken(token); err != nil {
- log.Fatalf("Error writing to %s: %v", tokenFileName, err)
- }
- tokenSource = oauth2.ReuseTokenSource(token, nil)
- }
-
- oauthClient := oauth2.NewClient(oauth2.NoContext, tokenSource)
-
- if *writeObject != "" {
- writeCloudStorageObject(oauthClient)
- return
- }
-
- computeService, _ := compute.New(oauthClient)
-
- natIP := *staticIP
- if natIP == "" {
- // Try to find it by name.
- aggAddrList, err := computeService.Addresses.AggregatedList(*proj).Do()
- if err != nil {
- log.Fatal(err)
- }
- // http://godoc.org/code.google.com/p/google-api-go-client/compute/v1#AddressAggregatedList
- IPLoop:
- for _, asl := range aggAddrList.Items {
- for _, addr := range asl.Addresses {
- if addr.Name == *instName+"-ip" && addr.Status == "RESERVED" {
- natIP = addr.Address
- break IPLoop
- }
- }
- }
- }
-
- cloudConfig := baseConfig
- if *sshPub != "" {
- key := strings.TrimSpace(readFile(*sshPub))
- cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", key)
- }
- if os.Getenv("USER") == "bradfitz" {
- cloudConfig += fmt.Sprintf("\nssh_authorized_keys:\n - %s\n", "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAwks9dwWKlRC+73gRbvYtVg0vdCwDSuIlyt4z6xa/YU/jTDynM4R4W10hm2tPjy8iR1k8XhDv4/qdxe6m07NjG/By1tkmGpm1mGwho4Pr5kbAAy/Qg+NLCSdAYnnE00FQEcFOC15GFVMOW2AzDGKisReohwH9eIzHPzdYQNPRWXE= bradfitz@papag.bradfitz.com")
- }
- const maxCloudConfig = 32 << 10 // per compute API docs
- if len(cloudConfig) > maxCloudConfig {
- log.Fatalf("cloud config length of %d bytes is over %d byte limit", len(cloudConfig), maxCloudConfig)
- }
-
- instance := &compute.Instance{
- Name: *instName,
- Description: "Go Builder",
- MachineType: machType,
- Disks: []*compute.AttachedDisk{instanceDisk(computeService)},
- Tags: &compute.Tags{
- Items: []string{"http-server", "https-server"},
- },
- Metadata: &compute.Metadata{
- Items: []*compute.MetadataItems{
- {
- Key: "user-data",
- Value: cloudConfig,
- },
- },
- },
- NetworkInterfaces: []*compute.NetworkInterface{
- &compute.NetworkInterface{
- AccessConfigs: []*compute.AccessConfig{
- &compute.AccessConfig{
- Type: "ONE_TO_ONE_NAT",
- Name: "External NAT",
- NatIP: natIP,
- },
- },
- Network: prefix + "/global/networks/default",
- },
- },
- ServiceAccounts: []*compute.ServiceAccount{
- {
- Email: "default",
- Scopes: []string{
- compute.DevstorageFull_controlScope,
- compute.ComputeScope,
- },
- },
- },
- }
-
- log.Printf("Creating instance...")
- op, err := computeService.Instances.Insert(*proj, *zone, instance).Do()
- if err != nil {
- log.Fatalf("Failed to create instance: %v", err)
- }
- opName := op.Name
- log.Printf("Created. Waiting on operation %v", opName)
-OpLoop:
- for {
- time.Sleep(2 * time.Second)
- op, err := computeService.ZoneOperations.Get(*proj, *zone, opName).Do()
- if err != nil {
- log.Fatalf("Failed to get op %s: %v", opName, err)
- }
- switch op.Status {
- case "PENDING", "RUNNING":
- log.Printf("Waiting on operation %v", opName)
- continue
- case "DONE":
- if op.Error != nil {
- for _, operr := range op.Error.Errors {
- log.Printf("Error: %+v", operr)
- }
- log.Fatalf("Failed to start.")
- }
- log.Printf("Success. %+v", op)
- break OpLoop
- default:
- log.Fatalf("Unknown status %q: %+v", op.Status, op)
- }
- }
-
- inst, err := computeService.Instances.Get(*proj, *zone, *instName).Do()
- if err != nil {
- log.Fatalf("Error getting instance after creation: %v", err)
- }
- ij, _ := json.MarshalIndent(inst, "", " ")
- log.Printf("Instance: %s", ij)
-}
-
-func instanceDisk(svc *compute.Service) *compute.AttachedDisk {
- const imageURL = "https://www.googleapis.com/compute/v1/projects/coreos-cloud/global/images/coreos-stable-444-5-0-v20141016"
- diskName := *instName + "-disk"
-
- return &compute.AttachedDisk{
- AutoDelete: true,
- Boot: true,
- Type: "PERSISTENT",
- InitializeParams: &compute.AttachedDiskInitializeParams{
- DiskName: diskName,
- SourceImage: imageURL,
- DiskSizeGb: 50,
- },
- }
-}
-
-func writeCloudStorageObject(httpClient *http.Client) {
- content := os.Stdin
- const maxSlurp = 1 << 20
- var buf bytes.Buffer
- n, err := io.CopyN(&buf, content, maxSlurp)
- if err != nil && err != io.EOF {
- log.Fatalf("Error reading from stdin: %v, %v", n, err)
- }
- contentType := http.DetectContentType(buf.Bytes())
-
- req, err := http.NewRequest("PUT", "https://storage.googleapis.com/"+*writeObject, io.MultiReader(&buf, content))
- if err != nil {
- log.Fatal(err)
- }
- req.Header.Set("x-goog-api-version", "2")
- if *publicObject {
- req.Header.Set("x-goog-acl", "public-read")
- }
- req.Header.Set("Content-Type", contentType)
- res, err := httpClient.Do(req)
- if err != nil {
- log.Fatal(err)
- }
- if res.StatusCode != 200 {
- res.Write(os.Stderr)
- log.Fatalf("Failed.")
- }
- log.Printf("Success.")
- os.Exit(0)
-}
-
-type tokenCacheFile string
-
-func (f tokenCacheFile) Token() (*oauth2.Token, error) {
- slurp, err := ioutil.ReadFile(string(f))
- if err != nil {
- return nil, err
- }
- t := new(oauth2.Token)
- if err := json.Unmarshal(slurp, t); err != nil {
- return nil, err
- }
- return t, nil
-}
-
-func (f tokenCacheFile) WriteToken(t *oauth2.Token) error {
- jt, err := json.Marshal(t)
- if err != nil {
- return err
- }
- return ioutil.WriteFile(string(f), jt, 0600)
-}
diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.key b/vendor/golang.org/x/net/http2/h2demo/rootCA.key
deleted file mode 100644
index a15a6aba..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/rootCA.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSSR8Od0+9Q
-62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoTZjkUygby
-XDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYkJfODVGnV
-mr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3mOoLb4yJ
-JQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYWcaiW8LWZ
-SUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABAoIBAFFHV7JMAqPWnMYA
-nezY6J81v9+XN+7xABNWM2Q8uv4WdksbigGLTXR3/680Z2hXqJ7LMeC5XJACFT/e
-/Gr0vmpgOCygnCPfjGehGKpavtfksXV3edikUlnCXsOP1C//c1bFL+sMYmFCVgTx
-qYdDK8yKzXNGrKYT6q5YG7IglyRNV1rsQa8lM/5taFYiD1Ck/3tQi3YIq8Lcuser
-hrxsMABcQ6mi+EIvG6Xr4mfJug0dGJMHG4RG1UGFQn6RXrQq2+q53fC8ZbVUSi0j
-NQ918aKFzktwv+DouKU0ME4I9toks03gM860bAL7zCbKGmwR3hfgX/TqzVCWpG9E
-LDVfvekCgYEA8fk9N53jbBRmULUGEf4qWypcLGiZnNU0OeXWpbPV9aa3H0VDytA7
-8fCN2dPAVDPqlthMDdVe983NCNwp2Yo8ZimDgowyIAKhdC25s1kejuaiH9OAPj3c
-0f8KbriYX4n8zNHxFwK6Ae3pQ6EqOLJVCUsziUaZX9nyKY5aZlyX6xcCgYEAwjws
-K62PjC64U5wYddNLp+kNdJ4edx+a7qBb3mEgPvSFT2RO3/xafJyG8kQB30Mfstjd
-bRxyUV6N0vtX1zA7VQtRUAvfGCecpMo+VQZzcHXKzoRTnQ7eZg4Lmj5fQ9tOAKAo
-QCVBoSW/DI4PZL26CAMDcAba4Pa22ooLapoRIQsCgYA6pIfkkbxLNkpxpt2YwLtt
-Kr/590O7UaR9n6k8sW/aQBRDXNsILR1KDl2ifAIxpf9lnXgZJiwE7HiTfCAcW7c1
-nzwDCI0hWuHcMTS/NYsFYPnLsstyyjVZI3FY0h4DkYKV9Q9z3zJLQ2hz/nwoD3gy
-b2pHC7giFcTts1VPV4Nt8wKBgHeFn4ihHJweg76vZz3Z78w7VNRWGFklUalVdDK7
-gaQ7w2y/ROn/146mo0OhJaXFIFRlrpvdzVrU3GDf2YXJYDlM5ZRkObwbZADjksev
-WInzcgDy3KDg7WnPasRXbTfMU4t/AkW2p1QKbi3DnSVYuokDkbH2Beo45vxDxhKr
-C69RAoGBAIyo3+OJenoZmoNzNJl2WPW5MeBUzSh8T/bgyjFTdqFHF5WiYRD/lfHj
-x9Glyw2nutuT4hlOqHvKhgTYdDMsF2oQ72fe3v8Q5FU7FuKndNPEAyvKNXZaShVA
-hnlhv5DjXKb0wFWnt5PCCiQLtzG0yyHaITrrEme7FikkIcTxaX/Y
------END RSA PRIVATE KEY-----
diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.pem b/vendor/golang.org/x/net/http2/h2demo/rootCA.pem
deleted file mode 100644
index 3a323e77..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/rootCA.pem
+++ /dev/null
@@ -1,26 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV
-BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG
-A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3
-DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0
-NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG
-cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv
-c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B
-AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS
-R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT
-ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk
-JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3
-mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW
-caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G
-A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt
-hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB
-MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES
-MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv
-bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h
-U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao
-eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4
-UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD
-58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n
-sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF
-kPe6XoSbiLm/kxk32T0=
------END CERTIFICATE-----
diff --git a/vendor/golang.org/x/net/http2/h2demo/rootCA.srl b/vendor/golang.org/x/net/http2/h2demo/rootCA.srl
deleted file mode 100644
index 6db38918..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/rootCA.srl
+++ /dev/null
@@ -1 +0,0 @@
-E2CE26BF3285059C
diff --git a/vendor/golang.org/x/net/http2/h2demo/server.crt b/vendor/golang.org/x/net/http2/h2demo/server.crt
deleted file mode 100644
index c59059bd..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/server.crt
+++ /dev/null
@@ -1,20 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDPjCCAiYCCQDizia/MoUFnDANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJV
-UzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xFDASBgNVBAoT
-C0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhvc3QxHTAbBgkqhkiG9w0BCQEW
-DmJyYWRAZGFuZ2EuY29tMB4XDTE0MDcxNTIwNTAyN1oXDTE1MTEyNzIwNTAyN1ow
-RzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMQswCQYDVQQHEwJTRjEeMBwGA1UE
-ChMVYnJhZGZpdHogaHR0cDIgc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
-MIIBCgKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDifx2l
-gZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1LmJ4c2
-dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nefb3HL
-A7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55mjws
-/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/fz88
-F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABMA0GCSqGSIb3DQEBBQUAA4IB
-AQC0zL+n/YpRZOdulSu9tS8FxrstXqGWoxfe+vIUgqfMZ5+0MkjJ/vW0FqlLDl2R
-rn4XaR3e7FmWkwdDVbq/UB6lPmoAaFkCgh9/5oapMaclNVNnfF3fjCJfRr+qj/iD
-EmJStTIN0ZuUjAlpiACmfnpEU55PafT5Zx+i1yE4FGjw8bJpFoyD4Hnm54nGjX19
-KeCuvcYFUPnBm3lcL0FalF2AjqV02WTHYNQk7YF/oeO7NKBoEgvGvKG3x+xaOeBI
-dwvdq175ZsGul30h+QjrRlXhH/twcuaT3GSdoysDl9cCYE8f1Mk8PD6gan3uBCJU
-90p6/CbU71bGbfpM2PHot2fm
------END CERTIFICATE-----
diff --git a/vendor/golang.org/x/net/http2/h2demo/server.key b/vendor/golang.org/x/net/http2/h2demo/server.key
deleted file mode 100644
index f329c142..00000000
--- a/vendor/golang.org/x/net/http2/h2demo/server.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAs1Y9CyLFrdL8VQWN1WaifDqaZFnoqjHhCMlc1TfG2zA+InDi
-fx2lgZD3o8FeNnAcfM2sPlk3+ZleOYw9P/CklFVDlvqmpCv9ss/BEp/dDaWvy1Lm
-J4c2dbQJfmTxn7CV1H3TsVJvKdwFmdoABb41NoBp6+NNO7OtDyhbIMiCI0pL3Nef
-b3HLA7hIMo3DYbORTtJLTIH9W8YKrEWL0lwHLrYFx/UdutZnv+HjdmO6vCN4na55
-mjws/vjKQUmc7xeY7Xe20xDEG2oDKVkL2eD7FfyrYMS3rO1ExP2KSqlXYG/1S9I/
-fz88F0GK7HX55b5WjZCl2J3ERVdnv/0MQv+sYQIDAQABAoIBADQ2spUwbY+bcz4p
-3M66ECrNQTBggP40gYl2XyHxGGOu2xhZ94f9ELf1hjRWU2DUKWco1rJcdZClV6q3
-qwmXvcM2Q/SMS8JW0ImkNVl/0/NqPxGatEnj8zY30d/L8hGFb0orzFu/XYA5gCP4
-NbN2WrXgk3ZLeqwcNxHHtSiJWGJ/fPyeDWAu/apy75u9Xf2GlzBZmV6HYD9EfK80
-LTlI60f5FO487CrJnboL7ovPJrIHn+k05xRQqwma4orpz932rTXnTjs9Lg6KtbQN
-a7PrqfAntIISgr11a66Mng3IYH1lYqJsWJJwX/xHT4WLEy0EH4/0+PfYemJekz2+
-Co62drECgYEA6O9zVJZXrLSDsIi54cfxA7nEZWm5CAtkYWeAHa4EJ+IlZ7gIf9sL
-W8oFcEfFGpvwVqWZ+AsQ70dsjXAv3zXaG0tmg9FtqWp7pzRSMPidifZcQwWkKeTO
-gJnFmnVyed8h6GfjTEu4gxo1/S5U0V+mYSha01z5NTnN6ltKx1Or3b0CgYEAxRgm
-S30nZxnyg/V7ys61AZhst1DG2tkZXEMcA7dYhabMoXPJAP/EfhlWwpWYYUs/u0gS
-Wwmf5IivX5TlYScgmkvb/NYz0u4ZmOXkLTnLPtdKKFXhjXJcHjUP67jYmOxNlJLp
-V4vLRnFxTpffAV+OszzRxsXX6fvruwZBANYJeXUCgYBVouLFsFgfWGYp2rpr9XP4
-KK25kvrBqF6JKOIDB1zjxNJ3pUMKrl8oqccCFoCyXa4oTM2kUX0yWxHfleUjrMq4
-yimwQKiOZmV7fVLSSjSw6e/VfBd0h3gb82ygcplZkN0IclkwTY5SNKqwn/3y07V5
-drqdhkrgdJXtmQ6O5YYECQKBgATERcDToQ1USlI4sKrB/wyv1AlG8dg/IebiVJ4e
-ZAyvcQmClFzq0qS+FiQUnB/WQw9TeeYrwGs1hxBHuJh16srwhLyDrbMvQP06qh8R
-48F8UXXSRec22dV9MQphaROhu2qZdv1AC0WD3tqov6L33aqmEOi+xi8JgbT/PLk5
-c/c1AoGBAI1A/02ryksW6/wc7/6SP2M2rTy4m1sD/GnrTc67EHnRcVBdKO6qH2RY
-nqC8YcveC2ZghgPTDsA3VGuzuBXpwY6wTyV99q6jxQJ6/xcrD9/NUG6Uwv/xfCxl
-IJLeBYEqQundSSny3VtaAUK8Ul1nxpTvVRNwtcyWTo8RHAAyNPWd
------END RSA PRIVATE KEY-----
diff --git a/vendor/golang.org/x/net/http2/h2i/README.md b/vendor/golang.org/x/net/http2/h2i/README.md
deleted file mode 100644
index fb5c5efb..00000000
--- a/vendor/golang.org/x/net/http2/h2i/README.md
+++ /dev/null
@@ -1,97 +0,0 @@
-# h2i
-
-**h2i** is an interactive HTTP/2 ("h2") console debugger. Miss the good ol'
-days of telnetting to your HTTP/1.n servers? We're bringing you
-back.
-
-Features:
-- send raw HTTP/2 frames
- - PING
- - SETTINGS
- - HEADERS
- - etc
-- type in HTTP/1.n and have it auto-HPACK/frame-ify it for HTTP/2
-- pretty print all received HTTP/2 frames from the peer (including HPACK decoding)
-- tab completion of commands, options
-
-Not yet features, but soon:
-- unnecessary CONTINUATION frames on short boundaries, to test peer implementations
-- request bodies (DATA frames)
-- send invalid frames for testing server implementations (supported by underlying Framer)
-
-Later:
-- act like a server
-
-## Installation
-
-```
-$ go get golang.org/x/net/http2/h2i
-$ h2i <host>
-```
-
-## Demo
-
-```
-$ h2i
-Usage: h2i <hostname>
-
- -insecure
- Whether to skip TLS cert validation
- -nextproto string
- Comma-separated list of NPN/ALPN protocol names to negotiate. (default "h2,h2-14")
-
-$ h2i google.com
-Connecting to google.com:443 ...
-Connected to 74.125.224.41:443
-Negotiated protocol "h2-14"
-[FrameHeader SETTINGS len=18]
- [MAX_CONCURRENT_STREAMS = 100]
- [INITIAL_WINDOW_SIZE = 1048576]
- [MAX_FRAME_SIZE = 16384]
-[FrameHeader WINDOW_UPDATE len=4]
- Window-Increment = 983041
-
-h2i> PING h2iSayHI
-[FrameHeader PING flags=ACK len=8]
- Data = "h2iSayHI"
-h2i> headers
-(as HTTP/1.1)> GET / HTTP/1.1
-(as HTTP/1.1)> Host: ip.appspot.com
-(as HTTP/1.1)> User-Agent: h2i/brad-n-blake
-(as HTTP/1.1)>
-Opening Stream-ID 1:
- :authority = ip.appspot.com
- :method = GET
- :path = /
- :scheme = https
- user-agent = h2i/brad-n-blake
-[FrameHeader HEADERS flags=END_HEADERS stream=1 len=77]
- :status = "200"
- alternate-protocol = "443:quic,p=1"
- content-length = "15"
- content-type = "text/html"
- date = "Fri, 01 May 2015 23:06:56 GMT"
- server = "Google Frontend"
-[FrameHeader DATA flags=END_STREAM stream=1 len=15]
- "173.164.155.78\n"
-[FrameHeader PING len=8]
- Data = "\x00\x00\x00\x00\x00\x00\x00\x00"
-h2i> ping
-[FrameHeader PING flags=ACK len=8]
- Data = "h2i_ping"
-h2i> ping
-[FrameHeader PING flags=ACK len=8]
- Data = "h2i_ping"
-h2i> ping
-[FrameHeader GOAWAY len=22]
- Last-Stream-ID = 1; Error-Code = PROTOCOL_ERROR (1)
-
-ReadFrame: EOF
-```
-
-## Status
-
-Quick few hour hack. So much yet to do. Feel free to file issues for
-bugs or wishlist items, but [@bmizerany](https://github.com/bmizerany/)
-and I aren't yet accepting pull requests until things settle down.
-
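
The frames h2i sends interactively can also be written programmatically with the same http2.Framer the tool uses internally. A minimal sketch of the PING exchange from the demo above, assuming a reachable h2 server at example.com:443:

```
package main

import (
	"crypto/tls"
	"log"

	"golang.org/x/net/http2"
)

func main() {
	conn, err := tls.Dial("tcp", "example.com:443", &tls.Config{
		ServerName: "example.com",
		NextProtos: []string{"h2"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The client connection preface must be sent before any frames.
	if _, err := conn.Write([]byte(http2.ClientPreface)); err != nil {
		log.Fatal(err)
	}
	fr := http2.NewFramer(conn, conn)
	if err := fr.WriteSettings(); err != nil { // empty SETTINGS frame
		log.Fatal(err)
	}
	var data [8]byte
	copy(data[:], "h2iSayHI")
	if err := fr.WritePing(false, data); err != nil {
		log.Fatal(err)
	}
	for i := 0; i < 3; i++ { // read a few frames; a PING ACK should be among them
		f, err := fr.ReadFrame()
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("got %v", f.Header())
	}
}
```
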
diff --git a/vendor/golang.org/x/net/http2/h2i/h2i.go b/vendor/golang.org/x/net/http2/h2i/h2i.go
deleted file mode 100644
index ac262084..00000000
--- a/vendor/golang.org/x/net/http2/h2i/h2i.go
+++ /dev/null
@@ -1,486 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-The h2i command is an interactive HTTP/2 console.
-
-Usage:
- $ h2i [flags] <hostname>
-
-Interactive commands in the console: (all parts case-insensitive)
-
- ping [data]
- settings ack
- settings FOO=n BAR=z
- headers (open a new stream by typing HTTP/1.1)
-*/
-package main
-
-import (
- "bufio"
- "bytes"
- "crypto/tls"
- "errors"
- "flag"
- "fmt"
- "io"
- "log"
- "net"
- "net/http"
- "os"
- "regexp"
- "strconv"
- "strings"
-
- "golang.org/x/crypto/ssh/terminal"
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/hpack"
-)
-
-// Flags
-var (
- flagNextProto = flag.String("nextproto", "h2,h2-14", "Comma-separated list of NPN/ALPN protocol names to negotiate.")
- flagInsecure = flag.Bool("insecure", false, "Whether to skip TLS cert validation")
-)
-
-type command struct {
- run func(*h2i, []string) error // required
-
- // complete optionally specifies tokens (case-insensitive) which are
- // valid for this subcommand.
- complete func() []string
-}
-
-var commands = map[string]command{
- "ping": command{run: (*h2i).cmdPing},
- "settings": command{
- run: (*h2i).cmdSettings,
- complete: func() []string {
- return []string{
- "ACK",
- http2.SettingHeaderTableSize.String(),
- http2.SettingEnablePush.String(),
- http2.SettingMaxConcurrentStreams.String(),
- http2.SettingInitialWindowSize.String(),
- http2.SettingMaxFrameSize.String(),
- http2.SettingMaxHeaderListSize.String(),
- }
- },
- },
- "quit": command{run: (*h2i).cmdQuit},
- "headers": command{run: (*h2i).cmdHeaders},
-}
-
-func usage() {
- fmt.Fprintf(os.Stderr, "Usage: h2i <hostname>\n\n")
- flag.PrintDefaults()
- os.Exit(1)
-}
-
-// withPort adds ":443" if another port isn't already present.
-func withPort(host string) string {
- if _, _, err := net.SplitHostPort(host); err != nil {
- return net.JoinHostPort(host, "443")
- }
- return host
-}
-
-// h2i is the app's state.
-type h2i struct {
- host string
- tc *tls.Conn
- framer *http2.Framer
- term *terminal.Terminal
-
- // owned by the command loop:
- streamID uint32
- hbuf bytes.Buffer
- henc *hpack.Encoder
-
- // owned by the readFrames loop:
- peerSetting map[http2.SettingID]uint32
- hdec *hpack.Decoder
-}
-
-func main() {
- flag.Usage = usage
- flag.Parse()
- if flag.NArg() != 1 {
- usage()
- }
- log.SetFlags(0)
-
- host := flag.Arg(0)
- app := &h2i{
- host: host,
- peerSetting: make(map[http2.SettingID]uint32),
- }
- app.henc = hpack.NewEncoder(&app.hbuf)
-
- if err := app.Main(); err != nil {
- if app.term != nil {
- app.logf("%v\n", err)
- } else {
- fmt.Fprintf(os.Stderr, "%v\n", err)
- }
- os.Exit(1)
- }
- fmt.Fprintf(os.Stdout, "\n")
-}
-
-func (app *h2i) Main() error {
- cfg := &tls.Config{
- ServerName: app.host,
- NextProtos: strings.Split(*flagNextProto, ","),
- InsecureSkipVerify: *flagInsecure,
- }
-
- hostAndPort := withPort(app.host)
- log.Printf("Connecting to %s ...", hostAndPort)
- tc, err := tls.Dial("tcp", hostAndPort, cfg)
- if err != nil {
- return fmt.Errorf("Error dialing %s: %v", withPort(app.host), err)
- }
- log.Printf("Connected to %v", tc.RemoteAddr())
- defer tc.Close()
-
- if err := tc.Handshake(); err != nil {
- return fmt.Errorf("TLS handshake: %v", err)
- }
- if !*flagInsecure {
- if err := tc.VerifyHostname(app.host); err != nil {
- return fmt.Errorf("VerifyHostname: %v", err)
- }
- }
- state := tc.ConnectionState()
- log.Printf("Negotiated protocol %q", state.NegotiatedProtocol)
- if !state.NegotiatedProtocolIsMutual || state.NegotiatedProtocol == "" {
- return fmt.Errorf("Could not negotiate protocol mutually")
- }
-
- if _, err := io.WriteString(tc, http2.ClientPreface); err != nil {
- return err
- }
-
- app.framer = http2.NewFramer(tc, tc)
-
- oldState, err := terminal.MakeRaw(0)
- if err != nil {
- return err
- }
- defer terminal.Restore(0, oldState)
-
- var screen = struct {
- io.Reader
- io.Writer
- }{os.Stdin, os.Stdout}
-
- app.term = terminal.NewTerminal(screen, "h2i> ")
- lastWord := regexp.MustCompile(`.+\W(\w+)$`)
- app.term.AutoCompleteCallback = func(line string, pos int, key rune) (newLine string, newPos int, ok bool) {
- if key != '\t' {
- return
- }
- if pos != len(line) {
- // TODO: we're being lazy for now, only supporting tab completion at the end.
- return
- }
- // Auto-complete for the command itself.
- if !strings.Contains(line, " ") {
- var name string
- name, _, ok = lookupCommand(line)
- if !ok {
- return
- }
- return name, len(name), true
- }
- _, c, ok := lookupCommand(line[:strings.IndexByte(line, ' ')])
- if !ok || c.complete == nil {
- return
- }
- if strings.HasSuffix(line, " ") {
- app.logf("%s", strings.Join(c.complete(), " "))
- return line, pos, true
- }
- m := lastWord.FindStringSubmatch(line)
- if m == nil {
- return line, len(line), true
- }
- soFar := m[1]
- var match []string
- for _, cand := range c.complete() {
- if len(soFar) > len(cand) || !strings.EqualFold(cand[:len(soFar)], soFar) {
- continue
- }
- match = append(match, cand)
- }
- if len(match) == 0 {
- return
- }
- if len(match) > 1 {
- // TODO: auto-complete any common prefix
- app.logf("%s", strings.Join(match, " "))
- return line, pos, true
- }
- newLine = line[:len(line)-len(soFar)] + match[0]
- return newLine, len(newLine), true
-
- }
-
- errc := make(chan error, 2)
- go func() { errc <- app.readFrames() }()
- go func() { errc <- app.readConsole() }()
- return <-errc
-}
-
-func (app *h2i) logf(format string, args ...interface{}) {
- fmt.Fprintf(app.term, format+"\n", args...)
-}
-
-func (app *h2i) readConsole() error {
- for {
- line, err := app.term.ReadLine()
- if err == io.EOF {
- return nil
- }
- if err != nil {
- return fmt.Errorf("terminal.ReadLine: %v", err)
- }
- f := strings.Fields(line)
- if len(f) == 0 {
- continue
- }
- cmd, args := f[0], f[1:]
- if _, c, ok := lookupCommand(cmd); ok {
- err = c.run(app, args)
- } else {
- app.logf("Unknown command %q", line)
- }
- if err == errExitApp {
- return nil
- }
- if err != nil {
- return err
- }
- }
-}
-
-func lookupCommand(prefix string) (name string, c command, ok bool) {
- prefix = strings.ToLower(prefix)
- if c, ok = commands[prefix]; ok {
- return prefix, c, ok
- }
-
- for full, candidate := range commands {
- if strings.HasPrefix(full, prefix) {
- if c.run != nil {
- return "", command{}, false // ambiguous
- }
- c = candidate
- name = full
- }
- }
- return name, c, c.run != nil
-}
-
-var errExitApp = errors.New("internal sentinel error value to quit the console reading loop")
-
-func (a *h2i) cmdQuit(args []string) error {
- if len(args) > 0 {
- a.logf("the QUIT command takes no argument")
- return nil
- }
- return errExitApp
-}
-
-func (a *h2i) cmdSettings(args []string) error {
- if len(args) == 1 && strings.EqualFold(args[0], "ACK") {
- return a.framer.WriteSettingsAck()
- }
- var settings []http2.Setting
- for _, arg := range args {
- if strings.EqualFold(arg, "ACK") {
- a.logf("Error: ACK must be only argument with the SETTINGS command")
- return nil
- }
- eq := strings.Index(arg, "=")
- if eq == -1 {
- a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg)
- return nil
- }
- sid, ok := settingByName(arg[:eq])
- if !ok {
- a.logf("Error: unknown setting name %q", arg[:eq])
- return nil
- }
- val, err := strconv.ParseUint(arg[eq+1:], 10, 32)
- if err != nil {
- a.logf("Error: invalid argument %q (expected SETTING_NAME=nnnn)", arg)
- return nil
- }
- settings = append(settings, http2.Setting{
- ID: sid,
- Val: uint32(val),
- })
- }
- a.logf("Sending: %v", settings)
- return a.framer.WriteSettings(settings...)
-}
-
-func settingByName(name string) (http2.SettingID, bool) {
- for _, sid := range [...]http2.SettingID{
- http2.SettingHeaderTableSize,
- http2.SettingEnablePush,
- http2.SettingMaxConcurrentStreams,
- http2.SettingInitialWindowSize,
- http2.SettingMaxFrameSize,
- http2.SettingMaxHeaderListSize,
- } {
- if strings.EqualFold(sid.String(), name) {
- return sid, true
- }
- }
- return 0, false
-}
-
-func (app *h2i) cmdPing(args []string) error {
- if len(args) > 1 {
- app.logf("invalid PING usage: only accepts 0 or 1 args")
- return nil // nil means don't end the program
- }
- var data [8]byte
- if len(args) == 1 {
- copy(data[:], args[0])
- } else {
- copy(data[:], "h2i_ping")
- }
- return app.framer.WritePing(false, data)
-}
-
-func (app *h2i) cmdHeaders(args []string) error {
- if len(args) > 0 {
- app.logf("Error: HEADERS doesn't yet take arguments.")
- // TODO: flags for restricting window size, to force CONTINUATION
- // frames.
- return nil
- }
- var h1req bytes.Buffer
- app.term.SetPrompt("(as HTTP/1.1)> ")
- defer app.term.SetPrompt("h2i> ")
- for {
- line, err := app.term.ReadLine()
- if err != nil {
- return err
- }
- h1req.WriteString(line)
- h1req.WriteString("\r\n")
- if line == "" {
- break
- }
- }
- req, err := http.ReadRequest(bufio.NewReader(&h1req))
- if err != nil {
- app.logf("Invalid HTTP/1.1 request: %v", err)
- return nil
- }
- if app.streamID == 0 {
- app.streamID = 1
- } else {
- app.streamID += 2
- }
- app.logf("Opening Stream-ID %d:", app.streamID)
- hbf := app.encodeHeaders(req)
- if len(hbf) > 16<<10 {
- app.logf("TODO: h2i doesn't yet write CONTINUATION frames. Copy it from transport.go")
- return nil
- }
- return app.framer.WriteHeaders(http2.HeadersFrameParam{
- StreamID: app.streamID,
- BlockFragment: hbf,
- EndStream: req.Method == "GET" || req.Method == "HEAD", // good enough for now
- EndHeaders: true, // for now
- })
-}
-
-func (app *h2i) readFrames() error {
- for {
- f, err := app.framer.ReadFrame()
- if err != nil {
- return fmt.Errorf("ReadFrame: %v", err)
- }
- app.logf("%v", f)
- switch f := f.(type) {
- case *http2.PingFrame:
- app.logf(" Data = %q", f.Data)
- case *http2.SettingsFrame:
- f.ForeachSetting(func(s http2.Setting) error {
- app.logf(" %v", s)
- app.peerSetting[s.ID] = s.Val
- return nil
- })
- case *http2.WindowUpdateFrame:
- app.logf(" Window-Increment = %v\n", f.Increment)
- case *http2.GoAwayFrame:
- app.logf(" Last-Stream-ID = %d; Error-Code = %v (%d)\n", f.LastStreamID, f.ErrCode, f.ErrCode)
- case *http2.DataFrame:
- app.logf(" %q", f.Data())
- case *http2.HeadersFrame:
- if f.HasPriority() {
- app.logf(" PRIORITY = %v", f.Priority)
- }
- if app.hdec == nil {
- // TODO: if the user uses h2i to send a SETTINGS frame advertising
- // something larger, we'll need to respect SETTINGS_HEADER_TABLE_SIZE
- // and stuff here instead of using the 4k default. But for now:
- tableSize := uint32(4 << 10)
- app.hdec = hpack.NewDecoder(tableSize, app.onNewHeaderField)
- }
- app.hdec.Write(f.HeaderBlockFragment())
- }
- }
-}
-
-// called from readLoop
-func (app *h2i) onNewHeaderField(f hpack.HeaderField) {
-	if f.Sensitive {
-		app.logf("  %s = %q (SENSITIVE)", f.Name, f.Value)
-		return
-	}
-	app.logf("  %s = %q", f.Name, f.Value)
-}
-
-func (app *h2i) encodeHeaders(req *http.Request) []byte {
- app.hbuf.Reset()
-
- // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go
- host := req.Host
- if host == "" {
- host = req.URL.Host
- }
-
- path := req.URL.Path
- if path == "" {
- path = "/"
- }
-
- app.writeHeader(":authority", host) // probably not right for all sites
- app.writeHeader(":method", req.Method)
- app.writeHeader(":path", path)
- app.writeHeader(":scheme", "https")
-
- for k, vv := range req.Header {
- lowKey := strings.ToLower(k)
- if lowKey == "host" {
- continue
- }
- for _, v := range vv {
- app.writeHeader(lowKey, v)
- }
- }
- return app.hbuf.Bytes()
-}
-
-func (app *h2i) writeHeader(name, value string) {
- app.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
- app.logf(" %s = %s", name, value)
-}
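
encodeHeaders above is the whole of h2i's HPACK story: an hpack.Encoder writing into a bytes.Buffer, pseudo-header fields first, names lowercased. A standalone sketch of the same sequence (the user-agent value is made up):

```
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	// Pseudo-header fields come first and all names are lowercase,
	// mirroring (*h2i).encodeHeaders above.
	fields := []hpack.HeaderField{
		{Name: ":authority", Value: "example.com"},
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: ":scheme", Value: "https"},
		{Name: "user-agent", Value: "sketch/0.1"},
	}
	for _, hf := range fields {
		if err := enc.WriteField(hf); err != nil {
			fmt.Println("encode:", err)
			return
		}
	}
	fmt.Printf("header block fragment: %d bytes\n", buf.Len())
}
```
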
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go
index 014f7896..c2805f6a 100644
--- a/vendor/golang.org/x/net/http2/headermap.go
+++ b/vendor/golang.org/x/net/http2/headermap.go
@@ -57,6 +57,7 @@ func init() {
"server",
"set-cookie",
"strict-transport-security",
+ "trailer",
"transfer-encoding",
"user-agent",
"vary",
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
index 80d621cf..f9bb0339 100644
--- a/vendor/golang.org/x/net/http2/hpack/encode.go
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -144,7 +144,7 @@ func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
// shouldIndex reports whether f should be indexed.
func (e *Encoder) shouldIndex(f HeaderField) bool {
- return !f.Sensitive && f.size() <= e.dynTab.maxSize
+ return !f.Sensitive && f.Size() <= e.dynTab.maxSize
}
// appendIndexed appends index i, as encoded in "Indexed Header Field"
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go
index 8e9b2f2e..8aa197ad 100644
--- a/vendor/golang.org/x/net/http2/hpack/hpack.go
+++ b/vendor/golang.org/x/net/http2/hpack/hpack.go
@@ -41,7 +41,24 @@ type HeaderField struct {
Sensitive bool
}
-func (hf *HeaderField) size() uint32 {
+// IsPseudo reports whether the header field is an http2 pseudo header.
+// That is, it reports whether it starts with a colon.
+// It is not otherwise guaranteed to be a valid pseudo header field,
+// though.
+func (hf HeaderField) IsPseudo() bool {
+ return len(hf.Name) != 0 && hf.Name[0] == ':'
+}
+
+func (hf HeaderField) String() string {
+ var suffix string
+ if hf.Sensitive {
+ suffix = " (sensitive)"
+ }
+ return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
+}
+
+// Size returns the size of an entry per RFC 7541 section 4.1.
+func (hf HeaderField) Size() uint32 {
// http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
// "The size of the dynamic table is the sum of the size of
// its entries. The size of an entry is the sum of its name's
@@ -102,6 +119,13 @@ func (d *Decoder) SetMaxStringLength(n int) {
d.maxStrLen = n
}
+// SetEmitFunc changes the callback used when new header fields
+// are decoded.
+// It must be non-nil. It does not affect EmitEnabled.
+func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
+ d.emit = emitFunc
+}
+
// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
// should be called. The default is true.
//
@@ -156,7 +180,7 @@ func (dt *dynamicTable) setMaxSize(v uint32) {
func (dt *dynamicTable) add(f HeaderField) {
dt.ents = append(dt.ents, f)
- dt.size += f.size()
+ dt.size += f.Size()
dt.evict()
}
@@ -164,7 +188,7 @@ func (dt *dynamicTable) add(f HeaderField) {
func (dt *dynamicTable) evict() {
base := dt.ents // keep base pointer of slice
for dt.size > dt.maxSize {
- dt.size -= dt.ents[0].size()
+ dt.size -= dt.ents[0].Size()
dt.ents = dt.ents[1:]
}
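
The newly exported Size follows the HPACK entry-size rule cited above (name length plus value length plus a fixed 32-octet overhead), and IsPseudo is just a colon-prefix test. A quick demonstration:

```
package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	hf := hpack.HeaderField{Name: "content-type", Value: "text/html"}
	// 12 (name) + 9 (value) + 32 (per-entry overhead) = 53
	fmt.Println(hf.Size()) // 53

	fmt.Println(hpack.HeaderField{Name: ":path", Value: "/"}.IsPseudo()) // true
	fmt.Println(hf.IsPseudo())                                           // false
}
```
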
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
index eb4b1f05..8850e394 100644
--- a/vendor/golang.org/x/net/http2/hpack/huffman.go
+++ b/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -48,12 +48,16 @@ var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
// maxLen bytes will return ErrStringLength.
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
n := rootHuffmanNode
- cur, nbits := uint(0), uint8(0)
+ // cur is the bit buffer that has not been fed into n.
+ // cbits is the number of low order bits in cur that are valid.
+ // sbits is the number of bits of the symbol prefix being decoded.
+ cur, cbits, sbits := uint(0), uint8(0), uint8(0)
for _, b := range v {
cur = cur<<8 | uint(b)
- nbits += 8
- for nbits >= 8 {
- idx := byte(cur >> (nbits - 8))
+ cbits += 8
+ sbits += 8
+ for cbits >= 8 {
+ idx := byte(cur >> (cbits - 8))
n = n.children[idx]
if n == nil {
return ErrInvalidHuffman
@@ -63,22 +67,40 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
return ErrStringLength
}
buf.WriteByte(n.sym)
- nbits -= n.codeLen
+ cbits -= n.codeLen
n = rootHuffmanNode
+ sbits = cbits
} else {
- nbits -= 8
+ cbits -= 8
}
}
}
- for nbits > 0 {
- n = n.children[byte(cur<<(8-nbits))]
- if n.children != nil || n.codeLen > nbits {
+ for cbits > 0 {
+ n = n.children[byte(cur<<(8-cbits))]
+ if n == nil {
+ return ErrInvalidHuffman
+ }
+ if n.children != nil || n.codeLen > cbits {
break
}
+ if maxLen != 0 && buf.Len() == maxLen {
+ return ErrStringLength
+ }
buf.WriteByte(n.sym)
- nbits -= n.codeLen
+ cbits -= n.codeLen
n = rootHuffmanNode
+ sbits = cbits
+ }
+ if sbits > 7 {
+ // Either there was an incomplete symbol, or overlong padding.
+ // Both are decoding errors per RFC 7541 section 5.2.
+ return ErrInvalidHuffman
}
+ if mask := uint(1<<cbits - 1); cur&mask != mask {
+ // Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
+ return ErrInvalidHuffman
+ }
+
return nil
}
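
The two new checks at the end of huffmanDecode enforce RFC 7541 section 5.2: at most seven bits of padding, and the padding must be a prefix of the EOS code, i.e. all ones. The package's exported Huffman helpers make this observable; the bit-flip below assumes the sample string leaves at least one padding bit in its final byte (this one leaves seven):

```
package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	enc := hpack.AppendHuffmanString(nil, "www.example.com")
	s, err := hpack.HuffmanDecodeToString(enc)
	fmt.Printf("%q %v\n", s, err) // "www.example.com" <nil>

	// Clear one padding bit so the padding is no longer all ones.
	bad := append([]byte(nil), enc...)
	bad[len(bad)-1] &^= 1
	_, err = hpack.HuffmanDecodeToString(bad)
	fmt.Println(err) // hpack: invalid Huffman-encoded data
}
```
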
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 03ad05cc..a9053eec 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -4,25 +4,51 @@
// Package http2 implements the HTTP/2 protocol.
//
-// This is a work in progress. This package is low-level and intended
-// to be used directly by very few people. Most users will use it
-// indirectly through integration with the net/http package. See
-// ConfigureServer. That ConfigureServer call will likely be automatic
-// or available via an empty import in the future.
+// This package is low-level and intended to be used directly by very
+// few people. Most users will use it indirectly through the automatic
+// use by the net/http package (from Go 1.6 and later).
+// For use in earlier Go versions see ConfigureServer. (Transport support
+// requires Go 1.6 or later)
+//
+// See https://http2.github.io/ for more information on HTTP/2.
+//
+// See https://http2.golang.org/ for a test server running this code.
//
-// See http://http2.github.io/
package http2
import (
"bufio"
+ "crypto/tls"
+ "errors"
"fmt"
"io"
"net/http"
+ "os"
+ "sort"
"strconv"
+ "strings"
"sync"
+
+ "golang.org/x/net/lex/httplex"
+)
+
+var (
+ VerboseLogs bool
+ logFrameWrites bool
+ logFrameReads bool
)
-var VerboseLogs = false
+func init() {
+ e := os.Getenv("GODEBUG")
+ if strings.Contains(e, "http2debug=1") {
+ VerboseLogs = true
+ }
+ if strings.Contains(e, "http2debug=2") {
+ VerboseLogs = true
+ logFrameWrites = true
+ logFrameReads = true
+ }
+}
const (
// ClientPreface is the string that must be sent by new
@@ -138,17 +164,28 @@ func (s SettingID) String() string {
return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
}
-func validHeader(v string) bool {
+var (
+ errInvalidHeaderFieldName = errors.New("http2: invalid header field name")
+ errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
+)
+
+// validWireHeaderFieldName reports whether v is a valid header field
+// name (key). See httplex.ValidHeaderFieldName for the base rules.
+//
+// Further, http2 says:
+// "Just as in HTTP/1.x, header field names are strings of ASCII
+// characters that are compared in a case-insensitive
+// fashion. However, header field names MUST be converted to
+// lowercase prior to their encoding in HTTP/2. "
+func validWireHeaderFieldName(v string) bool {
if len(v) == 0 {
return false
}
for _, r := range v {
- // "Just as in HTTP/1.x, header field names are
- // strings of ASCII characters that are compared in a
- // case-insensitive fashion. However, header field
- // names MUST be converted to lowercase prior to their
- // encoding in HTTP/2. "
- if r >= 127 || ('A' <= r && r <= 'Z') {
+ if !httplex.IsTokenRune(r) {
+ return false
+ }
+ if 'A' <= r && r <= 'Z' {
return false
}
}
@@ -251,3 +288,65 @@ func mustUint31(v int32) uint32 {
}
return uint32(v)
}
+
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 2616, section 4.4.
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+type httpError struct {
+ msg string
+ timeout bool
+}
+
+func (e *httpError) Error() string { return e.msg }
+func (e *httpError) Timeout() bool { return e.timeout }
+func (e *httpError) Temporary() bool { return true }
+
+var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
+
+type connectionStater interface {
+ ConnectionState() tls.ConnectionState
+}
+
+var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
+
+type sorter struct {
+ v []string // owned by sorter
+}
+
+func (s *sorter) Len() int { return len(s.v) }
+func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
+func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
+
+// Keys returns the sorted keys of h.
+//
+// The returned slice is only valid until s is used again or returned to
+// its pool.
+func (s *sorter) Keys(h http.Header) []string {
+ keys := s.v[:0]
+ for k := range h {
+ keys = append(keys, k)
+ }
+ s.v = keys
+ sort.Sort(s)
+ return keys
+}
+
+func (s *sorter) SortStrings(ss []string) {
+	// Our sorter works on s.v, which the sorter owns, so
+ // stash it away while we sort the user's buffer.
+ save := s.v
+ s.v = ss
+ sort.Sort(s)
+ s.v = save
+}
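
validWireHeaderFieldName layers the HTTP/2 lowercase rule on top of the token check from golang.org/x/net/lex/httplex, the import this file's first hunk adds (newer x/net releases moved that helper to http/httpguts). A sketch restating the rule:

```
package main

import (
	"fmt"

	"golang.org/x/net/lex/httplex"
)

// validName mirrors validWireHeaderFieldName above: non-empty, token
// characters only, and no uppercase letters on the wire.
func validName(v string) bool {
	if len(v) == 0 {
		return false
	}
	for _, r := range v {
		if !httplex.IsTokenRune(r) || ('A' <= r && r <= 'Z') {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(validName("content-type")) // true
	fmt.Println(validName("Content-Type")) // false: must be lowercased first
	fmt.Println(validName("bad header"))   // false: space is not a token rune
}
```

As a usage note for the logging hunk earlier in this file: per the init function above, running a binary with GODEBUG=http2debug=1 enables the verbose logs, and GODEBUG=http2debug=2 additionally logs individual frame reads and writes.
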
diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go
new file mode 100644
index 00000000..efd2e128
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go16.go
@@ -0,0 +1,46 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.6
+
+package http2
+
+import (
+ "crypto/tls"
+ "net/http"
+ "time"
+)
+
+func configureTransport(t1 *http.Transport) (*Transport, error) {
+ return nil, errTransportVersion
+}
+
+func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
+	return 0
+}
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+func isBadCipher(cipher uint16) bool {
+ switch cipher {
+ case tls.TLS_RSA_WITH_RC4_128_SHA,
+ tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ // Reject cipher suites from Appendix A.
+ // "This list includes those cipher suites that do not
+ // offer an ephemeral key exchange and those that are
+ // based on the TLS null, stream or block cipher type"
+ return true
+ default:
+ return false
+ }
+}
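
The suites isBadCipher rejects are the RFC 7540 Appendix A entries quoted in the comment: no ephemeral key exchange, or a null, stream, or CBC-style cipher. A server-side sketch that avoids the INADEQUATE_SECURITY path by pinning an ECDHE AES-GCM suite; the certificate paths and the specific suite are assumptions, any non-blacklisted AEAD suite works:

```
package main

import (
	"crypto/tls"
	"log"
	"net/http"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		TLSConfig: &tls.Config{
			MinVersion: tls.VersionTLS12, // HTTP/2 requires TLS 1.2 or later
			CipherSuites: []uint16{
				// Ephemeral key exchange + AEAD: not in Appendix A.
				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			},
		},
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
```
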
diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go
new file mode 100644
index 00000000..28df0c16
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go17.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package http2
+
+import (
+ "net"
+ "net/http"
+)
+
+type contextContext interface{}
+
+type fakeContext struct{}
+
+func (fakeContext) Done() <-chan struct{} { return nil }
+func (fakeContext) Err() error { panic("should not be called") }
+
+func reqContext(r *http.Request) fakeContext {
+ return fakeContext{}
+}
+
+func setResponseUncompressed(res *http.Response) {
+ // Nothing.
+}
+
+type clientTrace struct{}
+
+func requestTrace(*http.Request) *clientTrace { return nil }
+func traceGotConn(*http.Request, *ClientConn) {}
+func traceFirstResponseByte(*clientTrace) {}
+func traceWroteHeaders(*clientTrace) {}
+func traceWroteRequest(*clientTrace, error) {}
+func traceGot100Continue(trace *clientTrace) {}
+func traceWait100Continue(trace *clientTrace) {}
+
+func nop() {}
+
+func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
+ return nil, nop
+}
+
+func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
+ return ctx, nop
+}
+
+func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
+ return req
+}
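
These are the pre-Go-1.7 stubs; the go1.7 build-tagged counterpart (not part of this diff) can delegate to the request's real context support. A hedged sketch of what that side of the split looks like, under an illustrative package name:

```
// +build go1.7

package http2sketch // a sketch of the shape, not the actual go17.go from x/net

import (
	"context"
	"net/http"
)

// On Go 1.7+ the shim type can simply wrap the real context.Context.
type contextContext interface {
	context.Context
}

func reqContext(r *http.Request) context.Context { return r.Context() }

func contextWithCancel(ctx contextContext) (contextContext, func()) {
	return context.WithCancel(ctx)
}

func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
	return req.WithContext(ctx)
}
```
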
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
index 96a3eb8d..53b7a1da 100644
--- a/vendor/golang.org/x/net/http2/pipe.go
+++ b/vendor/golang.org/x/net/http2/pipe.go
@@ -14,10 +14,13 @@ import (
// io.Pipe except there are no PipeReader/PipeWriter halves, and the
// underlying buffer is an interface. (io.Pipe is always unbuffered)
type pipe struct {
- mu sync.Mutex
- c sync.Cond // c.L must point to
- b pipeBuffer
- err error // read error once empty. non-nil means closed.
+ mu sync.Mutex
+ c sync.Cond // c.L lazily initialized to &p.mu
+ b pipeBuffer
+ err error // read error once empty. non-nil means closed.
+ breakErr error // immediate read error (caller doesn't see rest of b)
+ donec chan struct{} // closed on error
+ readFn func() // optional code to run in Read before error
}
type pipeBuffer interface {
@@ -26,6 +29,12 @@ type pipeBuffer interface {
io.Reader
}
+func (p *pipe) Len() int {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ return p.b.Len()
+}
+
// Read waits until data is available and copies bytes
// from the buffer into p.
func (p *pipe) Read(d []byte) (n int, err error) {
@@ -35,10 +44,17 @@ func (p *pipe) Read(d []byte) (n int, err error) {
p.c.L = &p.mu
}
for {
+ if p.breakErr != nil {
+ return 0, p.breakErr
+ }
if p.b.Len() > 0 {
return p.b.Read(d)
}
if p.err != nil {
+ if p.readFn != nil {
+ p.readFn() // e.g. copy trailers
+ p.readFn = nil // not sticky like p.err
+ }
return 0, p.err
}
p.c.Wait()
@@ -62,13 +78,25 @@ func (p *pipe) Write(d []byte) (n int, err error) {
return p.b.Write(d)
}
-// CloseWithError causes Reads to wake up and return the
-// provided err after all data has been read.
+// CloseWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err after all data has been
+// read.
//
// The error must be non-nil.
-func (p *pipe) CloseWithError(err error) {
+func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
+
+// BreakWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err immediately, without
+// waiting for unread data.
+func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
+
+// closeWithErrorAndCode is like CloseWithError but also sets some code to run
+// in the caller's goroutine before returning the error.
+func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
+
+func (p *pipe) closeWithError(dst *error, err error, fn func()) {
if err == nil {
- panic("CloseWithError must be non-nil")
+ panic("err must be non-nil")
}
p.mu.Lock()
defer p.mu.Unlock()
@@ -76,15 +104,50 @@ func (p *pipe) CloseWithError(err error) {
p.c.L = &p.mu
}
defer p.c.Signal()
- if p.err == nil {
- p.err = err
+ if *dst != nil {
+ // Already been done.
+ return
}
+ p.readFn = fn
+ *dst = err
+ p.closeDoneLocked()
}
-// Err returns the error (if any) first set with CloseWithError.
-// This is the error which will be returned after the reader is exhausted.
+// requires p.mu be held.
+func (p *pipe) closeDoneLocked() {
+ if p.donec == nil {
+ return
+ }
+ // Close if unclosed. This isn't racy since we always
+ // hold p.mu while closing.
+ select {
+ case <-p.donec:
+ default:
+ close(p.donec)
+ }
+}
+
+// Err returns the error (if any) first set by BreakWithError or CloseWithError.
func (p *pipe) Err() error {
p.mu.Lock()
defer p.mu.Unlock()
+ if p.breakErr != nil {
+ return p.breakErr
+ }
return p.err
}
+
+// Done returns a channel which is closed if and when this pipe is closed
+// with CloseWithError or BreakWithError.
+func (p *pipe) Done() <-chan struct{} {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.donec == nil {
+ p.donec = make(chan struct{})
+ if p.err != nil || p.breakErr != nil {
+ // Already hit an error.
+ p.closeDoneLocked()
+ }
+ }
+ return p.donec
+}
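
The split between err and breakErr is the point of this change: CloseWithError lets a reader drain any buffered data first, while BreakWithError preempts it. A toy, lock-only sketch of those read semantics (not the vendored type; the real pipe also blocks on a sync.Cond instead of returning early):

```
package main

import (
	"bytes"
	"errors"
	"fmt"
	"sync"
)

type toyPipe struct {
	mu       sync.Mutex
	buf      bytes.Buffer
	err      error // like CloseWithError: surfaces only once buf is drained
	breakErr error // like BreakWithError: surfaces immediately
}

func (p *toyPipe) Read(d []byte) (int, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.breakErr != nil {
		return 0, p.breakErr
	}
	if p.buf.Len() > 0 {
		return p.buf.Read(d)
	}
	return 0, p.err // the real pipe waits on a cond var while err == nil
}

func main() {
	p := &toyPipe{}
	p.buf.WriteString("buffered")
	p.err = errors.New("closed")
	b := make([]byte, 8)
	n, err := p.Read(b)
	fmt.Println(string(b[:n]), err) // "buffered" <nil>: data drains first
	_, err = p.Read(b)
	fmt.Println(err) // closed

	p2 := &toyPipe{breakErr: errors.New("broken")}
	p2.buf.WriteString("unread")
	_, err = p2.Read(b)
	fmt.Println(err) // broken: buffered data is never returned
}
```
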
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index 7405cdab..8206fa79 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -6,8 +6,8 @@
// instead, and make sure that on close we close all open
// streams. then remove doneServing?
-// TODO: finish GOAWAY support. Consider each incoming frame type and
-// whether it should be ignored during a shutdown race.
+// TODO: re-audit GOAWAY support. Consider each incoming frame type and
+// whether it should be ignored during graceful shutdown.
// TODO: disconnect idle clients. GFE seems to do 4 minutes. make
// configurable? or maximum number of idle clients and remove the
@@ -46,7 +46,11 @@ import (
"log"
"net"
"net/http"
+ "net/textproto"
"net/url"
+ "os"
+ "reflect"
+ "runtime"
"strconv"
"strings"
"sync"
@@ -190,28 +194,80 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if testHookOnConn != nil {
testHookOnConn()
}
- conf.handleConn(hs, c, h)
+ conf.ServeConn(c, &ServeConnOpts{
+ Handler: h,
+ BaseConfig: hs,
+ })
}
s.TLSNextProto[NextProtoTLS] = protoHandler
s.TLSNextProto["h2-14"] = protoHandler // temporary; see above.
return nil
}
-func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) {
+// ServeConnOpts are options for the Server.ServeConn method.
+type ServeConnOpts struct {
+ // BaseConfig optionally sets the base configuration
+ // for values. If nil, defaults are used.
+ BaseConfig *http.Server
+
+ // Handler specifies which handler to use for processing
+ // requests. If nil, BaseConfig.Handler is used. If BaseConfig
+ // or BaseConfig.Handler is nil, http.DefaultServeMux is used.
+ Handler http.Handler
+}
+
+func (o *ServeConnOpts) baseConfig() *http.Server {
+ if o != nil && o.BaseConfig != nil {
+ return o.BaseConfig
+ }
+ return new(http.Server)
+}
+
+func (o *ServeConnOpts) handler() http.Handler {
+ if o != nil {
+ if o.Handler != nil {
+ return o.Handler
+ }
+ if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
+ return o.BaseConfig.Handler
+ }
+ }
+ return http.DefaultServeMux
+}
+
+// ServeConn serves HTTP/2 requests on the provided connection and
+// blocks until the connection is no longer readable.
+//
+// ServeConn starts speaking HTTP/2 assuming that c has not had any
+// reads or writes. It writes its initial settings frame and expects
+// to be able to read the preface and settings frame from the
+// client. If c has a ConnectionState method like a *tls.Conn, the
+// ConnectionState is used to verify the TLS ciphersuite and to set
+// the Request.TLS field in Handlers.
+//
+// ServeConn does not support h2c by itself. Any h2c support must be
+// implemented in terms of providing a suitably-behaving net.Conn.
+//
+// The opts parameter is optional. If nil, default values are used.
+func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ baseCtx, cancel := serverConnBaseContext(c, opts)
+ defer cancel()
+
sc := &serverConn{
- srv: srv,
- hs: hs,
+ srv: s,
+ hs: opts.baseConfig(),
conn: c,
+ baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
bw: newBufferedWriter(c),
- handler: h,
+ handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
wantWriteFrameCh: make(chan frameWriteMsg, 8),
wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
doneServing: make(chan struct{}),
- advMaxStreams: srv.maxConcurrentStreams(),
+ advMaxStreams: s.maxConcurrentStreams(),
writeSched: writeScheduler{
maxFrameSize: initialMaxFrameSize,
},
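
ServeConnOpts and ServeConn are the new public surface replacing the unexported handleConn. A minimal server sketch using them; the certificate paths are hypothetical, and the explicit Handshake just forces ALPN to finish before serving:

```
package main

import (
	"crypto/tls"
	"io"
	"log"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	cert, err := tls.LoadX509KeyPair("cert.pem", "key.pem")
	if err != nil {
		log.Fatal(err)
	}
	ln, err := tls.Listen("tcp", ":8443", &tls.Config{
		Certificates: []tls.Certificate{cert},
		NextProtos:   []string{http2.NextProtoTLS}, // advertise "h2"
	})
	if err != nil {
		log.Fatal(err)
	}
	h2 := new(http2.Server)
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "served over "+r.Proto+"\n")
	})
	for {
		c, err := ln.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func(c net.Conn) {
			defer c.Close()
			if tc, ok := c.(*tls.Conn); ok {
				if err := tc.Handshake(); err != nil { // finish ALPN first
					log.Print(err)
					return
				}
			}
			h2.ServeConn(c, &http2.ServeConnOpts{Handler: handler})
		}(c)
	}
}
```
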
@@ -220,17 +276,18 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) {
serveG: newGoroutineLock(),
pushEnabled: true,
}
+
sc.flow.add(initialWindowSize)
sc.inflow.add(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
- sc.hpackDecoder = hpack.NewDecoder(initialHeaderTableSize, sc.onNewHeaderField)
- sc.hpackDecoder.SetMaxStringLength(sc.maxHeaderStringLen())
fr := NewFramer(sc.bw, c)
- fr.SetMaxReadFrameSize(srv.maxReadFrameSize())
+ fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+ fr.MaxHeaderListSize = sc.maxHeaderListSize()
+ fr.SetMaxReadFrameSize(s.maxReadFrameSize())
sc.framer = fr
- if tc, ok := c.(*tls.Conn); ok {
+ if tc, ok := c.(connectionStater); ok {
sc.tlsState = new(tls.ConnectionState)
*sc.tlsState = tc.ConnectionState()
// 9.2 Use of TLS Features
@@ -260,7 +317,7 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) {
// So for now, do nothing here again.
}
- if !srv.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
+ if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
// "Endpoints MAY choose to generate a connection error
// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
// the prohibited cipher suites are negotiated."
@@ -282,32 +339,8 @@ func (srv *Server) handleConn(hs *http.Server, c net.Conn, h http.Handler) {
sc.serve()
}
-// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
-func isBadCipher(cipher uint16) bool {
- switch cipher {
- case tls.TLS_RSA_WITH_RC4_128_SHA,
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_RSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
- // Reject cipher suites from Appendix A.
- // "This list includes those cipher suites that do not
- // offer an ephemeral key exchange and those that are
- // based on the TLS null, stream or block cipher type"
- return true
- default:
- return false
- }
-}
-
func (sc *serverConn) rejectConn(err ErrCode, debug string) {
- sc.vlogf("REJECTING conn: %v, %s", err, debug)
+ sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
// ignoring errors. hanging up anyway.
sc.framer.WriteGoAway(0, err, []byte(debug))
sc.bw.Flush()
@@ -321,8 +354,8 @@ type serverConn struct {
conn net.Conn
bw *bufferedWriter // writing to conn
handler http.Handler
+ baseCtx contextContext
framer *Framer
- hpackDecoder *hpack.Decoder
doneServing chan struct{} // closed when serverConn.serve ends
readFrameCh chan readFrameResult // written by serverConn.readFrames
wantWriteFrameCh chan frameWriteMsg // from handlers -> serve
@@ -349,7 +382,6 @@ type serverConn struct {
headerTableSize uint32
peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
- req requestParam // non-zero while reading request headers
writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh
needsFrameFlush bool // last frame write wasn't a flush
writeSched writeScheduler
@@ -358,22 +390,13 @@ type serverConn struct {
goAwayCode ErrCode
shutdownTimerCh <-chan time.Time // nil until used
shutdownTimer *time.Timer // nil until used
+ freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
hpackEncoder *hpack.Encoder
}
-func (sc *serverConn) maxHeaderStringLen() int {
- v := sc.maxHeaderListSize()
- if uint32(int(v)) == v {
- return int(v)
- }
- // They had a crazy big number for MaxHeaderBytes anyway,
- // so give them unlimited header lengths:
- return 0
-}
-
func (sc *serverConn) maxHeaderListSize() uint32 {
n := sc.hs.MaxHeaderBytes
if n <= 0 {
@@ -386,21 +409,6 @@ func (sc *serverConn) maxHeaderListSize() uint32 {
return uint32(n + typicalHeaders*perFieldOverhead)
}
-// requestParam is the state of the next request, initialized over
-// potentially several frames HEADERS + zero or more CONTINUATION
-// frames.
-type requestParam struct {
- // stream is non-nil if we're reading (HEADER or CONTINUATION)
- // frames for a request (but not DATA).
- stream *stream
- header http.Header
- method, path string
- scheme, authority string
- sawRegularHeader bool // saw a non-pseudo header already
- invalidHeader bool // an invalid header was seen
- headerListSize int64 // actually uint32, but easier math this way
-}
-
// stream represents a stream. This is the minimal metadata needed by
// the serve goroutine. Most of the actual stream state is owned by
// the http.Handler's goroutine in the responseWriter. Because the
@@ -410,20 +418,30 @@ type requestParam struct {
// responseWriter's state field.
type stream struct {
// immutable:
- id uint32
- body *pipe // non-nil if expecting DATA frames
- cw closeWaiter // closed wait stream transitions to closed state
+ sc *serverConn
+ id uint32
+ body *pipe // non-nil if expecting DATA frames
+ cw closeWaiter // closed wait stream transitions to closed state
+ ctx contextContext
+ cancelCtx func()
// owned by serverConn's serve loop:
- bodyBytes int64 // body bytes seen so far
- declBodyBytes int64 // or -1 if undeclared
- flow flow // limits writing from Handler to client
- inflow flow // what the client is allowed to POST/etc to us
- parent *stream // or nil
- weight uint8
- state streamState
- sentReset bool // only true once detached from streams map
- gotReset bool // only true once detacted from streams map
+ bodyBytes int64 // body bytes seen so far
+ declBodyBytes int64 // or -1 if undeclared
+ flow flow // limits writing from Handler to client
+ inflow flow // what the client is allowed to POST/etc to us
+ parent *stream // or nil
+ numTrailerValues int64
+ weight uint8
+ state streamState
+ sentReset bool // only true once detached from streams map
+	gotReset  bool // only true once detached from streams map
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ reqBuf []byte
+
+ trailer http.Header // accumulated trailers
+ reqTrailer http.Header // handler's Request.Trailer
}
func (sc *serverConn) Framer() *Framer { return sc.framer }
@@ -474,12 +492,55 @@ func (sc *serverConn) logf(format string, args ...interface{}) {
}
}
+// errno returns v's underlying uintptr, else 0.
+//
+// TODO: remove this helper function once http2 can use build
+// tags. See comment in isClosedConnError.
+func errno(v error) uintptr {
+ if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
+ return uintptr(rv.Uint())
+ }
+ return 0
+}
+
+// isClosedConnError reports whether err is an error from use of a closed
+// network connection.
+func isClosedConnError(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ // TODO: remove this string search and be more like the Windows
+ // case below. That might involve modifying the standard library
+ // to return better error types.
+ str := err.Error()
+ if strings.Contains(str, "use of closed network connection") {
+ return true
+ }
+
+ // TODO(bradfitz): x/tools/cmd/bundle doesn't really support
+ // build tags, so I can't make an http2_windows.go file with
+ // Windows-specific stuff. Fix that and move this, once we
+ // have a way to bundle this into std's net/http somehow.
+ if runtime.GOOS == "windows" {
+ if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
+ if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
+ const WSAECONNABORTED = 10053
+ const WSAECONNRESET = 10054
+ if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
if err == nil {
return
}
- str := err.Error()
- if err == io.EOF || strings.Contains(str, "use of closed network connection") {
+ if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) {
// Boring, expected errors.
sc.vlogf(format, args...)
} else {
@@ -487,55 +548,6 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
}
}
-func (sc *serverConn) onNewHeaderField(f hpack.HeaderField) {
- sc.serveG.check()
- sc.vlogf("got header field %+v", f)
- switch {
- case !validHeader(f.Name):
- sc.req.invalidHeader = true
- case strings.HasPrefix(f.Name, ":"):
- if sc.req.sawRegularHeader {
- sc.logf("pseudo-header after regular header")
- sc.req.invalidHeader = true
- return
- }
- var dst *string
- switch f.Name {
- case ":method":
- dst = &sc.req.method
- case ":path":
- dst = &sc.req.path
- case ":scheme":
- dst = &sc.req.scheme
- case ":authority":
- dst = &sc.req.authority
- default:
- // 8.1.2.1 Pseudo-Header Fields
- // "Endpoints MUST treat a request or response
- // that contains undefined or invalid
- // pseudo-header fields as malformed (Section
- // 8.1.2.6)."
- sc.logf("invalid pseudo-header %q", f.Name)
- sc.req.invalidHeader = true
- return
- }
- if *dst != "" {
- sc.logf("duplicate pseudo-header %q sent", f.Name)
- sc.req.invalidHeader = true
- return
- }
- *dst = f.Value
- default:
- sc.req.sawRegularHeader = true
- sc.req.header.Add(sc.canonicalHeader(f.Name), f.Value)
- const headerFieldOverhead = 32 // per spec
- sc.req.headerListSize += int64(len(f.Name)) + int64(len(f.Value)) + headerFieldOverhead
- if sc.req.headerListSize > int64(sc.maxHeaderListSize()) {
- sc.hpackDecoder.SetEmitEnabled(false)
- }
- }
-}
-
func (sc *serverConn) canonicalHeader(v string) string {
sc.serveG.check()
cv, ok := commonCanonHeader[v]
@@ -570,10 +582,11 @@ type readFrameResult struct {
// It's run on its own goroutine.
func (sc *serverConn) readFrames() {
gate := make(gate)
+ gateDone := gate.Done
for {
f, err := sc.framer.ReadFrame()
select {
- case sc.readFrameCh <- readFrameResult{f, err, gate.Done}:
+ case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
case <-sc.doneServing:
return
}
@@ -582,6 +595,9 @@ func (sc *serverConn) readFrames() {
case <-sc.doneServing:
return
}
+ if terminalReadFrameError(err) {
+ return
+ }
}
}
@@ -615,6 +631,7 @@ func (sc *serverConn) stopShutdownTimer() {
}
func (sc *serverConn) notePanic() {
+ // Note: this is for serverConn.serve panicking, not http.Handler code.
if testHookOnPanicMu != nil {
testHookOnPanicMu.Lock()
defer testHookOnPanicMu.Unlock()
@@ -636,7 +653,9 @@ func (sc *serverConn) serve() {
defer sc.stopShutdownTimer()
defer close(sc.doneServing) // unblocks handlers trying to send
- sc.vlogf("HTTP/2 connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
+ if VerboseLogs {
+ sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
+ }
sc.writeFrame(frameWriteMsg{
write: writeSettings{
@@ -653,7 +672,7 @@ func (sc *serverConn) serve() {
sc.unackedSettings++
if err := sc.readPreface(); err != nil {
- sc.condlogf(err, "error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
+ sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
return
}
// Now that we've got the preface, get us out of the
@@ -719,7 +738,9 @@ func (sc *serverConn) readPreface() error {
return errors.New("timeout waiting for client preface")
case err := <-errc:
if err == nil {
- sc.vlogf("client %v said hello", sc.conn.RemoteAddr())
+ if VerboseLogs {
+ sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
+ }
}
return err
}
@@ -804,7 +825,23 @@ func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error {
// If you're not on the serve goroutine, use writeFrameFromHandler instead.
func (sc *serverConn) writeFrame(wm frameWriteMsg) {
sc.serveG.check()
- sc.writeSched.add(wm)
+
+ var ignoreWrite bool
+
+ // Don't send a 100-continue response if we've already sent headers.
+ // See golang.org/issue/14030.
+ switch wm.write.(type) {
+ case *writeResHeaders:
+ wm.stream.wroteHeaders = true
+ case write100ContinueHeadersFrame:
+ if wm.stream.wroteHeaders {
+ ignoreWrite = true
+ }
+ }
+
+ if !ignoreWrite {
+ sc.writeSched.add(wm)
+ }
sc.scheduleFrameWrite()
}
@@ -837,6 +874,11 @@ func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
go sc.writeFrameAsync(wm)
}
+// errHandlerPanicked is the error given to any callers blocked in a read from
+// Request.Body when the main goroutine panics. Since most handlers read in
+// the main ServeHTTP goroutine, this will show up rarely.
+var errHandlerPanicked = errors.New("http2: handler panicked")
+
// wroteFrame is called on the serve goroutine with the result of
// whatever happened on writeFrameAsync.
func (sc *serverConn) wroteFrame(res frameWriteResult) {
@@ -851,6 +893,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
closeStream := endsStream(wm.write)
+ if _, ok := wm.write.(handlerPanicRST); ok {
+ sc.closeStream(st, errHandlerPanicked)
+ }
+
// Reply (if requested) to the blocked ServeHTTP goroutine.
if ch := wm.done; ch != nil {
select {
@@ -876,7 +922,7 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
// state here anyway, after telling the peer
// we're hanging up on them.
st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
- errCancel := StreamError{st.id, ErrCodeCancel}
+ errCancel := streamError(st.id, ErrCodeCancel)
sc.resetStream(errCancel)
case stateHalfClosedRemote:
sc.closeStream(st, errHandlerComplete)
@@ -963,18 +1009,6 @@ func (sc *serverConn) resetStream(se StreamError) {
}
}
-// curHeaderStreamID returns the stream ID of the header block we're
-// currently in the middle of reading. If this returns non-zero, the
-// next frame must be a CONTINUATION with this stream id.
-func (sc *serverConn) curHeaderStreamID() uint32 {
- sc.serveG.check()
- st := sc.req.stream
- if st == nil {
- return 0
- }
- return st.id
-}
-
// processFrameFromReader processes the serve loop's read from readFrameCh from the
// frame-reading goroutine.
// processFrameFromReader returns whether the connection should be kept open.
@@ -986,7 +1020,7 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
sc.goAway(ErrCodeFrameSize)
return true // goAway will close the loop
}
- clientGone := err == io.EOF || strings.Contains(err.Error(), "use of closed network connection")
+ clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
if clientGone {
// TODO: could we also get into this state if
// the peer does a half close
@@ -1000,7 +1034,9 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
}
} else {
f := res.f
- sc.vlogf("got %v: %#v", f.Header(), f)
+ if VerboseLogs {
+ sc.vlogf("http2: server read frame %v", summarizeFrame(f))
+ }
err = sc.processFrame(f)
if err == nil {
return true
@@ -1015,14 +1051,14 @@ func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
sc.goAway(ErrCodeFlowControl)
return true
case ConnectionError:
- sc.logf("%v: %v", sc.conn.RemoteAddr(), ev)
+ sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
sc.goAway(ErrCode(ev))
return true // goAway will handle shutdown
default:
if res.err != nil {
- sc.logf("disconnecting; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
+ sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
} else {
- sc.logf("disconnection due to other error: %v", err)
+ sc.logf("http2: server closing client connection: %v", err)
}
return false
}
@@ -1039,21 +1075,11 @@ func (sc *serverConn) processFrame(f Frame) error {
sc.sawFirstSettings = true
}
- if s := sc.curHeaderStreamID(); s != 0 {
- if cf, ok := f.(*ContinuationFrame); !ok {
- return ConnectionError(ErrCodeProtocol)
- } else if cf.Header().StreamID != s {
- return ConnectionError(ErrCodeProtocol)
- }
- }
-
switch f := f.(type) {
case *SettingsFrame:
return sc.processSettings(f)
- case *HeadersFrame:
+ case *MetaHeadersFrame:
return sc.processHeaders(f)
- case *ContinuationFrame:
- return sc.processContinuation(f)
case *WindowUpdateFrame:
return sc.processWindowUpdate(f)
case *PingFrame:
@@ -1069,14 +1095,14 @@ func (sc *serverConn) processFrame(f Frame) error {
// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
return ConnectionError(ErrCodeProtocol)
default:
- sc.vlogf("Ignoring frame: %v", f.Header())
+ sc.vlogf("http2: server ignoring frame: %v", f.Header())
return nil
}
}
func (sc *serverConn) processPing(f *PingFrame) error {
sc.serveG.check()
- if f.Flags.Has(FlagSettingsAck) {
+ if f.IsAck() {
// 6.7 PING: " An endpoint MUST NOT respond to PING frames
// containing this flag."
return nil
@@ -1107,7 +1133,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
return nil
}
if !st.flow.add(int32(f.Increment)) {
- return StreamError{f.StreamID, ErrCodeFlowControl}
+ return streamError(f.StreamID, ErrCodeFlowControl)
}
default: // connection-level flow control
if !sc.flow.add(int32(f.Increment)) {
@@ -1132,7 +1158,8 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
}
if st != nil {
st.gotReset = true
- sc.closeStream(st, StreamError{f.StreamID, f.ErrCode})
+ st.cancelCtx()
+ sc.closeStream(st, streamError(f.StreamID, f.ErrCode))
}
return nil
}
@@ -1149,10 +1176,26 @@ func (sc *serverConn) closeStream(st *stream, err error) {
}
delete(sc.streams, st.id)
if p := st.body; p != nil {
+ // Return any buffered unread bytes worth of conn-level flow control.
+ // See golang.org/issue/16481
+ sc.sendWindowUpdate(nil, p.Len())
+
p.CloseWithError(err)
}
st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
sc.writeSched.forgetStream(st.id)
+ if st.reqBuf != nil {
+ // Stash this request body buffer (64k) away for reuse
+ // by a future POST/PUT/etc.
+ //
+ // TODO(bradfitz): share on the server? sync.Pool?
+ // Server requires locks and might hurt contention.
+ // sync.Pool might work, or might be worse, depending
+ // on goroutine CPU migrations. (get and put on
+ // separate CPUs). Maybe a mix of strategies. But
+ // this is an easy win for now.
+ sc.freeRequestBodyBuf = st.reqBuf
+ }
}
func (sc *serverConn) processSettings(f *SettingsFrame) error {
@@ -1180,7 +1223,9 @@ func (sc *serverConn) processSetting(s Setting) error {
if err := s.Valid(); err != nil {
return err
}
- sc.vlogf("processing setting %v", s)
+ if VerboseLogs {
+ sc.vlogf("http2: server processing setting %v", s)
+ }
switch s.ID {
case SettingHeaderTableSize:
sc.headerTableSize = s.Val
@@ -1199,6 +1244,9 @@ func (sc *serverConn) processSetting(s Setting) error {
// Unknown setting: "An endpoint that receives a SETTINGS
// frame with any unknown or unsupported identifier MUST
// ignore that setting."
+ if VerboseLogs {
+ sc.vlogf("http2: server ignoring unknown setting %v", s)
+ }
}
return nil
}
@@ -1233,57 +1281,103 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
func (sc *serverConn) processData(f *DataFrame) error {
sc.serveG.check()
+ data := f.Data()
+
// "If a DATA frame is received whose stream is not in "open"
// or "half closed (local)" state, the recipient MUST respond
// with a stream error (Section 5.4.2) of type STREAM_CLOSED."
id := f.Header().StreamID
st, ok := sc.streams[id]
- if !ok || st.state != stateOpen {
+ if !ok || st.state != stateOpen || st.gotTrailerHeader {
// This includes sending a RST_STREAM if the stream is
// in stateHalfClosedLocal (which currently means that
// the http.Handler returned, so it's done reading &
// done writing). Try to stop the client from sending
// more DATA.
- return StreamError{id, ErrCodeStreamClosed}
+
+ // But still enforce their connection-level flow control,
+ // and return any flow control bytes since we're not going
+ // to consume them.
+ if sc.inflow.available() < int32(f.Length) {
+ return streamError(id, ErrCodeFlowControl)
+ }
+ // Deduct the flow control from inflow, since we're
+ // going to immediately add it back in
+ // sendWindowUpdate, which also schedules sending the
+ // frames.
+ sc.inflow.take(int32(f.Length))
+ sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+
+ return streamError(id, ErrCodeStreamClosed)
}
if st.body == nil {
panic("internal error: should have a body in this state")
}
- data := f.Data()
// Sender sending more than they'd declared?
if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
- return StreamError{id, ErrCodeStreamClosed}
+ return streamError(id, ErrCodeStreamClosed)
}
- if len(data) > 0 {
+ if f.Length > 0 {
// Check whether the client has flow control quota.
- if int(st.inflow.available()) < len(data) {
- return StreamError{id, ErrCodeFlowControl}
+ if st.inflow.available() < int32(f.Length) {
+ return streamError(id, ErrCodeFlowControl)
}
- st.inflow.take(int32(len(data)))
- wrote, err := st.body.Write(data)
- if err != nil {
- return StreamError{id, ErrCodeStreamClosed}
+ st.inflow.take(int32(f.Length))
+
+ if len(data) > 0 {
+ wrote, err := st.body.Write(data)
+ if err != nil {
+ return streamError(id, ErrCodeStreamClosed)
+ }
+ if wrote != len(data) {
+ panic("internal error: bad Writer")
+ }
+ st.bodyBytes += int64(len(data))
}
- if wrote != len(data) {
- panic("internal error: bad Writer")
+
+ // Return any padded flow control now, since we won't
+ // refund it later on body reads.
+ if pad := int32(f.Length) - int32(len(data)); pad > 0 {
+ sc.sendWindowUpdate32(nil, pad)
+ sc.sendWindowUpdate32(st, pad)
}
- st.bodyBytes += int64(len(data))
}
if f.StreamEnded() {
- if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
- st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
- st.declBodyBytes, st.bodyBytes))
- } else {
- st.body.CloseWithError(io.EOF)
- }
- st.state = stateHalfClosedRemote
+ st.endStream()
}
return nil
}
-func (sc *serverConn) processHeaders(f *HeadersFrame) error {
+// endStream closes a Request.Body's pipe. It is called when a DATA
+// frame says a request body is over (or after trailers).
+func (st *stream) endStream() {
+ sc := st.sc
+ sc.serveG.check()
+
+ if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
+ st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
+ st.declBodyBytes, st.bodyBytes))
+ } else {
+ st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
+ st.body.CloseWithError(io.EOF)
+ }
+ st.state = stateHalfClosedRemote
+}
+
+// copyTrailersToHandlerRequest is run in the Handler's goroutine in
+// its Request.Body.Read just before it gets io.EOF.
+func (st *stream) copyTrailersToHandlerRequest() {
+ for k, vv := range st.trailer {
+ if _, ok := st.reqTrailer[k]; ok {
+			// Only copy it over if it was pre-declared.
+ st.reqTrailer[k] = vv
+ }
+ }
+}
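On the handler side, such trailers only become visible after the request body is fully consumed. A hedged usage sketch, assuming the usual net/http, io, io/ioutil, and fmt imports ("X-Checksum" is a made-up example key):

func readWithTrailers(w http.ResponseWriter, r *http.Request) {
	io.Copy(ioutil.Discard, r.Body) // trailers arrive only after the body is drained
	// Only keys the client predeclared in its Trailer header show up here.
	fmt.Fprintln(w, r.Trailer.Get("X-Checksum"))
}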
+
+func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
sc.serveG.check()
id := f.Header().StreamID
if sc.inGoAway {
@@ -1291,22 +1385,39 @@ func (sc *serverConn) processHeaders(f *HeadersFrame) error {
return nil
}
// http://http2.github.io/http2-spec/#rfc.section.5.1.1
- if id%2 != 1 || id <= sc.maxStreamID || sc.req.stream != nil {
- // Streams initiated by a client MUST use odd-numbered
- // stream identifiers. [...] The identifier of a newly
- // established stream MUST be numerically greater than all
- // streams that the initiating endpoint has opened or
- // reserved. [...] An endpoint that receives an unexpected
- // stream identifier MUST respond with a connection error
- // (Section 5.4.1) of type PROTOCOL_ERROR.
+ // Streams initiated by a client MUST use odd-numbered stream
+ // identifiers. [...] An endpoint that receives an unexpected
+ // stream identifier MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id%2 != 1 {
return ConnectionError(ErrCodeProtocol)
}
- if id > sc.maxStreamID {
- sc.maxStreamID = id
+ // A HEADERS frame can be used to create a new stream or
+ // send a trailer for an open one. If we already have a stream
+ // open, let it process its own HEADERS frame (trailers at this
+ // point, if it's valid).
+ st := sc.streams[f.Header().StreamID]
+ if st != nil {
+ return st.processTrailerHeaders(f)
+ }
+
+ // [...] The identifier of a newly established stream MUST be
+ // numerically greater than all streams that the initiating
+ // endpoint has opened or reserved. [...] An endpoint that
+ // receives an unexpected stream identifier MUST respond with
+ // a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id <= sc.maxStreamID {
+ return ConnectionError(ErrCodeProtocol)
}
- st := &stream{
- id: id,
- state: stateOpen,
+ sc.maxStreamID = id
+
+ ctx, cancelCtx := contextWithCancel(sc.baseCtx)
+ st = &stream{
+ sc: sc,
+ id: id,
+ state: stateOpen,
+ ctx: ctx,
+ cancelCtx: cancelCtx,
}
if f.StreamEnded() {
st.state = stateHalfClosedRemote
@@ -1326,35 +1437,6 @@ func (sc *serverConn) processHeaders(f *HeadersFrame) error {
if sc.curOpenStreams == 1 {
sc.setConnState(http.StateActive)
}
- sc.req = requestParam{
- stream: st,
- header: make(http.Header),
- }
- sc.hpackDecoder.SetEmitEnabled(true)
- return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded())
-}
-
-func (sc *serverConn) processContinuation(f *ContinuationFrame) error {
- sc.serveG.check()
- st := sc.streams[f.Header().StreamID]
- if st == nil || sc.curHeaderStreamID() != st.id {
- return ConnectionError(ErrCodeProtocol)
- }
- return sc.processHeaderBlockFragment(st, f.HeaderBlockFragment(), f.HeadersEnded())
-}
-
-func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bool) error {
- sc.serveG.check()
- if _, err := sc.hpackDecoder.Write(frag); err != nil {
- return ConnectionError(ErrCodeCompression)
- }
- if !end {
- return nil
- }
- if err := sc.hpackDecoder.Close(); err != nil {
- return ConnectionError(ErrCodeCompression)
- }
- defer sc.resetPendingRequest()
if sc.curOpenStreams > sc.advMaxStreams {
// "Endpoints MUST NOT exceed the limit set by their
// peer. An endpoint that receives a HEADERS frame
@@ -1364,33 +1446,69 @@ func (sc *serverConn) processHeaderBlockFragment(st *stream, frag []byte, end bo
// REFUSED_STREAM."
if sc.unackedSettings == 0 {
// They should know better.
- return StreamError{st.id, ErrCodeProtocol}
+ return streamError(st.id, ErrCodeProtocol)
}
// Assume it's a network race, where they just haven't
// received our last SETTINGS update. But actually
// this can't happen yet, because we don't yet provide
// a way for users to adjust server parameters at
// runtime.
- return StreamError{st.id, ErrCodeRefusedStream}
+ return streamError(st.id, ErrCodeRefusedStream)
}
- rw, req, err := sc.newWriterAndRequest()
+ rw, req, err := sc.newWriterAndRequest(st, f)
if err != nil {
return err
}
+ st.reqTrailer = req.Trailer
+ if st.reqTrailer != nil {
+ st.trailer = make(http.Header)
+ }
st.body = req.Body.(*requestBody).pipe // may be nil
st.declBodyBytes = req.ContentLength
handler := sc.handler.ServeHTTP
- if !sc.hpackDecoder.EmitEnabled() {
+ if f.Truncated {
// Their header list was too long. Send a 431 error.
handler = handleHeaderListTooLong
+ } else if err := checkValidHTTP2Request(req); err != nil {
+ handler = new400Handler(err)
}
go sc.runHandler(rw, req, handler)
return nil
}
+func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
+ sc := st.sc
+ sc.serveG.check()
+ if st.gotTrailerHeader {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ st.gotTrailerHeader = true
+ if !f.StreamEnded() {
+ return streamError(st.id, ErrCodeProtocol)
+ }
+
+ if len(f.PseudoFields()) > 0 {
+ return streamError(st.id, ErrCodeProtocol)
+ }
+ if st.trailer != nil {
+ for _, hf := range f.RegularFields() {
+ key := sc.canonicalHeader(hf.Name)
+ if !ValidTrailerHeader(key) {
+ // TODO: send more details to the peer somehow. But http2 has
+ // no way to send debug data at a stream level. Discuss with
+ // HTTP folk.
+ return streamError(st.id, ErrCodeProtocol)
+ }
+ st.trailer[key] = append(st.trailer[key], hf.Value)
+ }
+ }
+ st.endStream()
+ return nil
+}
+
func (sc *serverConn) processPriority(f *PriorityFrame) error {
adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam)
return nil
@@ -1432,19 +1550,21 @@ func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority
}
}
-// resetPendingRequest zeros out all state related to a HEADERS frame
-// and its zero or more CONTINUATION frames sent to start a new
-// request.
-func (sc *serverConn) resetPendingRequest() {
+func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
sc.serveG.check()
- sc.req = requestParam{}
-}
-func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, error) {
- sc.serveG.check()
- rp := &sc.req
- if rp.invalidHeader || rp.method == "" || rp.path == "" ||
- (rp.scheme != "https" && rp.scheme != "http") {
+ method := f.PseudoValue("method")
+ path := f.PseudoValue("path")
+ scheme := f.PseudoValue("scheme")
+ authority := f.PseudoValue("authority")
+
+ isConnect := method == "CONNECT"
+ if isConnect {
+ if path != "" || scheme != "" || authority == "" {
+ return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+ }
+ } else if method == "" || path == "" ||
+ (scheme != "https" && scheme != "http") {
// See 8.1.2.6 Malformed Requests and Responses:
//
// Malformed requests or responses that are detected
@@ -1455,55 +1575,100 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err
// "All HTTP/2 requests MUST include exactly one valid
// value for the :method, :scheme, and :path
// pseudo-header fields"
- return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol}
+ return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+ }
+
+ bodyOpen := !f.StreamEnded()
+ if method == "HEAD" && bodyOpen {
+ // HEAD requests can't have bodies
+ return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
}
var tlsState *tls.ConnectionState // nil if not scheme https
- if rp.scheme == "https" {
+
+ if scheme == "https" {
tlsState = sc.tlsState
}
- authority := rp.authority
+
+ header := make(http.Header)
+ for _, hf := range f.RegularFields() {
+ header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+ }
+
if authority == "" {
- authority = rp.header.Get("Host")
+ authority = header.Get("Host")
}
- needsContinue := rp.header.Get("Expect") == "100-continue"
+ needsContinue := header.Get("Expect") == "100-continue"
if needsContinue {
- rp.header.Del("Expect")
+ header.Del("Expect")
}
// Merge Cookie headers into one "; "-delimited value.
- if cookies := rp.header["Cookie"]; len(cookies) > 1 {
- rp.header.Set("Cookie", strings.Join(cookies, "; "))
+ if cookies := header["Cookie"]; len(cookies) > 1 {
+ header.Set("Cookie", strings.Join(cookies, "; "))
+ }
+
+	// Set up trailers.
+ var trailer http.Header
+ for _, v := range header["Trailer"] {
+ for _, key := range strings.Split(v, ",") {
+ key = http.CanonicalHeaderKey(strings.TrimSpace(key))
+ switch key {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ // Bogus. (copy of http1 rules)
+ // Ignore.
+ default:
+ if trailer == nil {
+ trailer = make(http.Header)
+ }
+ trailer[key] = nil
+ }
+ }
}
- bodyOpen := rp.stream.state == stateOpen
+ delete(header, "Trailer")
+
body := &requestBody{
conn: sc,
- stream: rp.stream,
+ stream: st,
needsContinue: needsContinue,
}
- // TODO: handle asterisk '*' requests + test
- url, err := url.ParseRequestURI(rp.path)
- if err != nil {
- // TODO: find the right error code?
- return nil, nil, StreamError{rp.stream.id, ErrCodeProtocol}
+ var url_ *url.URL
+ var requestURI string
+ if isConnect {
+ url_ = &url.URL{Host: authority}
+ requestURI = authority // mimic HTTP/1 server behavior
+ } else {
+ var err error
+ url_, err = url.ParseRequestURI(path)
+ if err != nil {
+ return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
+ }
+ requestURI = path
}
req := &http.Request{
- Method: rp.method,
- URL: url,
+ Method: method,
+ URL: url_,
RemoteAddr: sc.remoteAddrStr,
- Header: rp.header,
- RequestURI: rp.path,
+ Header: header,
+ RequestURI: requestURI,
Proto: "HTTP/2.0",
ProtoMajor: 2,
ProtoMinor: 0,
TLS: tlsState,
Host: authority,
Body: body,
+ Trailer: trailer,
}
+ req = requestWithContext(req, st.ctx)
if bodyOpen {
+ // Disabled, per golang.org/issue/14960:
+ // st.reqBuf = sc.getRequestBodyBuf()
+ // TODO: remove this 64k of garbage per request (again, but without a data race):
+ buf := make([]byte, initialWindowSize)
+
body.pipe = &pipe{
- b: &fixedBuffer{buf: make([]byte, initialWindowSize)}, // TODO: share/remove XXX
+ b: &fixedBuffer{buf: buf},
}
- if vv, ok := rp.header["Content-Length"]; ok {
+ if vv, ok := header["Content-Length"]; ok {
req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
} else {
req.ContentLength = -1
@@ -1516,7 +1681,7 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err
rws.conn = sc
rws.bw = bwSave
rws.bw.Reset(chunkWriter{rws})
- rws.stream = rp.stream
+ rws.stream = st
rws.req = req
rws.body = body
@@ -1524,11 +1689,37 @@ func (sc *serverConn) newWriterAndRequest() (*responseWriter, *http.Request, err
return rw, req, nil
}
+func (sc *serverConn) getRequestBodyBuf() []byte {
+ sc.serveG.check()
+ if buf := sc.freeRequestBodyBuf; buf != nil {
+ sc.freeRequestBodyBuf = nil
+ return buf
+ }
+ return make([]byte, initialWindowSize)
+}
+
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
- defer rw.handlerDone()
- // TODO: catch panics like net/http.Server
+ didPanic := true
+ defer func() {
+ rw.rws.stream.cancelCtx()
+ if didPanic {
+ e := recover()
+ // Same as net/http:
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ sc.writeFrameFromHandler(frameWriteMsg{
+ write: handlerPanicRST{rw.rws.stream.id},
+ stream: rw.rws.stream,
+ })
+ sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+ return
+ }
+ rw.handlerDone()
+ }()
handler(rw, req)
+ didPanic = false
}
func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
@@ -1664,7 +1855,7 @@ type requestBody struct {
func (b *requestBody) Close() error {
if b.pipe != nil {
- b.pipe.CloseWithError(errClosedBody)
+ b.pipe.BreakWithError(errClosedBody)
}
b.closed = true
return nil
@@ -1715,11 +1906,15 @@ type responseWriterState struct {
// mutated by http.Handler goroutine:
handlerHeader http.Header // nil until called
snapHeader http.Header // snapshot of handlerHeader at WriteHeader time
+ trailers []string // set in writeChunk
status int // status code passed to WriteHeader
wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
sentHeader bool // have we sent the header frame?
handlerDone bool // handler has finished
+ sentContentLen int64 // non-zero if handler set a Content-Length header
+ wroteBytes int64
+
closeNotifierMu sync.Mutex // guards closeNotifierCh
closeNotifierCh chan bool // nil until first used
}
@@ -1728,6 +1923,23 @@ type chunkWriter struct{ rws *responseWriterState }
func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
+func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }
+
+// declareTrailer is called for each Trailer header when the
+// response header is written. It notes that a header will need to be
+// written in the trailers at the end of the response.
+func (rws *responseWriterState) declareTrailer(k string) {
+ k = http.CanonicalHeaderKey(k)
+ if !ValidTrailerHeader(k) {
+ // Forbidden by RFC 2616 14.40.
+ rws.conn.logf("ignoring invalid trailer %q", k)
+ return
+ }
+ if !strSliceContains(rws.trailers, k) {
+ rws.trailers = append(rws.trailers, k)
+ }
+}
+
// writeChunk writes chunks from the bufio.Writer. But because
// bufio.Writer may bypass its chunking, sometimes p may be
// arbitrarily large.
@@ -1738,16 +1950,38 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
if !rws.wroteHeader {
rws.writeHeader(200)
}
+
+ isHeadResp := rws.req.Method == "HEAD"
if !rws.sentHeader {
rws.sentHeader = true
- var ctype, clen string // implicit ones, if we can calculate it
- if rws.handlerDone && rws.snapHeader.Get("Content-Length") == "" {
+ var ctype, clen string
+ if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
+ rws.snapHeader.Del("Content-Length")
+ clen64, err := strconv.ParseInt(clen, 10, 64)
+ if err == nil && clen64 >= 0 {
+ rws.sentContentLen = clen64
+ } else {
+ clen = ""
+ }
+ }
+ if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
clen = strconv.Itoa(len(p))
}
- if rws.snapHeader.Get("Content-Type") == "" {
+ _, hasContentType := rws.snapHeader["Content-Type"]
+ if !hasContentType && bodyAllowedForStatus(rws.status) {
ctype = http.DetectContentType(p)
}
- endStream := rws.handlerDone && len(p) == 0
+ var date string
+ if _, ok := rws.snapHeader["Date"]; !ok {
+ // TODO(bradfitz): be faster here, like net/http? measure.
+ date = time.Now().UTC().Format(http.TimeFormat)
+ }
+
+ for _, v := range rws.snapHeader["Trailer"] {
+ foreachHeaderElement(v, rws.declareTrailer)
+ }
+
+ endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
streamID: rws.stream.id,
httpResCode: rws.status,
@@ -1755,6 +1989,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
endStream: endStream,
contentType: ctype,
contentLength: clen,
+ date: date,
})
if err != nil {
return 0, err
@@ -1763,16 +1998,89 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
return 0, nil
}
}
+ if isHeadResp {
+ return len(p), nil
+ }
if len(p) == 0 && !rws.handlerDone {
return 0, nil
}
- if err := rws.conn.writeDataFromHandler(rws.stream, p, rws.handlerDone); err != nil {
- return 0, err
+ if rws.handlerDone {
+ rws.promoteUndeclaredTrailers()
+ }
+
+ endStream := rws.handlerDone && !rws.hasTrailers()
+ if len(p) > 0 || endStream {
+ // only send a 0 byte DATA frame if we're ending the stream.
+ if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
+ return 0, err
+ }
+ }
+
+ if rws.handlerDone && rws.hasTrailers() {
+ err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ streamID: rws.stream.id,
+ h: rws.handlerHeader,
+ trailers: rws.trailers,
+ endStream: true,
+ })
+ return len(p), err
}
return len(p), nil
}
+// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
+// that, if present, signals that the map entry is actually for
+// the response trailers, and not the response headers. The prefix
+// is stripped after the ServeHTTP call finishes and the values are
+// sent in the trailers.
+//
+// This mechanism is intended only for trailers that are not known
+// prior to the headers being written. If the set of trailers is fixed
+// or known before the header is written, the normal Go trailers mechanism
+// is preferred:
+// https://golang.org/pkg/net/http/#ResponseWriter
+// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+const TrailerPrefix = "Trailer:"
+
+// promoteUndeclaredTrailers permits http.Handlers to set trailers
+// after the header has already been flushed. Because the Go
+// ResponseWriter interface has no way to set Trailers (only the
+// Header), and because we didn't want to expand the ResponseWriter
+// interface, and because nobody used trailers, and because RFC 2616
+// says you SHOULD (but not must) predeclare any trailers in the
+// header, the official ResponseWriter rules said trailers in Go must
+// be predeclared, and then we reuse the same ResponseWriter.Header()
+// map to mean both Headers and Trailers. When it's time to write the
+// Trailers, we pick out the fields of Headers that were declared as
+// trailers. That worked for a while, until we found the first major
+// user of Trailers in the wild: gRPC (using them only over http2),
+// and gRPC libraries permit setting trailers mid-stream without
+// predeclaring them. So: change of plans. We still permit the old
+// way, but we also permit this hack: if a Header() key begins with
+// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
+// invalid token byte anyway, there is no ambiguity. (And it's already
+// filtered out.) It's mildly hacky, but not terrible.
+//
+// This method runs after the Handler is done and promotes any Header
+// fields to be trailers.
+func (rws *responseWriterState) promoteUndeclaredTrailers() {
+ for k, vv := range rws.handlerHeader {
+ if !strings.HasPrefix(k, TrailerPrefix) {
+ continue
+ }
+ trailerKey := strings.TrimPrefix(k, TrailerPrefix)
+ rws.declareTrailer(trailerKey)
+ rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
+ }
+
+ if len(rws.trailers) > 1 {
+ sorter := sorterPool.Get().(*sorter)
+ sorter.SortStrings(rws.trailers)
+ sorterPool.Put(sorter)
+ }
+}
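A short usage sketch of the TrailerPrefix escape hatch described above; the trailer key "X-Status" is just an example:

func handler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	io.WriteString(w, "partial body...")
	w.(http.Flusher).Flush() // headers are on the wire now
	// Too late to predeclare a trailer, so use the prefix form; the
	// "Trailer:" prefix is stripped and X-Status is sent as a trailer.
	w.Header().Set("Trailer:X-Status", "ok")
}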
+
func (w *responseWriter) Flush() {
rws := w.rws
if rws == nil {
@@ -1875,6 +2183,15 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int,
if !rws.wroteHeader {
w.WriteHeader(200)
}
+ if !bodyAllowedForStatus(rws.status) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
+ if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
+ // TODO: send a RST_STREAM
+ return 0, errors.New("http2: handler wrote more than declared Content-Length")
+ }
+
if dataB != nil {
return rws.bw.Write(dataB)
} else {
@@ -1884,11 +2201,92 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int,
func (w *responseWriter) handlerDone() {
rws := w.rws
- if rws == nil {
- panic("handlerDone called twice")
- }
rws.handlerDone = true
w.Flush()
w.rws = nil
responseWriterStatePool.Put(rws)
}
+
+// foreachHeaderElement splits v according to the "#rule" construction
+// in RFC 2616 section 2.1 and calls fn for each non-empty element.
+func foreachHeaderElement(v string, fn func(string)) {
+ v = textproto.TrimString(v)
+ if v == "" {
+ return
+ }
+ if !strings.Contains(v, ",") {
+ fn(v)
+ return
+ }
+ for _, f := range strings.Split(v, ",") {
+ if f = textproto.TrimString(f); f != "" {
+ fn(f)
+ }
+ }
+}
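For example, an illustrative call relying on the function above (assuming fmt is imported):

func exampleForeach() {
	foreachHeaderElement(" gzip, , br ", func(e string) {
		fmt.Println(e) // prints "gzip" then "br"; empty elements are skipped
	})
}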
+
+// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
+var connHeaders = []string{
+ "Connection",
+ "Keep-Alive",
+ "Proxy-Connection",
+ "Transfer-Encoding",
+ "Upgrade",
+}
+
+// checkValidHTTP2Request checks whether req is a valid HTTP/2 request,
+// per RFC 7540 Section 8.1.2.2.
+// The returned error is reported to users.
+func checkValidHTTP2Request(req *http.Request) error {
+ for _, h := range connHeaders {
+ if _, ok := req.Header[h]; ok {
+ return fmt.Errorf("request header %q is not valid in HTTP/2", h)
+ }
+ }
+ te := req.Header["Te"]
+ if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
+ return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
+ }
+ return nil
+}
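A hedged in-package sketch of what the check rejects and allows:

func exampleCheck() {
	req, _ := http.NewRequest("GET", "https://example.com/", nil)
	req.Header.Set("TE", "trailers") // the one value TE may carry in HTTP/2
	_ = checkValidHTTP2Request(req)  // nil: request is acceptable

	req.Header.Set("Keep-Alive", "timeout=5") // connection-specific header
	_ = checkValidHTTP2Request(req)           // non-nil; request is served by new400Handler(err)
}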
+
+func new400Handler(err error) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ }
+}
+
+// ValidTrailerHeader reports whether name is a valid header field name to appear
+// in trailers.
+// See: http://tools.ietf.org/html/rfc7230#section-4.1.2
+func ValidTrailerHeader(name string) bool {
+ name = http.CanonicalHeaderKey(name)
+ if strings.HasPrefix(name, "If-") || badTrailer[name] {
+ return false
+ }
+ return true
+}
+
+var badTrailer = map[string]bool{
+ "Authorization": true,
+ "Cache-Control": true,
+ "Connection": true,
+ "Content-Encoding": true,
+ "Content-Length": true,
+ "Content-Range": true,
+ "Content-Type": true,
+ "Expect": true,
+ "Host": true,
+ "Keep-Alive": true,
+ "Max-Forwards": true,
+ "Pragma": true,
+ "Proxy-Authenticate": true,
+ "Proxy-Authorization": true,
+ "Proxy-Connection": true,
+ "Range": true,
+ "Realm": true,
+ "Te": true,
+ "Trailer": true,
+ "Transfer-Encoding": true,
+ "Www-Authenticate": true,
+}
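Concretely, with example keys:

func exampleValidTrailer() {
	_ = ValidTrailerHeader("X-Checksum")        // true
	_ = ValidTrailerHeader("if-modified-since") // false: canonicalizes to an "If-" key
	_ = ValidTrailerHeader("Content-Length")    // false: listed in badTrailer
}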
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 6f0d1256..3cefc22a 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -9,19 +9,24 @@ package http2
import (
"bufio"
"bytes"
+ "compress/gzip"
"crypto/tls"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
+ "math"
"net"
"net/http"
+ "sort"
"strconv"
"strings"
"sync"
+ "time"
"golang.org/x/net/http2/hpack"
+ "golang.org/x/net/lex/httplex"
)
const (
@@ -37,6 +42,8 @@ const (
// transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
// a stream-level WINDOW_UPDATE for at a time.
transportDefaultStreamMinRefresh = 4 << 10
+
+ defaultUserAgent = "Go-http-client/2.0"
)
// Transport is an HTTP/2 Transport.
@@ -57,74 +64,195 @@ type Transport struct {
// tls.Client. If nil, the default configuration is used.
TLSClientConfig *tls.Config
- // TODO: switch to RWMutex
- // TODO: add support for sharing conns based on cert names
- // (e.g. share conn for googleapis.com and appspot.com)
- connMu sync.Mutex
- conns map[string][]*clientConn // key is host:port
+ // ConnPool optionally specifies an alternate connection pool to use.
+ // If nil, the default is used.
+ ConnPool ClientConnPool
+
+ // DisableCompression, if true, prevents the Transport from
+ // requesting compression with an "Accept-Encoding: gzip"
+ // request header when the Request contains no existing
+ // Accept-Encoding value. If the Transport requests gzip on
+ // its own and gets a gzipped response, it's transparently
+ // decoded in the Response.Body. However, if the user
+ // explicitly requested gzip it is not automatically
+ // uncompressed.
+ DisableCompression bool
+
+ // AllowHTTP, if true, permits HTTP/2 requests using the insecure,
+ // plain-text "http" scheme. Note that this does not enable h2c support.
+ AllowHTTP bool
+
+ // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
+	// send in the initial settings frame. It is the maximum number of
+	// bytes of response headers that are allowed. Unlike the http2 spec,
+	// zero here means to use a default limit (currently 10MB). If you
+	// actually want to advertise an unlimited value to the peer, Transport
+ // interprets the highest possible value here (0xffffffff or 1<<32-1)
+ // to mean no limit.
+ MaxHeaderListSize uint32
+
+ // t1, if non-nil, is the standard library Transport using
+ // this transport. Its settings are used (but not its
+ // RoundTrip method, etc).
+ t1 *http.Transport
+
+ connPoolOnce sync.Once
+ connPoolOrDef ClientConnPool // non-nil version of ConnPool
+}
+
+func (t *Transport) maxHeaderListSize() uint32 {
+ if t.MaxHeaderListSize == 0 {
+ return 10 << 20
+ }
+ if t.MaxHeaderListSize == 0xffffffff {
+ return 0
+ }
+ return t.MaxHeaderListSize
+}
+
+func (t *Transport) disableCompression() bool {
+ return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
+}
+
+var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6")
+
+// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
+// It requires Go 1.6 or later and returns an error if the net/http package is too old
+// or if t1 has already been HTTP/2-enabled.
+func ConfigureTransport(t1 *http.Transport) error {
+ _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go
+ return err
+}
+
+func (t *Transport) connPool() ClientConnPool {
+ t.connPoolOnce.Do(t.initConnPool)
+ return t.connPoolOrDef
}
-// clientConn is the state of a single HTTP/2 client connection to an
+func (t *Transport) initConnPool() {
+ if t.ConnPool != nil {
+ t.connPoolOrDef = t.ConnPool
+ } else {
+ t.connPoolOrDef = &clientConnPool{t: t}
+ }
+}
+
+// ClientConn is the state of a single HTTP/2 client connection to an
// HTTP/2 server.
-type clientConn struct {
- t *Transport
- tconn net.Conn
- tlsState *tls.ConnectionState
- connKey []string // key(s) this connection is cached in, in t.conns
+type ClientConn struct {
+ t *Transport
+ tconn net.Conn // usually *tls.Conn, except specialized impls
+ tlsState *tls.ConnectionState // nil only for specialized impls
+ singleUse bool // whether being used for a single http.Request
// readLoop goroutine fields:
readerDone chan struct{} // closed on error
readerErr error // set before readerDone is closed
- mu sync.Mutex // guards following
- cond *sync.Cond // hold mu; broadcast on flow/closed changes
- flow flow // our conn-level flow control quota (cs.flow is per stream)
- inflow flow // peer's conn-level flow control
- closed bool
- goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
- streams map[uint32]*clientStream // client-initiated
- nextStreamID uint32
- bw *bufio.Writer
- br *bufio.Reader
- fr *Framer
- // Settings from peer:
+ mu sync.Mutex // guards following
+ cond *sync.Cond // hold mu; broadcast on flow/closed changes
+ flow flow // our conn-level flow control quota (cs.flow is per stream)
+ inflow flow // peer's conn-level flow control
+ closed bool
+ wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
+ goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
+ goAwayDebug string // goAway frame's debug data, retained as a string
+ streams map[uint32]*clientStream // client-initiated
+ nextStreamID uint32
+ bw *bufio.Writer
+ br *bufio.Reader
+ fr *Framer
+ lastActive time.Time
+ // Settings from peer: (also guarded by mu)
maxFrameSize uint32
maxConcurrentStreams uint32
initialWindowSize uint32
- hbuf bytes.Buffer // HPACK encoder writes into this
- henc *hpack.Encoder
- freeBuf [][]byte
- wmu sync.Mutex // held while writing; acquire AFTER wmu if holding both
+ hbuf bytes.Buffer // HPACK encoder writes into this
+ henc *hpack.Encoder
+ freeBuf [][]byte
+
+ wmu sync.Mutex // held while writing; acquire AFTER mu if holding both
werr error // first write error that has occurred
}
// clientStream is the state for a single HTTP/2 stream. One of these
// is created for each Transport.RoundTrip call.
type clientStream struct {
- cc *clientConn
- ID uint32
- resc chan resAndError
- bufPipe pipe // buffered pipe with the flow-controlled response payload
-
- flow flow // guarded by cc.mu
- inflow flow // guarded by cc.mu
+ cc *ClientConn
+ req *http.Request
+ trace *clientTrace // or nil
+ ID uint32
+ resc chan resAndError
+ bufPipe pipe // buffered pipe with the flow-controlled response payload
+ requestedGzip bool
+	on100         func() // optional code to run if we get a 100-continue response
+
+ flow flow // guarded by cc.mu
+ inflow flow // guarded by cc.mu
+ bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+ readErr error // sticky read error; owned by transportResponseBody.Read
+ stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
peerReset chan struct{} // closed on peer reset
resetErr error // populated before peerReset is closed
+
+	done chan struct{} // closed when stream is removed from cc.streams map; close calls guarded by cc.mu
+
+ // owned by clientConnReadLoop:
+ firstByte bool // got the first response byte
+ pastHeaders bool // got first MetaHeadersFrame (actual headers)
+ pastTrailers bool // got optional second MetaHeadersFrame (trailers)
+
+ trailer http.Header // accumulated trailers
+ resTrailer *http.Header // client's Response.Trailer
}
-// checkReset reports any error sent in a RST_STREAM frame by the
-// server.
-func (cs *clientStream) checkReset() error {
+// awaitRequestCancel runs in its own goroutine and waits for the user
+// to cancel a RoundTrip request, its context to expire, or for the
+// request to be done (any way it might be removed from the cc.streams
+// map: peer reset, successful completion, TCP connection breakage,
+// etc.).
+func (cs *clientStream) awaitRequestCancel(req *http.Request) {
+ ctx := reqContext(req)
+ if req.Cancel == nil && ctx.Done() == nil {
+ return
+ }
+ select {
+ case <-req.Cancel:
+ cs.bufPipe.CloseWithError(errRequestCanceled)
+ cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ case <-ctx.Done():
+ cs.bufPipe.CloseWithError(ctx.Err())
+ cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ case <-cs.done:
+ }
+}
+
+// checkResetOrDone reports any error sent in a RST_STREAM frame by the
+// server, or errStreamClosed if the stream is complete.
+func (cs *clientStream) checkResetOrDone() error {
select {
case <-cs.peerReset:
return cs.resetErr
+ case <-cs.done:
+ return errStreamClosed
default:
return nil
}
}
+func (cs *clientStream) abortRequestBodyWrite(err error) {
+ if err == nil {
+ panic("nil error")
+ }
+ cc := cs.cc
+ cc.mu.Lock()
+ cs.stopReqBody = err
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+}
+
type stickyErrWriter struct {
w io.Writer
err *error
@@ -139,27 +267,54 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
return
}
+var ErrNoCachedConn = errors.New("http2: no cached connection was available")
+
+// RoundTripOpt are options for the Transport.RoundTripOpt method.
+type RoundTripOpt struct {
+ // OnlyCachedConn controls whether RoundTripOpt may
+ // create a new TCP connection. If set true and
+ // no cached connection is available, RoundTripOpt
+ // will return ErrNoCachedConn.
+ OnlyCachedConn bool
+}
+
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
- if req.URL.Scheme != "https" {
- return nil, errors.New("http2: unsupported scheme")
+ return t.RoundTripOpt(req, RoundTripOpt{})
+}
+
+// authorityAddr converts a given authority (a host/IP, or host:port /
+// ip:port) to a host:port, adding the scheme's default port (443 for
+// "https", 80 for "http") if needed.
+func authorityAddr(scheme string, authority string) (addr string) {
+ if _, _, err := net.SplitHostPort(authority); err == nil {
+ return authority
+ }
+ port := "443"
+ if scheme == "http" {
+ port = "80"
}
+ return net.JoinHostPort(authority, port)
+}
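For example (illustrative values):

func exampleAuthorityAddr() {
	_ = authorityAddr("https", "example.com")      // "example.com:443"
	_ = authorityAddr("http", "example.com")       // "example.com:80"
	_ = authorityAddr("https", "example.com:8443") // "example.com:8443" (port kept)
}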
- host, port, err := net.SplitHostPort(req.URL.Host)
- if err != nil {
- host = req.URL.Host
- port = "443"
+// RoundTripOpt is like RoundTrip, but takes options.
+func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
+ if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
+ return nil, errors.New("http2: unsupported scheme")
}
+ addr := authorityAddr(req.URL.Scheme, req.URL.Host)
for {
- cc, err := t.getClientConn(host, port)
+ cc, err := t.connPool().GetClientConn(req, addr)
if err != nil {
+ t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
return nil, err
}
- res, err := cc.roundTrip(req)
- if shouldRetryRequest(err) { // TODO: or clientconn is overloaded (too many outstanding requests)?
+ traceGotConn(req, cc)
+ res, err := cc.RoundTrip(req)
+ if shouldRetryRequest(req, err) {
continue
}
if err != nil {
+ t.vlogf("RoundTrip failure: %v", err)
return nil, err
}
return res, nil
@@ -170,113 +325,32 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
// connected from previous requests but are now sitting idle.
// It does not interrupt any connections currently in use.
func (t *Transport) CloseIdleConnections() {
- t.connMu.Lock()
- defer t.connMu.Unlock()
- for _, vv := range t.conns {
- for _, cc := range vv {
- cc.closeIfIdle()
- }
- }
-}
-
-var errClientConnClosed = errors.New("http2: client conn is closed")
-
-func shouldRetryRequest(err error) bool {
- // TODO: or GOAWAY graceful shutdown stuff
- return err == errClientConnClosed
-}
-
-func (t *Transport) removeClientConn(cc *clientConn) {
- t.connMu.Lock()
- defer t.connMu.Unlock()
- for _, key := range cc.connKey {
- vv, ok := t.conns[key]
- if !ok {
- continue
- }
- newList := filterOutClientConn(vv, cc)
- if len(newList) > 0 {
- t.conns[key] = newList
- } else {
- delete(t.conns, key)
- }
- }
-}
-
-func filterOutClientConn(in []*clientConn, exclude *clientConn) []*clientConn {
- out := in[:0]
- for _, v := range in {
- if v != exclude {
- out = append(out, v)
- }
- }
- // If we filtered it out, zero out the last item to prevent
- // the GC from seeing it.
- if len(in) != len(out) {
- in[len(in)-1] = nil
+ if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok {
+ cp.closeIdleConnections()
}
- return out
}
-// AddIdleConn adds c as an idle conn for Transport.
-// It assumes that c has not yet exchanged SETTINGS frames.
-// The addr maybe be either "host" or "host:port".
-func (t *Transport) AddIdleConn(addr string, c *tls.Conn) error {
- var key string
- _, _, err := net.SplitHostPort(addr)
- if err == nil {
- key = addr
- } else {
- key = addr + ":443"
- }
- cc, err := t.newClientConn(key, c)
- if err != nil {
- return err
- }
-
- t.addConn(key, cc)
- return nil
-}
+var (
+ errClientConnClosed = errors.New("http2: client conn is closed")
+ errClientConnUnusable = errors.New("http2: client conn not usable")
+)
-func (t *Transport) addConn(key string, cc *clientConn) {
- t.connMu.Lock()
- defer t.connMu.Unlock()
- if t.conns == nil {
- t.conns = make(map[string][]*clientConn)
- }
- t.conns[key] = append(t.conns[key], cc)
+func shouldRetryRequest(req *http.Request, err error) bool {
+ // TODO: retry GET requests (no bodies) more aggressively, if shutdown
+ // before response.
+ return err == errClientConnUnusable
}
-func (t *Transport) getClientConn(host, port string) (*clientConn, error) {
- key := net.JoinHostPort(host, port)
-
- t.connMu.Lock()
- for _, cc := range t.conns[key] {
- if cc.canTakeNewRequest() {
- t.connMu.Unlock()
- return cc, nil
- }
- }
- t.connMu.Unlock()
-
- // TODO(bradfitz): use a singleflight.Group to only lock once per 'key'.
- // Probably need to vendor it in as github.com/golang/sync/singleflight
- // though, since the net package already uses it? Also lines up with
- // sameer, bcmills, et al wanting to open source some sync stuff.
- cc, err := t.dialClientConn(host, port, key)
+func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) {
+ host, _, err := net.SplitHostPort(addr)
if err != nil {
return nil, err
}
- t.addConn(key, cc)
- return cc, nil
-}
-
-func (t *Transport) dialClientConn(host, port, key string) (*clientConn, error) {
- tconn, err := t.dialTLS()("tcp", net.JoinHostPort(host, port), t.newTLSConfig(host))
+ tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host))
if err != nil {
return nil, err
}
- return t.newClientConn(key, tconn)
+ return t.newClientConn(tconn, singleUse)
}
func (t *Transport) newTLSConfig(host string) *tls.Config {
@@ -284,8 +358,12 @@ func (t *Transport) newTLSConfig(host string) *tls.Config {
if t.TLSClientConfig != nil {
*cfg = *t.TLSClientConfig
}
- cfg.NextProtos = []string{NextProtoTLS} // TODO: don't override if already in list
- cfg.ServerName = host
+ if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
+ cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
+ }
+ if cfg.ServerName == "" {
+ cfg.ServerName = host
+ }
return cfg
}
@@ -319,44 +397,70 @@ func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.C
return cn, nil
}
-func (t *Transport) newClientConn(key string, tconn net.Conn) (*clientConn, error) {
- if _, err := tconn.Write(clientPreface); err != nil {
- return nil, err
+// disableKeepAlives reports whether connections should be closed as
+// soon as possible after handling the first request.
+func (t *Transport) disableKeepAlives() bool {
+ return t.t1 != nil && t.t1.DisableKeepAlives
+}
+
+func (t *Transport) expectContinueTimeout() time.Duration {
+ if t.t1 == nil {
+ return 0
}
+ return transportExpectContinueTimeout(t.t1)
+}
+
+func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
+ return t.newClientConn(c, false)
+}
- cc := &clientConn{
+func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
+ cc := &ClientConn{
t: t,
- tconn: tconn,
- connKey: []string{key}, // TODO: cert's validated hostnames too
+ tconn: c,
readerDone: make(chan struct{}),
nextStreamID: 1,
maxFrameSize: 16 << 10, // spec default
initialWindowSize: 65535, // spec default
maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
streams: make(map[uint32]*clientStream),
+ singleUse: singleUse,
+ wantSettingsAck: true,
}
+ if VerboseLogs {
+ t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
+ }
+
cc.cond = sync.NewCond(&cc.mu)
cc.flow.add(int32(initialWindowSize))
// TODO: adjust this writer size to account for frame size +
// MTU + crypto/tls record padding.
- cc.bw = bufio.NewWriter(stickyErrWriter{tconn, &cc.werr})
- cc.br = bufio.NewReader(tconn)
+ cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
+ cc.br = bufio.NewReader(c)
cc.fr = NewFramer(cc.bw, cc.br)
+ cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+ cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
+
+ // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
+ // henc in response to SETTINGS frames?
cc.henc = hpack.NewEncoder(&cc.hbuf)
- type connectionStater interface {
- ConnectionState() tls.ConnectionState
- }
- if cs, ok := tconn.(connectionStater); ok {
+ if cs, ok := c.(connectionStater); ok {
state := cs.ConnectionState()
cc.tlsState = &state
}
- cc.fr.WriteSettings(
- Setting{ID: SettingEnablePush, Val: 0},
- Setting{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
- )
+ initialSettings := []Setting{
+ {ID: SettingEnablePush, Val: 0},
+ {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
+ }
+ if max := t.maxHeaderListSize(); max != 0 {
+ initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
+ }
+
+ cc.bw.Write(clientPreface)
+ cc.fr.WriteSettings(initialSettings...)
cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
cc.bw.Flush()
@@ -364,61 +468,55 @@ func (t *Transport) newClientConn(key string, tconn net.Conn) (*clientConn, erro
return nil, cc.werr
}
- // Read the obligatory SETTINGS frame
- f, err := cc.fr.ReadFrame()
- if err != nil {
- return nil, err
- }
- sf, ok := f.(*SettingsFrame)
- if !ok {
- return nil, fmt.Errorf("expected settings frame, got: %T", f)
- }
- cc.fr.WriteSettingsAck()
- cc.bw.Flush()
-
- sf.ForeachSetting(func(s Setting) error {
- switch s.ID {
- case SettingMaxFrameSize:
- cc.maxFrameSize = s.Val
- case SettingMaxConcurrentStreams:
- cc.maxConcurrentStreams = s.Val
- case SettingInitialWindowSize:
- cc.initialWindowSize = s.Val
- default:
- // TODO(bradfitz): handle more
- t.vlogf("Unhandled Setting: %v", s)
- }
- return nil
- })
-
go cc.readLoop()
return cc, nil
}
-func (cc *clientConn) setGoAway(f *GoAwayFrame) {
+func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
cc.mu.Lock()
defer cc.mu.Unlock()
+
+ old := cc.goAway
cc.goAway = f
+
+ // Merge the previous and current GoAway error frames.
+ if cc.goAwayDebug == "" {
+ cc.goAwayDebug = string(f.DebugData())
+ }
+ if old != nil && old.ErrCode != ErrCodeNo {
+ cc.goAway.ErrCode = old.ErrCode
+ }
}
-func (cc *clientConn) canTakeNewRequest() bool {
+func (cc *ClientConn) CanTakeNewRequest() bool {
cc.mu.Lock()
defer cc.mu.Unlock()
- return cc.goAway == nil &&
+ return cc.canTakeNewRequestLocked()
+}
+
+func (cc *ClientConn) canTakeNewRequestLocked() bool {
+ if cc.singleUse && cc.nextStreamID > 1 {
+ return false
+ }
+ return cc.goAway == nil && !cc.closed &&
int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
- cc.nextStreamID < 2147483647
+ cc.nextStreamID < math.MaxInt32
}
-func (cc *clientConn) closeIfIdle() {
+func (cc *ClientConn) closeIfIdle() {
cc.mu.Lock()
if len(cc.streams) > 0 {
cc.mu.Unlock()
return
}
cc.closed = true
+ nextID := cc.nextStreamID
// TODO: do clients send GOAWAY too? maybe? Just Close:
cc.mu.Unlock()
+ if VerboseLogs {
+ cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
+ }
cc.tconn.Close()
}
@@ -428,7 +526,7 @@ const maxAllocFrameSize = 512 << 10
// They're capped at the smaller of the peer's max frame size and 512KB
// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
// buffers.
-func (cc *clientConn) frameScratchBuffer() []byte {
+func (cc *ClientConn) frameScratchBuffer() []byte {
cc.mu.Lock()
size := cc.maxFrameSize
if size > maxAllocFrameSize {
@@ -445,7 +543,7 @@ func (cc *clientConn) frameScratchBuffer() []byte {
return make([]byte, size)
}
-func (cc *clientConn) putFrameScratchBuffer(buf []byte) {
+func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
cc.mu.Lock()
defer cc.mu.Unlock()
const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
@@ -462,121 +560,352 @@ func (cc *clientConn) putFrameScratchBuffer(buf []byte) {
// forget about it.
}
-func (cc *clientConn) roundTrip(req *http.Request) (*http.Response, error) {
- cc.mu.Lock()
+// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
+// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
+var errRequestCanceled = errors.New("net/http: request canceled")
+
+func commaSeparatedTrailers(req *http.Request) (string, error) {
+ keys := make([]string, 0, len(req.Trailer))
+ for k := range req.Trailer {
+ k = http.CanonicalHeaderKey(k)
+ switch k {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return "", &badStringError{"invalid Trailer key", k}
+ }
+ keys = append(keys, k)
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ // TODO: could do better allocation-wise here, but trailers are rare,
+ // so being lazy for now.
+ return strings.Join(keys, ","), nil
+ }
+ return "", nil
+}
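For instance, an in-package sketch of a request declaring two trailers (example keys; assumes fmt is imported):

func exampleTrailers() {
	req, _ := http.NewRequest("PUT", "https://example.com/upload", nil)
	req.Trailer = http.Header{"x-checksum": nil, "Expires": nil}
	s, _ := commaSeparatedTrailers(req)
	fmt.Println(s) // "Expires,X-Checksum": keys are canonicalized and sorted
}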
+
+func (cc *ClientConn) responseHeaderTimeout() time.Duration {
+ if cc.t.t1 != nil {
+ return cc.t.t1.ResponseHeaderTimeout
+ }
+ // No way to do this (yet?) with just an http2.Transport. Probably
+	// no need. Request.Cancel is the new way. We only need to support
+ // this for compatibility with the old http.Transport fields when
+ // we're doing transparent http2.
+ return 0
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers.
+// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
+// Certain headers are special-cased as okay but not transmitted later.
+func checkConnHeaders(req *http.Request) error {
+ if v := req.Header.Get("Upgrade"); v != "" {
+ return errors.New("http2: invalid Upgrade request header")
+ }
+ if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 {
+ return errors.New("http2: invalid Transfer-Encoding request header")
+ }
+ if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 {
+ return errors.New("http2: invalid Connection request header")
+ }
+ return nil
+}
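Examples of what passes and what fails, as a hedged in-package sketch:

func exampleConnHeaders() {
	req, _ := http.NewRequest("GET", "https://example.com/", nil)
	req.Header.Set("Connection", "close") // special-cased as ok (and not transmitted)
	_ = checkConnHeaders(req)             // nil

	req.Header.Set("Upgrade", "websocket")
	_ = checkConnHeaders(req) // non-nil: Upgrade is never valid in HTTP/2
}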
+
+func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) {
+ body = req.Body
+ if body == nil {
+ return nil, 0
+ }
+ if req.ContentLength != 0 {
+ return req.Body, req.ContentLength
+ }
+
+ // We have a body but a zero content length. Test to see if
+ // it's actually zero or just unset.
+ var buf [1]byte
+ n, rerr := io.ReadFull(body, buf[:])
+ if rerr != nil && rerr != io.EOF {
+ return errorReader{rerr}, -1
+ }
+ if n == 1 {
+ // Oh, guess there is data in this Body Reader after all.
+ // The ContentLength field just wasn't set.
+	// Stitch the Body back together again, re-attaching our
+ // consumed byte.
+ return io.MultiReader(bytes.NewReader(buf[:]), body), -1
+ }
+ // Body is actually zero bytes.
+ return nil, 0
+}
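The one-byte probe above generalizes to any io.Reader. A standalone sketch of the technique, assuming only the standard library (io, bytes):

func probeNonEmpty(r io.Reader) (io.Reader, bool, error) {
	var buf [1]byte
	if _, err := io.ReadFull(r, buf[:]); err == io.EOF {
		return r, false, nil // truly empty: zero bytes before EOF
	} else if err != nil {
		return r, false, err
	}
	// Stitch the consumed byte back in front of the remainder.
	return io.MultiReader(bytes.NewReader(buf[:]), r), true, nil
}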
+
+func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
+ if err := checkConnHeaders(req); err != nil {
+ return nil, err
+ }
+
+ trailers, err := commaSeparatedTrailers(req)
+ if err != nil {
+ return nil, err
+ }
+ hasTrailers := trailers != ""
+
+ body, contentLen := bodyAndLength(req)
+ hasBody := body != nil
- if cc.closed {
+ cc.mu.Lock()
+ cc.lastActive = time.Now()
+ if cc.closed || !cc.canTakeNewRequestLocked() {
+ cc.mu.Unlock()
+ return nil, errClientConnUnusable
+ }
+
+ // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+ var requestedGzip bool
+ if !cc.t.disableCompression() &&
+ req.Header.Get("Accept-Encoding") == "" &&
+ req.Header.Get("Range") == "" &&
+ req.Method != "HEAD" {
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: http://www.gzip.org/zlib/zlib_faq.html#faq38
+ //
+ // Note that we don't request this for HEAD requests,
+ // due to a bug in nginx:
+ // http://trac.nginx.org/nginx/ticket/358
+ // https://golang.org/issue/5522
+ //
+ // We don't request gzip if the request is for a range, since
+ // auto-decoding a portion of a gzipped document will just fail
+ // anyway. See https://golang.org/issue/8923
+ requestedGzip = true
+ }
+
+ // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
+ // sent by writeRequestBody below, along with any Trailers,
+ // again in form HEADERS{1}, CONTINUATION{0,})
+ hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
+ if err != nil {
cc.mu.Unlock()
- return nil, errClientConnClosed
+ return nil, err
}
cs := cc.newStream()
- hasBody := req.Body != nil
-
- // we send: HEADERS[+CONTINUATION] + (DATA?)
- hdrs := cc.encodeHeaders(req)
- first := true
+ cs.req = req
+ cs.trace = requestTrace(req)
+ cs.requestedGzip = requestedGzip
+ bodyWriter := cc.t.getBodyWriterState(cs, body)
+ cs.on100 = bodyWriter.on100
cc.wmu.Lock()
- frameSize := int(cc.maxFrameSize)
- for len(hdrs) > 0 && cc.werr == nil {
- chunk := hdrs
- if len(chunk) > frameSize {
- chunk = chunk[:frameSize]
- }
- hdrs = hdrs[len(chunk):]
- endHeaders := len(hdrs) == 0
- if first {
- cc.fr.WriteHeaders(HeadersFrameParam{
- StreamID: cs.ID,
- BlockFragment: chunk,
- EndStream: !hasBody,
- EndHeaders: endHeaders,
- })
- first = false
- } else {
- cc.fr.WriteContinuation(cs.ID, endHeaders, chunk)
- }
- }
- cc.bw.Flush()
- werr := cc.werr
+ endStream := !hasBody && !hasTrailers
+ werr := cc.writeHeaders(cs.ID, endStream, hdrs)
cc.wmu.Unlock()
+ traceWroteHeaders(cs.trace)
cc.mu.Unlock()
if werr != nil {
+ if hasBody {
+ req.Body.Close() // per RoundTripper contract
+ bodyWriter.cancel()
+ }
+ cc.forgetStreamID(cs.ID)
+ // Don't bother sending a RST_STREAM (our write already failed;
+ // no need to keep writing)
+ traceWroteRequest(cs.trace, werr)
return nil, werr
}
- var bodyCopyErrc chan error
- var gotResHeaders chan struct{} // closed on resheaders
+ var respHeaderTimer <-chan time.Time
if hasBody {
- bodyCopyErrc = make(chan error, 1)
- gotResHeaders = make(chan struct{})
- go func() {
- bodyCopyErrc <- cs.writeRequestBody(req.Body, gotResHeaders)
- }()
+ bodyWriter.scheduleBodyWrite()
+ } else {
+ traceWroteRequest(cs.trace, nil)
+ if d := cc.responseHeaderTimeout(); d != 0 {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+ respHeaderTimer = timer.C
+ }
+ }
+
+ readLoopResCh := cs.resc
+ bodyWritten := false
+ ctx := reqContext(req)
+
+ handleReadLoopResponse := func(re resAndError) (*http.Response, error) {
+ res := re.res
+ if re.err != nil || res.StatusCode > 299 {
+ // On error or status code 3xx, 4xx, 5xx, etc., abort any
+ // ongoing write, assuming that the server doesn't care
+ // about our request body. If the server replied with 1xx or
+ // 2xx, however, then assume the server DOES potentially
+ // want our body (e.g. full-duplex streaming:
+ // golang.org/issue/13444). If it turns out the server
+ // doesn't, they'll RST_STREAM us soon enough. This is a
+ // heuristic to avoid adding knobs to Transport. Hopefully
+ // we can keep it.
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWrite)
+ }
+ if re.err != nil {
+ cc.forgetStreamID(cs.ID)
+ return nil, re.err
+ }
+ res.Request = req
+ res.TLS = cc.tlsState
+ return res, nil
}
for {
select {
- case re := <-cs.resc:
- if gotResHeaders != nil {
- close(gotResHeaders)
+ case re := <-readLoopResCh:
+ return handleReadLoopResponse(re)
+ case <-respHeaderTimer:
+ cc.forgetStreamID(cs.ID)
+ if !hasBody || bodyWritten {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ } else {
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
}
- if re.err != nil {
- return nil, re.err
+ return nil, errTimeout
+ case <-ctx.Done():
+ cc.forgetStreamID(cs.ID)
+ if !hasBody || bodyWritten {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ } else {
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+ }
+ return nil, ctx.Err()
+ case <-req.Cancel:
+ cc.forgetStreamID(cs.ID)
+ if !hasBody || bodyWritten {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ } else {
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+ }
+ return nil, errRequestCanceled
+ case <-cs.peerReset:
+ // processResetStream already removed the
+ // stream from the streams map; no need for
+ // forgetStreamID.
+ return nil, cs.resetErr
+ case err := <-bodyWriter.resc:
+ // Prefer the read loop's response, if available. Issue 16102.
+ select {
+ case re := <-readLoopResCh:
+ return handleReadLoopResponse(re)
+ default:
}
- res := re.res
- res.Request = req
- res.TLS = cc.tlsState
- return res, nil
- case err := <-bodyCopyErrc:
if err != nil {
return nil, err
}
+ bodyWritten = true
+ if d := cc.responseHeaderTimeout(); d != 0 {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+ respHeaderTimer = timer.C
+ }
}
}
}
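
A minimal usage sketch (not part of this patch; example.com stands in for any HTTP/2-capable server): RoundTrip above is normally driven through the standard net/http client machinery rather than called directly.

package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	// http2.Transport implements http.RoundTripper via the
	// ClientConn.RoundTrip method above.
	c := &http.Client{Transport: &http2.Transport{}}
	res, err := c.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	fmt.Println(res.Proto, res.Status) // e.g. "HTTP/2.0 200 OK"
}
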
-var errServerResponseBeforeRequestBody = errors.New("http2: server sent response while still writing request body")
+// requires cc.wmu be held
+func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
+ first := true // first frame written (HEADERS is first, then CONTINUATION)
+ frameSize := int(cc.maxFrameSize)
+ for len(hdrs) > 0 && cc.werr == nil {
+ chunk := hdrs
+ if len(chunk) > frameSize {
+ chunk = chunk[:frameSize]
+ }
+ hdrs = hdrs[len(chunk):]
+ endHeaders := len(hdrs) == 0
+ if first {
+ cc.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: chunk,
+ EndStream: endStream,
+ EndHeaders: endHeaders,
+ })
+ first = false
+ } else {
+ cc.fr.WriteContinuation(streamID, endHeaders, chunk)
+ }
+ }
+ // TODO(bradfitz): this Flush could potentially block (as
+ // could the WriteHeaders call(s) above), which means they
+ // wouldn't respond to Request.Cancel being readable. That's
+ // rare, but this should probably be in a goroutine.
+ cc.bw.Flush()
+ return cc.werr
+}
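
The chunking rule above, restated as a standalone sketch (hypothetical splitChunks helper, not part of this patch): one HEADERS frame carries the first fragment, CONTINUATION frames carry the rest, and only the final fragment sets END_HEADERS.

// splitChunks cuts an encoded header block into frameSize-bounded fragments.
func splitChunks(hdrs []byte, frameSize int) [][]byte {
	var chunks [][]byte
	for len(hdrs) > 0 {
		chunk := hdrs
		if len(chunk) > frameSize {
			chunk = chunk[:frameSize]
		}
		chunks = append(chunks, chunk)
		hdrs = hdrs[len(chunk):]
	}
	return chunks
}
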
-func (cs *clientStream) writeRequestBody(body io.Reader, gotResHeaders <-chan struct{}) error {
+// internal error values; they don't escape to callers
+var (
+ // abort request body write; don't send cancel
+ errStopReqBodyWrite = errors.New("http2: aborting request body write")
+
+ // abort request body write, but first send a RST_STREAM (CANCEL).
+ errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+)
+
+func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
cc := cs.cc
sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
buf := cc.frameScratchBuffer()
defer cc.putFrameScratchBuffer(buf)
- for !sentEnd {
- var sawEOF bool
- n, err := io.ReadFull(body, buf)
- if err == io.ErrUnexpectedEOF {
+ defer func() {
+ traceWroteRequest(cs.trace, err)
+ // TODO: write h12Compare test showing whether
+ // Request.Body is closed by the Transport,
+ // and in multiple cases: server replies <=299 and >299
+ // while still writing request body
+ cerr := bodyCloser.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
+
+ req := cs.req
+ hasTrailers := req.Trailer != nil
+
+ var sawEOF bool
+ for !sawEOF {
+ n, err := body.Read(buf)
+ if err == io.EOF {
sawEOF = true
err = nil
- } else if err == io.EOF {
- break
} else if err != nil {
return err
}
- toWrite := buf[:n]
- for len(toWrite) > 0 && err == nil {
+ remain := buf[:n]
+ for len(remain) > 0 && err == nil {
var allowed int32
- allowed, err = cs.awaitFlowControl(int32(len(toWrite)))
- if err != nil {
+ allowed, err = cs.awaitFlowControl(len(remain))
+ switch {
+ case err == errStopReqBodyWrite:
+ return err
+ case err == errStopReqBodyWriteAndCancel:
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ return err
+ case err != nil:
return err
}
-
cc.wmu.Lock()
- select {
- case <-gotResHeaders:
- err = errServerResponseBeforeRequestBody
- case <-cs.peerReset:
- err = cs.resetErr
- default:
- data := toWrite[:allowed]
- toWrite = toWrite[allowed:]
- sentEnd = sawEOF && len(toWrite) == 0
- err = cc.fr.WriteData(cs.ID, sentEnd, data)
+ data := remain[:allowed]
+ remain = remain[allowed:]
+ sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
+ err = cc.fr.WriteData(cs.ID, sentEnd, data)
+ if err == nil {
+ // TODO(bradfitz): this flush is for latency, not bandwidth.
+ // Most requests won't need this. Make this opt-in or opt-out?
+ // Use some heuristic on the body type? Nagel-like timers?
+ // Based on 'n'? Only last chunk of this for loop, unless flow control
+ // tokens are low? For now, always:
+ err = cc.bw.Flush()
}
cc.wmu.Unlock()
}
@@ -585,17 +914,26 @@ func (cs *clientStream) writeRequestBody(body io.Reader, gotResHeaders <-chan st
}
}
- var err error
+ var trls []byte
+ if !sentEnd && hasTrailers {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ trls = cc.encodeTrailers(req)
+ }
cc.wmu.Lock()
- if !sentEnd {
+ defer cc.wmu.Unlock()
+
+ // Avoid forgetting to send an END_STREAM if the encoded
+ // trailers are 0 bytes. Both branches below produce an END_STREAM.
+ if len(trls) > 0 {
+ err = cc.writeHeaders(cs.ID, true, trls)
+ } else {
err = cc.fr.WriteData(cs.ID, true, nil)
}
if ferr := cc.bw.Flush(); ferr != nil && err == nil {
err = ferr
}
- cc.wmu.Unlock()
-
return err
}
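
A sketch of the caller side of the trailer path above (not part of this patch; the URL and trailer key are illustrative). Trailer keys must be declared on the Request before RoundTrip; values may still be filled in until the body reports io.EOF.

body := strings.NewReader("payload")
req, err := http.NewRequest("PUT", "https://example.com/upload", body)
if err != nil {
	log.Fatal(err)
}
req.Trailer = http.Header{"Checksum": nil} // declared before the request is sent
req.Trailer.Set("Checksum", "abc123")      // encoded by encodeTrailers after the last DATA frame
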
@@ -603,7 +941,7 @@ func (cs *clientStream) writeRequestBody(body io.Reader, gotResHeaders <-chan st
// control tokens from the server.
// It returns either the non-zero number of tokens taken or an error
// if the stream is dead.
-func (cs *clientStream) awaitFlowControl(maxBytes int32) (taken int32, err error) {
+func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
cc := cs.cc
cc.mu.Lock()
defer cc.mu.Unlock()
@@ -611,13 +949,17 @@ func (cs *clientStream) awaitFlowControl(maxBytes int32) (taken int32, err error
if cc.closed {
return 0, errClientConnClosed
}
- if err := cs.checkReset(); err != nil {
+ if cs.stopReqBody != nil {
+ return 0, cs.stopReqBody
+ }
+ if err := cs.checkResetOrDone(); err != nil {
return 0, err
}
if a := cs.flow.available(); a > 0 {
take := a
- if take > maxBytes {
- take = maxBytes
+ if int(take) > maxBytes {
+ take = int32(maxBytes) // safe: maxBytes < int(take) here, so it fits in int32
}
if take > int32(cc.maxFrameSize) {
take = int32(cc.maxFrameSize)
@@ -629,31 +971,124 @@ func (cs *clientStream) awaitFlowControl(maxBytes int32) (taken int32, err error
}
}
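
The clamping logic above as a standalone model (hypothetical takeTokens helper, not part of this patch): a write may take at most the stream's available tokens, at most what the caller asked for, and at most one SETTINGS_MAX_FRAME_SIZE.

func takeTokens(available int32, maxBytes int, maxFrameSize uint32) int32 {
	take := available
	if int(take) > maxBytes {
		take = int32(maxBytes)
	}
	if take > int32(maxFrameSize) {
		take = int32(maxFrameSize)
	}
	return take
}
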
+type badStringError struct {
+ what string
+ str string
+}
+
+func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
+
// requires cc.mu be held.
-func (cc *clientConn) encodeHeaders(req *http.Request) []byte {
+func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
cc.hbuf.Reset()
- // TODO(bradfitz): figure out :authority-vs-Host stuff between http2 and Go
host := req.Host
if host == "" {
host = req.URL.Host
}
+ // Check for any invalid headers and return an error before we
+ // potentially pollute our hpack state. (We want to be able to
+ // continue to reuse the hpack encoder for future requests)
+ for k, vv := range req.Header {
+ if !httplex.ValidHeaderFieldName(k) {
+ return nil, fmt.Errorf("invalid HTTP header name %q", k)
+ }
+ for _, v := range vv {
+ if !httplex.ValidHeaderFieldValue(v) {
+ return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
+ }
+ }
+ }
+
// 8.1.2.3 Request Pseudo-Header Fields
// The :path pseudo-header field includes the path and query parts of the
// target URI (the path-absolute production and optionally a '?' character
// followed by the query production (see Sections 3.3 and 3.4 of
// [RFC3986]).
- cc.writeHeader(":authority", host) // probably not right for all sites
+ cc.writeHeader(":authority", host)
cc.writeHeader(":method", req.Method)
- cc.writeHeader(":path", req.URL.RequestURI())
- cc.writeHeader(":scheme", "https")
+ if req.Method != "CONNECT" {
+ cc.writeHeader(":path", req.URL.RequestURI())
+ cc.writeHeader(":scheme", "https")
+ }
+ if trailers != "" {
+ cc.writeHeader("trailer", trailers)
+ }
+ var didUA bool
for k, vv := range req.Header {
lowKey := strings.ToLower(k)
- if lowKey == "host" {
+ switch lowKey {
+ case "host", "content-length":
+ // Host is :authority, already sent.
+ // Content-Length is automatic, set below.
+ continue
+ case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive":
+ // Per 8.1.2.2 Connection-Specific Header
+ // Fields, don't send connection-specific
+ // fields. We have already checked if any
+ // are error-worthy so just ignore the rest.
continue
+ case "user-agent":
+ // Match Go's http1 behavior: at most one
+ // User-Agent. If set to nil or empty string,
+ // then omit it. Otherwise if not mentioned,
+ // include the default (below).
+ didUA = true
+ if len(vv) < 1 {
+ continue
+ }
+ vv = vv[:1]
+ if vv[0] == "" {
+ continue
+ }
+ }
+ for _, v := range vv {
+ cc.writeHeader(lowKey, v)
}
+ }
+ if shouldSendReqContentLength(req.Method, contentLength) {
+ cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10))
+ }
+ if addGzipHeader {
+ cc.writeHeader("accept-encoding", "gzip")
+ }
+ if !didUA {
+ cc.writeHeader("user-agent", defaultUserAgent)
+ }
+ return cc.hbuf.Bytes(), nil
+}
+
+// shouldSendReqContentLength reports whether the http2.Transport should send
+// a "content-length" request header. This logic is basically a copy of the net/http
+// transferWriter.shouldSendContentLength.
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
+// -1 means unknown.
+func shouldSendReqContentLength(method string, contentLength int64) bool {
+ if contentLength > 0 {
+ return true
+ }
+ if contentLength < 0 {
+ return false
+ }
+ // For zero bodies, whether we send a content-length depends on the method.
+ // It also kinda doesn't matter for http2 either way, with END_STREAM.
+ switch method {
+ case "POST", "PUT", "PATCH":
+ return true
+ default:
+ return false
+ }
+}
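
For example (an in-package sketch assuming fmt is imported; not part of this patch):

func ExampleShouldSendReqContentLength() {
	fmt.Println(shouldSendReqContentLength("POST", 0))  // true: explicitly empty body
	fmt.Println(shouldSendReqContentLength("GET", 0))   // false: bodyless method
	fmt.Println(shouldSendReqContentLength("GET", 512)) // true: any known positive length
	fmt.Println(shouldSendReqContentLength("PUT", -1))  // false: unknown; END_STREAM delimits
}
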
+
+// requires cc.mu be held.
+func (cc *ClientConn) encodeTrailers(req *http.Request) []byte {
+ cc.hbuf.Reset()
+ for k, vv := range req.Trailer {
+ // Transfer-Encoding, etc. have already been filtered out at the
+ // start of RoundTrip.
+ lowKey := strings.ToLower(k)
for _, v := range vv {
cc.writeHeader(lowKey, v)
}
@@ -661,7 +1096,10 @@ func (cc *clientConn) encodeHeaders(req *http.Request) []byte {
return cc.hbuf.Bytes()
}
-func (cc *clientConn) writeHeader(name, value string) {
+func (cc *ClientConn) writeHeader(name, value string) {
+ if VerboseLogs {
+ log.Printf("http2: Transport encoding header %q = %q", name, value)
+ }
cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
}
@@ -671,12 +1109,13 @@ type resAndError struct {
}
// requires cc.mu be held.
-func (cc *clientConn) newStream() *clientStream {
+func (cc *ClientConn) newStream() *clientStream {
cs := &clientStream{
cc: cc,
ID: cc.nextStreamID,
resc: make(chan resAndError, 1),
peerReset: make(chan struct{}),
+ done: make(chan struct{}),
}
cs.flow.add(int32(cc.initialWindowSize))
cs.flow.setConnFlow(&cc.flow)
@@ -687,41 +1126,36 @@ func (cc *clientConn) newStream() *clientStream {
return cs
}
-func (cc *clientConn) streamByID(id uint32, andRemove bool) *clientStream {
+func (cc *ClientConn) forgetStreamID(id uint32) {
+ cc.streamByID(id, true)
+}
+
+func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
cc.mu.Lock()
defer cc.mu.Unlock()
cs := cc.streams[id]
- if andRemove {
+ if andRemove && cs != nil && !cc.closed {
+ cc.lastActive = time.Now()
delete(cc.streams, id)
+ close(cs.done)
+ cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
}
return cs
}
// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
type clientConnReadLoop struct {
- cc *clientConn
- activeRes map[uint32]*clientStream // keyed by streamID
-
- // continueStreamID is the stream ID we're waiting for
- // continuation frames for.
- continueStreamID uint32
-
- hdec *hpack.Decoder
-
- // Fields reset on each HEADERS:
- nextRes *http.Response
- sawRegHeader bool // saw non-pseudo header
- reqMalformed error // non-nil once known to be malformed
+ cc *ClientConn
+ activeRes map[uint32]*clientStream // keyed by streamID
+ closeWhenIdle bool
}
// readLoop runs in its own goroutine and reads and dispatches frames.
-func (cc *clientConn) readLoop() {
+func (cc *ClientConn) readLoop() {
rl := &clientConnReadLoop{
cc: cc,
activeRes: make(map[uint32]*clientStream),
}
- // TODO: figure out henc size
- rl.hdec = hpack.NewDecoder(initialHeaderTableSize, rl.onNewHeaderField)
defer rl.cleanup()
cc.readerErr = rl.run()
@@ -732,20 +1166,47 @@ func (cc *clientConn) readLoop() {
}
}
+// GoAwayError is returned by the Transport when the server closes the
+// TCP connection after sending a GOAWAY frame.
+type GoAwayError struct {
+ LastStreamID uint32
+ ErrCode ErrCode
+ DebugData string
+}
+
+func (e GoAwayError) Error() string {
+ return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q",
+ e.LastStreamID, e.ErrCode, e.DebugData)
+}
+
+func isEOFOrNetReadError(err error) bool {
+ if err == io.EOF {
+ return true
+ }
+ ne, ok := err.(*net.OpError)
+ return ok && ne.Op == "read"
+}
+
func (rl *clientConnReadLoop) cleanup() {
cc := rl.cc
defer cc.tconn.Close()
- defer cc.t.removeClientConn(cc)
+ defer cc.t.connPool().MarkDead(cc)
defer close(cc.readerDone)
// Close any response bodies if the server closes prematurely.
// TODO: also do this if we've written the headers but not
// gotten a response yet.
err := cc.readerErr
- if err == io.EOF {
+ cc.mu.Lock()
+ if cc.goAway != nil && isEOFOrNetReadError(err) {
+ err = GoAwayError{
+ LastStreamID: cc.goAway.LastStreamID,
+ ErrCode: cc.goAway.ErrCode,
+ DebugData: cc.goAwayDebug,
+ }
+ } else if err == io.EOF {
err = io.ErrUnexpectedEOF
}
- cc.mu.Lock()
for _, cs := range rl.activeRes {
cs.bufPipe.CloseWithError(err)
}
@@ -754,6 +1215,7 @@ func (rl *clientConnReadLoop) cleanup() {
case cs.resc <- resAndError{err: err}:
default:
}
+ close(cs.done)
}
cc.closed = true
cc.cond.Broadcast()
@@ -762,114 +1224,239 @@ func (rl *clientConnReadLoop) cleanup() {
func (rl *clientConnReadLoop) run() error {
cc := rl.cc
+ rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
+ gotReply := false // ever saw a HEADERS reply
+ gotSettings := false
for {
f, err := cc.fr.ReadFrame()
+ if err != nil {
+ cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
+ }
if se, ok := err.(StreamError); ok {
- // TODO: deal with stream errors from the framer.
- return se
+ if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil {
+ cs.cc.writeStreamReset(cs.ID, se.Code, err)
+ if se.Cause == nil {
+ se.Cause = cc.fr.errDetail
+ }
+ rl.endStreamError(cs, se)
+ }
+ continue
} else if err != nil {
return err
}
- cc.vlogf("Transport received %v: %#v", f.Header(), f)
-
- streamID := f.Header().StreamID
-
- _, isContinue := f.(*ContinuationFrame)
- if isContinue {
- if streamID != rl.continueStreamID {
- cc.logf("Protocol violation: got CONTINUATION with id %d; want %d", streamID, rl.continueStreamID)
+ if VerboseLogs {
+ cc.vlogf("http2: Transport received %s", summarizeFrame(f))
+ }
+ if !gotSettings {
+ if _, ok := f.(*SettingsFrame); !ok {
+ cc.logf("protocol error: received %T before a SETTINGS frame", f)
return ConnectionError(ErrCodeProtocol)
}
- } else if rl.continueStreamID != 0 {
- // Continue frames need to be adjacent in the stream
- // and we were in the middle of headers.
- cc.logf("Protocol violation: got %T for stream %d, want CONTINUATION for %d", f, streamID, rl.continueStreamID)
- return ConnectionError(ErrCodeProtocol)
+ gotSettings = true
}
+ maybeIdle := false // whether frame might transition us to idle
switch f := f.(type) {
- case *HeadersFrame:
+ case *MetaHeadersFrame:
err = rl.processHeaders(f)
- case *ContinuationFrame:
- err = rl.processContinuation(f)
+ maybeIdle = true
+ gotReply = true
case *DataFrame:
err = rl.processData(f)
+ maybeIdle = true
case *GoAwayFrame:
err = rl.processGoAway(f)
+ maybeIdle = true
case *RSTStreamFrame:
err = rl.processResetStream(f)
+ maybeIdle = true
case *SettingsFrame:
err = rl.processSettings(f)
case *PushPromiseFrame:
err = rl.processPushPromise(f)
case *WindowUpdateFrame:
err = rl.processWindowUpdate(f)
+ case *PingFrame:
+ err = rl.processPing(f)
default:
cc.logf("Transport: unhandled response frame type %T", f)
}
if err != nil {
+ if VerboseLogs {
+ cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
+ }
return err
}
+ if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 {
+ cc.closeIfIdle()
+ }
}
}
-func (rl *clientConnReadLoop) processHeaders(f *HeadersFrame) error {
- rl.sawRegHeader = false
- rl.reqMalformed = nil
- rl.nextRes = &http.Response{
- Proto: "HTTP/2.0",
- ProtoMajor: 2,
- Header: make(http.Header),
- }
- return rl.processHeaderBlockFragment(f.HeaderBlockFragment(), f.StreamID, f.HeadersEnded(), f.StreamEnded())
-}
-
-func (rl *clientConnReadLoop) processContinuation(f *ContinuationFrame) error {
- return rl.processHeaderBlockFragment(f.HeaderBlockFragment(), f.StreamID, f.HeadersEnded(), f.StreamEnded())
-}
-
-func (rl *clientConnReadLoop) processHeaderBlockFragment(frag []byte, streamID uint32, headersEnded, streamEnded bool) error {
+func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
cc := rl.cc
- cs := cc.streamByID(streamID, streamEnded)
+ cs := cc.streamByID(f.StreamID, f.StreamEnded())
if cs == nil {
- // We could return a ConnectionError(ErrCodeProtocol)
- // here, except that in the case of us canceling
- // client requests, we may also delete from the
- // streams map, in which case we forgot that we sent
- // this request. So, just ignore any responses for
- // now. They might've been in-flight before the
- // server got our RST_STREAM.
+ // We'd get here if we canceled a request while the
+ // server had its response still in flight. So if this
+ // was just something we canceled, ignore it.
return nil
}
- _, err := rl.hdec.Write(frag)
+ if !cs.firstByte {
+ if cs.trace != nil {
+ // TODO(bradfitz): move first response byte earlier,
+ // when we first read the 9 byte header, not waiting
+ // until all the HEADERS+CONTINUATION frames have been
+ // merged. This works for now.
+ traceFirstResponseByte(cs.trace)
+ }
+ cs.firstByte = true
+ }
+ if !cs.pastHeaders {
+ cs.pastHeaders = true
+ } else {
+ return rl.processTrailers(cs, f)
+ }
+
+ res, err := rl.handleResponse(cs, f)
if err != nil {
- return err
+ if _, ok := err.(ConnectionError); ok {
+ return err
+ }
+ // Any other error type is a stream error.
+ cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
+ cs.resc <- resAndError{err: err}
+ return nil // return nil from process* funcs to keep conn alive
}
- if !headersEnded {
- rl.continueStreamID = cs.ID
+ if res == nil {
+ // (nil, nil) special case. See handleResponse docs.
return nil
}
+ if res.Body != noBody {
+ rl.activeRes[cs.ID] = cs
+ }
+ cs.resTrailer = &res.Trailer
+ cs.resc <- resAndError{res: res}
+ return nil
+}
+
+// may return error types nil, or ConnectionError. Any other error value
+// is a StreamError of type ErrCodeProtocol. The returned error in that case
+// is the detail.
+//
+// As a special case, handleResponse may return (nil, nil) to skip the
+// frame (currently only used for 100 expect continue). This special
+// case is going away after Issue 13851 is fixed.
+func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
+ if f.Truncated {
+ return nil, errResponseHeaderListSize
+ }
- // HEADERS (or CONTINUATION) are now over.
- rl.continueStreamID = 0
+ status := f.PseudoValue("status")
+ if status == "" {
+ return nil, errors.New("missing status pseudo header")
+ }
+ statusCode, err := strconv.Atoi(status)
+ if err != nil {
+ return nil, errors.New("malformed non-numeric status pseudo header")
+ }
- if rl.reqMalformed != nil {
- cs.resc <- resAndError{err: rl.reqMalformed}
- rl.cc.writeStreamReset(cs.ID, ErrCodeProtocol, rl.reqMalformed)
- return nil
+ if statusCode == 100 {
+ traceGot100Continue(cs.trace)
+ if cs.on100 != nil {
+ cs.on100() // forces any write delay timer to fire
+ }
+ cs.pastHeaders = false // do it all again
+ return nil, nil
}
- res := rl.nextRes
- if streamEnded {
+ header := make(http.Header)
+ res := &http.Response{
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ Header: header,
+ StatusCode: statusCode,
+ Status: status + " " + http.StatusText(statusCode),
+ }
+ for _, hf := range f.RegularFields() {
+ key := http.CanonicalHeaderKey(hf.Name)
+ if key == "Trailer" {
+ t := res.Trailer
+ if t == nil {
+ t = make(http.Header)
+ res.Trailer = t
+ }
+ foreachHeaderElement(hf.Value, func(v string) {
+ t[http.CanonicalHeaderKey(v)] = nil
+ })
+ } else {
+ header[key] = append(header[key], hf.Value)
+ }
+ }
+
+ streamEnded := f.StreamEnded()
+ isHead := cs.req.Method == "HEAD"
+ if !streamEnded || isHead {
+ res.ContentLength = -1
+ if clens := res.Header["Content-Length"]; len(clens) == 1 {
+ if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
+ res.ContentLength = clen64
+ } else {
+ // TODO: care? unlike http/1, it won't mess up our framing, so it's
+ // more safe smuggling-wise to ignore.
+ }
+ } else if len(clens) > 1 {
+ // TODO: care? unlike http/1, it won't mess up our framing, so it's
+ // more safe smuggling-wise to ignore.
+ }
+ }
+
+ if streamEnded || isHead {
res.Body = noBody
- } else {
- buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
- cs.bufPipe = pipe{b: buf}
- res.Body = transportResponseBody{cs}
+ return res, nil
}
- rl.activeRes[cs.ID] = cs
- cs.resc <- resAndError{res: res}
- rl.nextRes = nil // unused now; will be reset next HEADERS frame
+
+ buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
+ cs.bufPipe = pipe{b: buf}
+ cs.bytesRemain = res.ContentLength
+ res.Body = transportResponseBody{cs}
+ go cs.awaitRequestCancel(cs.req)
+
+ if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
+ res.Header.Del("Content-Encoding")
+ res.Header.Del("Content-Length")
+ res.ContentLength = -1
+ res.Body = &gzipReader{body: res.Body}
+ setResponseUncompressed(res)
+ }
+ return res, nil
+}
+
+func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error {
+ if cs.pastTrailers {
+ // Too many HEADERS frames for this stream.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ cs.pastTrailers = true
+ if !f.StreamEnded() {
+ // We expect that any HEADERS frame carrying trailers also
+ // has END_STREAM set.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ if len(f.PseudoFields()) > 0 {
+ // No pseudo header fields are defined for trailers.
+ // TODO: ConnectionError might be overly harsh? Check.
+ return ConnectionError(ErrCodeProtocol)
+ }
+
+ trailer := make(http.Header)
+ for _, hf := range f.RegularFields() {
+ key := http.CanonicalHeaderKey(hf.Name)
+ trailer[key] = append(trailer[key], hf.Value)
+ }
+ cs.trailer = trailer
+
+ rl.endStream(cs)
return nil
}
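
The client-visible effect of processTrailers (sketch, not part of this patch; client and the trailer key are illustrative): res.Trailer is only fully populated once the body has been drained.

res, err := client.Get("https://example.com/stream")
if err != nil {
	log.Fatal(err)
}
io.Copy(ioutil.Discard, res.Body) // drain; trailers arrive after the last DATA frame
res.Body.Close()
fmt.Println(res.Trailer.Get("X-Checksum")) // filled in by copyTrailers via endStream
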
@@ -881,13 +1468,35 @@ type transportResponseBody struct {
}
func (b transportResponseBody) Read(p []byte) (n int, err error) {
+ cs := b.cs
+ cc := cs.cc
+
+ if cs.readErr != nil {
+ return 0, cs.readErr
+ }
n, err = b.cs.bufPipe.Read(p)
+ if cs.bytesRemain != -1 {
+ if int64(n) > cs.bytesRemain {
+ n = int(cs.bytesRemain)
+ if err == nil {
+ err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
+ cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
+ }
+ cs.readErr = err
+ return int(cs.bytesRemain), err
+ }
+ cs.bytesRemain -= int64(n)
+ if err == io.EOF && cs.bytesRemain > 0 {
+ err = io.ErrUnexpectedEOF
+ cs.readErr = err
+ return n, err
+ }
+ }
if n == 0 {
+ // No flow control tokens to send back.
return
}
- cs := b.cs
- cc := cs.cc
cc.mu.Lock()
defer cc.mu.Unlock()
@@ -898,8 +1507,12 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) {
cc.inflow.add(connAdd)
}
if err == nil { // No need to refresh if the stream is over or failed.
- if v := cs.inflow.available(); v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
- streamAdd = transportDefaultStreamFlow - v
+ // Consider any buffered body data (read from the conn but not
+ // consumed by the client) when computing flow control for this
+ // stream.
+ v := int(cs.inflow.available()) + cs.bufPipe.Len()
+ if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
+ streamAdd = int32(transportDefaultStreamFlow - v)
cs.inflow.add(streamAdd)
}
}
@@ -917,49 +1530,147 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) {
return
}
+var errClosedResponseBody = errors.New("http2: response body closed")
+
func (b transportResponseBody) Close() error {
- if b.cs.bufPipe.Err() != io.EOF {
- // TODO: write test for this
- b.cs.cc.writeStreamReset(b.cs.ID, ErrCodeCancel, nil)
+ cs := b.cs
+ cc := cs.cc
+
+ serverSentStreamEnd := cs.bufPipe.Err() == io.EOF
+ unread := cs.bufPipe.Len()
+
+ if unread > 0 || !serverSentStreamEnd {
+ cc.mu.Lock()
+ cc.wmu.Lock()
+ if !serverSentStreamEnd {
+ cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
+ }
+ // Return connection-level flow control.
+ if unread > 0 {
+ cc.inflow.add(int32(unread))
+ cc.fr.WriteWindowUpdate(0, uint32(unread))
+ }
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ cc.mu.Unlock()
}
+
+ cs.bufPipe.BreakWithError(errClosedResponseBody)
return nil
}
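
Usage sketch (not part of this patch; client and URL are illustrative): closing the body before EOF now resets the stream and refunds buffered connection-level flow control, so abandoning a large response no longer starves the shared connection.

res, err := client.Get("https://example.com/large")
if err != nil {
	log.Fatal(err)
}
buf := make([]byte, 4096)
res.Body.Read(buf) // read a little...
res.Body.Close()   // ...then give up: sends RST_STREAM(CANCEL) plus a WINDOW_UPDATE refund
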
func (rl *clientConnReadLoop) processData(f *DataFrame) error {
cc := rl.cc
cs := cc.streamByID(f.StreamID, f.StreamEnded())
+ data := f.Data()
if cs == nil {
+ cc.mu.Lock()
+ neverSent := cc.nextStreamID
+ cc.mu.Unlock()
+ if f.StreamID >= neverSent {
+ // We never asked for this.
+ cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
+ return ConnectionError(ErrCodeProtocol)
+ }
+ // We probably did ask for this, but canceled. Just ignore it.
+ // TODO: be stricter here? only silently ignore things which
+ // we canceled, but not things which were closed normally
+ // by the peer? Tough without accumulating too much state.
+
+ // But at least return their flow control:
+ if f.Length > 0 {
+ cc.mu.Lock()
+ cc.inflow.add(int32(f.Length))
+ cc.mu.Unlock()
+
+ cc.wmu.Lock()
+ cc.fr.WriteWindowUpdate(0, uint32(f.Length))
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ }
return nil
}
- data := f.Data()
- if VerboseLogs {
- rl.cc.logf("DATA: %q", data)
- }
+ if f.Length > 0 {
+ if len(data) > 0 && cs.bufPipe.b == nil {
+ // Data frame after it's already closed?
+ cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
+ return ConnectionError(ErrCodeProtocol)
+ }
- // Check connection-level flow control.
- cc.mu.Lock()
- if cs.inflow.available() >= int32(len(data)) {
- cs.inflow.take(int32(len(data)))
- } else {
+ // Check connection-level flow control.
+ cc.mu.Lock()
+ if cs.inflow.available() >= int32(f.Length) {
+ cs.inflow.take(int32(f.Length))
+ } else {
+ cc.mu.Unlock()
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ // Return any padded flow control now, since we won't
+ // refund it later on body reads.
+ if pad := int32(f.Length) - int32(len(data)); pad > 0 {
+ cs.inflow.add(pad)
+ cc.inflow.add(pad)
+ cc.wmu.Lock()
+ cc.fr.WriteWindowUpdate(0, uint32(pad))
+ cc.fr.WriteWindowUpdate(cs.ID, uint32(pad))
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+ }
cc.mu.Unlock()
- return ConnectionError(ErrCodeFlowControl)
- }
- cc.mu.Unlock()
- if _, err := cs.bufPipe.Write(data); err != nil {
- return err
+ if len(data) > 0 {
+ if _, err := cs.bufPipe.Write(data); err != nil {
+ rl.endStreamError(cs, err)
+ return err
+ }
+ }
}
if f.StreamEnded() {
- cs.bufPipe.CloseWithError(io.EOF)
- delete(rl.activeRes, cs.ID)
+ rl.endStream(cs)
}
return nil
}
+var errInvalidTrailers = errors.New("http2: invalid trailers")
+
+func (rl *clientConnReadLoop) endStream(cs *clientStream) {
+ // TODO: check that any declared content-length matches, like
+ // server.go's (*stream).endStream method.
+ rl.endStreamError(cs, nil)
+}
+
+func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
+ var code func()
+ if err == nil {
+ err = io.EOF
+ code = cs.copyTrailers
+ }
+ cs.bufPipe.closeWithErrorAndCode(err, code)
+ delete(rl.activeRes, cs.ID)
+ if isConnectionCloseRequest(cs.req) {
+ rl.closeWhenIdle = true
+ }
+
+ select {
+ case cs.resc <- resAndError{err: err}:
+ default:
+ }
+}
+
+func (cs *clientStream) copyTrailers() {
+ for k, vv := range cs.trailer {
+ t := cs.resTrailer
+ if *t == nil {
+ *t = make(http.Header)
+ }
+ (*t)[k] = vv
+ }
+}
+
func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
cc := rl.cc
- cc.t.removeClientConn(cc)
+ cc.t.connPool().MarkDead(cc)
if f.ErrCode != 0 {
// TODO: deal with GOAWAY more. particularly the error code
cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
@@ -972,25 +1683,56 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
cc := rl.cc
cc.mu.Lock()
defer cc.mu.Unlock()
- return f.ForeachSetting(func(s Setting) error {
+
+ if f.IsAck() {
+ if cc.wantSettingsAck {
+ cc.wantSettingsAck = false
+ return nil
+ }
+ return ConnectionError(ErrCodeProtocol)
+ }
+
+ err := f.ForeachSetting(func(s Setting) error {
switch s.ID {
case SettingMaxFrameSize:
cc.maxFrameSize = s.Val
case SettingMaxConcurrentStreams:
cc.maxConcurrentStreams = s.Val
case SettingInitialWindowSize:
- // TODO: error if this is too large.
+ // Values above the maximum flow-control
+ // window size of 2^31-1 MUST be treated as a
+ // connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR.
+ if s.Val > math.MaxInt32 {
+ return ConnectionError(ErrCodeFlowControl)
+ }
- // TODO: adjust flow control of still-open
+ // Adjust flow control of currently-open
// frames by the difference of the old initial
// window size and this one.
+ delta := int32(s.Val) - int32(cc.initialWindowSize)
+ for _, cs := range cc.streams {
+ cs.flow.add(delta)
+ }
+ cc.cond.Broadcast()
+
cc.initialWindowSize = s.Val
default:
- // TODO(bradfitz): handle more settings?
+ // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
cc.vlogf("Unhandled Setting: %v", s)
}
return nil
})
+ if err != nil {
+ return err
+ }
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ cc.fr.WriteSettingsAck()
+ cc.bw.Flush()
+ return cc.werr
}
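
A standalone model of the SETTINGS_INITIAL_WINDOW_SIZE adjustment above (hypothetical helper, not part of this patch): the delta may be negative, temporarily driving a stream's send window below zero, which awaitFlowControl tolerates by blocking until tokens return.

func applyWindowDelta(streams []*clientStream, oldSize, newSize uint32) {
	delta := int32(newSize) - int32(oldSize)
	for _, cs := range streams {
		cs.flow.add(delta) // may go negative; writers block until refilled
	}
}
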
func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
@@ -1027,15 +1769,31 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
// which closes this, so there
// isn't a race.
default:
- err := StreamError{cs.ID, f.ErrCode}
+ err := streamError(cs.ID, f.ErrCode)
cs.resetErr = err
close(cs.peerReset)
cs.bufPipe.CloseWithError(err)
+ cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
}
delete(rl.activeRes, cs.ID)
return nil
}
+func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
+ if f.IsAck() {
+ // 6.7 PING: " An endpoint MUST NOT respond to PING frames
+ // containing this flag."
+ return nil
+ }
+ cc := rl.cc
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if err := cc.fr.WritePing(true, f.Data); err != nil {
+ return err
+ }
+ return cc.bw.Flush()
+}
+
func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
// We told the peer we don't want them.
// Spec says:
@@ -1047,53 +1805,27 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
return ConnectionError(ErrCodeProtocol)
}
-func (cc *clientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
- // TODO: do something with err? send it as a debug frame to the peer?
- // But that's only in GOAWAY. Invent a new frame type? Is there one already?
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
+ // TODO: map err to more interesting error codes, once the
+ // HTTP community comes up with some. But currently for
+ // RST_STREAM there's no equivalent to GOAWAY frame's debug
+ // data, and the error codes are all pretty vague ("cancel").
cc.wmu.Lock()
cc.fr.WriteRSTStream(streamID, code)
+ cc.bw.Flush()
cc.wmu.Unlock()
}
-// onNewHeaderField runs on the readLoop goroutine whenever a new
-// hpack header field is decoded.
-func (rl *clientConnReadLoop) onNewHeaderField(f hpack.HeaderField) {
- cc := rl.cc
- if VerboseLogs {
- cc.logf("Header field: %+v", f)
- }
- isPseudo := strings.HasPrefix(f.Name, ":")
- if isPseudo {
- if rl.sawRegHeader {
- rl.reqMalformed = errors.New("http2: invalid pseudo header after regular header")
- return
- }
- switch f.Name {
- case ":status":
- code, err := strconv.Atoi(f.Value)
- if err != nil {
- rl.reqMalformed = errors.New("http2: invalid :status")
- return
- }
- rl.nextRes.Status = f.Value + " " + http.StatusText(code)
- rl.nextRes.StatusCode = code
- default:
- // "Endpoints MUST NOT generate pseudo-header
- // fields other than those defined in this
- // document."
- rl.reqMalformed = fmt.Errorf("http2: unknown response pseudo header %q", f.Name)
- }
- } else {
- rl.sawRegHeader = true
- rl.nextRes.Header.Add(http.CanonicalHeaderKey(f.Name), f.Value)
- }
-}
+var (
+ errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
+ errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers")
+)
-func (cc *clientConn) logf(format string, args ...interface{}) {
+func (cc *ClientConn) logf(format string, args ...interface{}) {
cc.t.logf(format, args...)
}
-func (cc *clientConn) vlogf(format string, args ...interface{}) {
+func (cc *ClientConn) vlogf(format string, args ...interface{}) {
cc.t.vlogf(format, args...)
}
@@ -1108,3 +1840,128 @@ func (t *Transport) logf(format string, args ...interface{}) {
}
var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
+
+func strSliceContains(ss []string, s string) bool {
+ for _, v := range ss {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
+
+type erringRoundTripper struct{ err error }
+
+func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
+
+// gzipReader wraps a response body so it can lazily
+// call gzip.NewReader on the first call to Read
+type gzipReader struct {
+ body io.ReadCloser // underlying Response.Body
+ zr *gzip.Reader // lazily-initialized gzip reader
+ zerr error // sticky error
+}
+
+func (gz *gzipReader) Read(p []byte) (n int, err error) {
+ if gz.zerr != nil {
+ return 0, gz.zerr
+ }
+ if gz.zr == nil {
+ gz.zr, err = gzip.NewReader(gz.body)
+ if err != nil {
+ gz.zerr = err
+ return 0, err
+ }
+ }
+ return gz.zr.Read(p)
+}
+
+func (gz *gzipReader) Close() error {
+ return gz.body.Close()
+}
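
Client-side effect of gzipReader (sketch, not part of this patch; client and URL are illustrative): when the Transport added Accept-Encoding: gzip itself, decompression is transparent and both the Content-Length header and res.ContentLength are cleared.

res, err := client.Get("https://example.com/")
if err != nil {
	log.Fatal(err)
}
defer res.Body.Close()
plain, err := ioutil.ReadAll(res.Body) // reads through gzipReader
if err != nil {
	log.Fatal(err)
}
fmt.Println(len(plain), res.ContentLength) // decompressed size, -1
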
+
+type errorReader struct{ err error }
+
+func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }
+
+// bodyWriterState encapsulates various state around the Transport's writing
+// of the request body, particularly regarding doing delayed writes of the body
+// when the request contains "Expect: 100-continue".
+type bodyWriterState struct {
+ cs *clientStream
+ timer *time.Timer // if non-nil, we're doing a delayed write
+ fnonce *sync.Once // to call fn with
+ fn func() // the code to run in the goroutine, writing the body
+ resc chan error // result of fn's execution
+ delay time.Duration // how long we should delay a delayed write for
+}
+
+func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) {
+ s.cs = cs
+ if body == nil {
+ return
+ }
+ resc := make(chan error, 1)
+ s.resc = resc
+ s.fn = func() {
+ resc <- cs.writeRequestBody(body, cs.req.Body)
+ }
+ s.delay = t.expectContinueTimeout()
+ if s.delay == 0 ||
+ !httplex.HeaderValuesContainsToken(
+ cs.req.Header["Expect"],
+ "100-continue") {
+ return
+ }
+ s.fnonce = new(sync.Once)
+
+ // Arm the timer with a very large duration, which we'll
+ // intentionally lower later. It has to be large now because
+ // we need a handle to it before writing the headers, but the
+ // s.delay value is defined to not start until after the
+ // request headers were written.
+ const hugeDuration = 365 * 24 * time.Hour
+ s.timer = time.AfterFunc(hugeDuration, func() {
+ s.fnonce.Do(s.fn)
+ })
+ return
+}
+
+func (s bodyWriterState) cancel() {
+ if s.timer != nil {
+ s.timer.Stop()
+ }
+}
+
+func (s bodyWriterState) on100() {
+ if s.timer == nil {
+ // If we didn't do a delayed write, ignore the server's
+ // bogus 100 continue response.
+ return
+ }
+ s.timer.Stop()
+ go func() { s.fnonce.Do(s.fn) }()
+}
+
+// scheduleBodyWrite starts writing the body, either immediately (in
+// the common case) or after the delay timeout. It should not be
+// called until after the headers have been written.
+func (s bodyWriterState) scheduleBodyWrite() {
+ if s.timer == nil {
+ // We're not doing a delayed write (see
+ // getBodyWriterState), so just start the writing
+ // goroutine immediately.
+ go s.fn()
+ return
+ }
+ traceWait100Continue(s.cs.trace)
+ if s.timer.Stop() {
+ s.timer.Reset(s.delay)
+ }
+}
+
+// isConnectionCloseRequest reports whether req should use its own
+// connection for a single request and then close the connection.
+func isConnectionCloseRequest(req *http.Request) bool {
+ return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close")
+}
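
The delay used by bodyWriterState comes from the wrapped http1 transport's ExpectContinueTimeout when ConfigureTransport is used; a sketch (not part of this patch; URL and payload are illustrative):

t1 := &http.Transport{ExpectContinueTimeout: time.Second}
if err := http2.ConfigureTransport(t1); err != nil {
	log.Fatal(err)
}
req, err := http.NewRequest("PUT", "https://example.com/big", strings.NewReader("payload"))
if err != nil {
	log.Fatal(err)
}
req.Header.Set("Expect", "100-continue") // body write is delayed until a 100 reply or the timeout
res, err := (&http.Client{Transport: t1}).Do(req)
if err != nil {
	log.Fatal(err)
}
res.Body.Close()
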
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
index 9050bebb..27ef0dd4 100644
--- a/vendor/golang.org/x/net/http2/write.go
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -7,10 +7,12 @@ package http2
import (
"bytes"
"fmt"
+ "log"
"net/http"
"time"
"golang.org/x/net/http2/hpack"
+ "golang.org/x/net/lex/httplex"
)
// writeFramer is implemented by any type that is used to write frames.
@@ -23,7 +25,11 @@ type writeFramer interface {
// frame writing scheduler (see writeScheduler in writesched.go).
//
// This interface is implemented by *serverConn.
-// TODO: use it from the client code too, once it exists.
+//
+// TODO: decide whether to a) use this in the client code (which didn't
+// end up using this yet, because it has a simpler design, not
+// currently implementing priorities), or b) delete this and
+// make the server code a bit more concrete.
type writeContext interface {
Framer() *Framer
Flush() error
@@ -91,6 +97,16 @@ func (w *writeData) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
}
+// handlerPanicRST is the message sent from handler goroutines when
+// the handler panics.
+type handlerPanicRST struct {
+ StreamID uint32
+}
+
+func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
+}
+
func (se StreamError) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
}
@@ -108,40 +124,48 @@ func (writeSettingsAck) writeFrame(ctx writeContext) error {
}
// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
-// for HTTP response headers from a server handler.
+// for HTTP response headers or trailers from a server handler.
type writeResHeaders struct {
streamID uint32
- httpResCode int
+ httpResCode int // 0 means no ":status" line
h http.Header // may be nil
+ trailers []string // if non-nil, which keys of h to write. nil means all.
endStream bool
+ date string
contentType string
contentLength string
}
+func encKV(enc *hpack.Encoder, k, v string) {
+ if VerboseLogs {
+ log.Printf("http2: server encoding header %q = %q", k, v)
+ }
+ enc.WriteField(hpack.HeaderField{Name: k, Value: v})
+}
+
func (w *writeResHeaders) writeFrame(ctx writeContext) error {
enc, buf := ctx.HeaderEncoder()
buf.Reset()
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: httpCodeString(w.httpResCode)})
- for k, vv := range w.h {
- k = lowerHeader(k)
- for _, v := range vv {
- // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
- if k == "transfer-encoding" && v != "trailers" {
- continue
- }
- enc.WriteField(hpack.HeaderField{Name: k, Value: v})
- }
+
+ if w.httpResCode != 0 {
+ encKV(enc, ":status", httpCodeString(w.httpResCode))
}
+
+ encodeHeaders(enc, w.h, w.trailers)
+
if w.contentType != "" {
- enc.WriteField(hpack.HeaderField{Name: "content-type", Value: w.contentType})
+ encKV(enc, "content-type", w.contentType)
}
if w.contentLength != "" {
- enc.WriteField(hpack.HeaderField{Name: "content-length", Value: w.contentLength})
+ encKV(enc, "content-length", w.contentLength)
+ }
+ if w.date != "" {
+ encKV(enc, "date", w.date)
}
headerBlock := buf.Bytes()
- if len(headerBlock) == 0 {
+ if len(headerBlock) == 0 && w.trailers == nil {
panic("unexpected empty hpack")
}
@@ -187,7 +211,7 @@ type write100ContinueHeadersFrame struct {
func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
enc, buf := ctx.HeaderEncoder()
buf.Reset()
- enc.WriteField(hpack.HeaderField{Name: ":status", Value: "100"})
+ encKV(enc, ":status", "100")
return ctx.Framer().WriteHeaders(HeadersFrameParam{
StreamID: w.streamID,
BlockFragment: buf.Bytes(),
@@ -204,3 +228,37 @@ type writeWindowUpdate struct {
func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
}
+
+func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
+ if keys == nil {
+ sorter := sorterPool.Get().(*sorter)
+ // Using defer here, since the returned keys from the
+ // sorter.Keys method is only valid until the sorter
+ // is returned:
+ defer sorterPool.Put(sorter)
+ keys = sorter.Keys(h)
+ }
+ for _, k := range keys {
+ vv := h[k]
+ k = lowerHeader(k)
+ if !validWireHeaderFieldName(k) {
+ // Skip it as backup paranoia. Per
+ // golang.org/issue/14048, these should
+ // already be rejected at a higher level.
+ continue
+ }
+ isTE := k == "transfer-encoding"
+ for _, v := range vv {
+ if !httplex.ValidHeaderFieldValue(v) {
+ // TODO: return an error? golang.org/issue/14048
+ // For now just omit it.
+ continue
+ }
+ // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
+ if isTE && v != "trailers" {
+ continue
+ }
+ encKV(enc, k, v)
+ }
+ }
+}
diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go
new file mode 100644
index 00000000..bd0ec24f
--- /dev/null
+++ b/vendor/golang.org/x/net/lex/httplex/httplex.go
@@ -0,0 +1,312 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httplex contains rules around lexical matters of various
+// HTTP-related specifications.
+//
+// This package is shared by the standard library (which vendors it)
+// and x/net/http2. It comes with no API stability promise.
+package httplex
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+var isTokenTable = [127]bool{
+ '!': true,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'V': true,
+ 'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '|': true,
+ '~': true,
+}
+
+func IsTokenRune(r rune) bool {
+ i := int(r)
+ return i < len(isTokenTable) && isTokenTable[i]
+}
+
+func isNotToken(r rune) bool {
+ return !IsTokenRune(r)
+}
+
+// HeaderValuesContainsToken reports whether any string in values
+// contains the provided token, ASCII case-insensitively.
+func HeaderValuesContainsToken(values []string, token string) bool {
+ for _, v := range values {
+ if headerValueContainsToken(v, token) {
+ return true
+ }
+ }
+ return false
+}
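
For example (an in-package sketch assuming fmt is imported; not part of this patch):

func ExampleHeaderValuesContainsToken() {
	fmt.Println(HeaderValuesContainsToken([]string{"keep-alive, Upgrade"}, "upgrade")) // true (case-insensitive)
	fmt.Println(HeaderValuesContainsToken([]string{"close"}, "keep-alive"))            // false
}
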
+
+// isOWS reports whether b is an optional whitespace byte, as defined
+// by RFC 7230 section 3.2.3.
+func isOWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// trimOWS returns x with all optional whitespace removed from the
+// beginning and end.
+func trimOWS(x string) string {
+ // TODO: consider using strings.Trim(x, " \t") instead,
+ // if and when it's fast enough. See issue 10292.
+ // But this ASCII-only code will probably always beat UTF-8
+ // aware code.
+ for len(x) > 0 && isOWS(x[0]) {
+ x = x[1:]
+ }
+ for len(x) > 0 && isOWS(x[len(x)-1]) {
+ x = x[:len(x)-1]
+ }
+ return x
+}
+
+// headerValueContainsToken reports whether v (assumed to be a
+// 0#element, in the ABNF extension described in RFC 7230 section 7)
+// contains token amongst its comma-separated tokens, ASCII
+// case-insensitively.
+func headerValueContainsToken(v string, token string) bool {
+ v = trimOWS(v)
+ if comma := strings.IndexByte(v, ','); comma != -1 {
+ return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
+ }
+ return tokenEqual(v, token)
+}
+
+// lowerASCII returns the ASCII lowercase version of b.
+func lowerASCII(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
+func tokenEqual(t1, t2 string) bool {
+ if len(t1) != len(t2) {
+ return false
+ }
+ for i, b := range t1 {
+ if b >= utf8.RuneSelf {
+ // No UTF-8 or non-ASCII allowed in tokens.
+ return false
+ }
+ if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// isLWS reports whether b is linear white space, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+// LWS = [CRLF] 1*( SP | HT )
+func isLWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// isCTL reports whether b is a control byte, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+// CTL = <any US-ASCII control character
+// (octets 0 - 31) and DEL (127)>
+func isCTL(b byte) bool {
+ const del = 0x7f // a CTL
+ return b < ' ' || b == del
+}
+
+// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
+// HTTP/2 imposes the additional restriction that uppercase ASCII
+// letters are not allowed.
+//
+// RFC 7230 says:
+// header-field = field-name ":" OWS field-value OWS
+// field-name = token
+// token = 1*tchar
+// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
+// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
+func ValidHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for _, r := range v {
+ if !IsTokenRune(r) {
+ return false
+ }
+ }
+ return true
+}
+
+// ValidHostHeader reports whether h is a valid host header.
+func ValidHostHeader(h string) bool {
+ // The latest spec is actually this:
+ //
+ // http://tools.ietf.org/html/rfc7230#section-5.4
+ // Host = uri-host [ ":" port ]
+ //
+ // Where uri-host is:
+ // http://tools.ietf.org/html/rfc3986#section-3.2.2
+ //
+ // But we're going to be much more lenient for now and just
+ // search for any byte that's not a valid byte in any of those
+ // expressions.
+ for i := 0; i < len(h); i++ {
+ if !validHostByte[h[i]] {
+ return false
+ }
+ }
+ return true
+}
+
+// See the validHostHeader comment.
+var validHostByte = [256]bool{
+ '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
+ '8': true, '9': true,
+
+ 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
+ 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
+ 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
+ 'y': true, 'z': true,
+
+ 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
+ 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
+ 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
+ 'Y': true, 'Z': true,
+
+ '!': true, // sub-delims
+ '$': true, // sub-delims
+ '%': true, // pct-encoded (and used in IPv6 zones)
+ '&': true, // sub-delims
+ '(': true, // sub-delims
+ ')': true, // sub-delims
+ '*': true, // sub-delims
+ '+': true, // sub-delims
+ ',': true, // sub-delims
+ '-': true, // unreserved
+ '.': true, // unreserved
+ ':': true, // IPv6address + Host expression's optional port
+ ';': true, // sub-delims
+ '=': true, // sub-delims
+ '[': true,
+ '\'': true, // sub-delims
+ ']': true,
+ '_': true, // unreserved
+ '~': true, // unreserved
+}
+
+// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
+//
+// message-header = field-name ":" [ field-value ]
+// field-value = *( field-content | LWS )
+// field-content = <the OCTETs making up the field-value
+// and consisting of either *TEXT or combinations
+// of token, separators, and quoted-string>
+//
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
+//
+// TEXT = <any OCTET except CTLs,
+// but including LWS>
+// LWS = [CRLF] 1*( SP | HT )
+// CTL = <any US-ASCII control character
+// (octets 0 - 31) and DEL (127)>
+//
+// RFC 7230 says:
+// field-value = *( field-content / obs-fold )
+// obs-fold = N/A to http2, and deprecated
+// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+// field-vchar = VCHAR / obs-text
+// obs-text = %x80-FF
+// VCHAR = "any visible [USASCII] character"
+//
+// http2 further says: "Similarly, HTTP/2 allows header field values
+// that are not valid. While most of the values that can be encoded
+// will not alter header field parsing, carriage return (CR, ASCII
+// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
+// 0x0) might be exploited by an attacker if they are translated
+// verbatim. Any request or response that contains a character not
+// permitted in a header field value MUST be treated as malformed
+// (Section 8.1.2.6). Valid characters are defined by the
+// field-content ABNF rule in Section 3.2 of [RFC7230]."
+//
+// This function does not (yet?) properly handle the rejection of
+// strings that begin or end with SP or HTAB.
+func ValidHeaderFieldValue(v string) bool {
+ for i := 0; i < len(v); i++ {
+ b := v[i]
+ if isCTL(b) && !isLWS(b) {
+ return false
+ }
+ }
+ return true
+}
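
For example (an in-package sketch assuming fmt is imported; not part of this patch):

func ExampleValidHeaderFieldValue() {
	fmt.Println(ValidHeaderFieldValue("text/html; charset=utf-8")) // true
	fmt.Println(ValidHeaderFieldValue("a\tb"))                     // true: HTAB counts as LWS
	fmt.Println(ValidHeaderFieldValue("a\r\nInjected: x"))         // false: CR and LF are CTLs
}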