gitlab.com/gitlab-org/gitlab-pages.git
author     Nick Thomas <nick@gitlab.com>  2018-10-26 18:45:52 +0300
committer  Nick Thomas <nick@gitlab.com>  2018-10-26 18:45:52 +0300
commit     dedadb37ddd3e3dd5a1f3ac9ea5496c602556443 (patch)
tree       862fe171f9fa30405d06e8fd5f7856c4b3f7ddbc
parent     49cc251dafd31762dd9eca096a9eba963c469a26 (diff)
parent     2c77587f78abab2c442df95b1b6cc49e85aefb07 (diff)

Merge branch 'bjk/prom_0.9' into 'master'

Update Prometheus vendoring

See merge request gitlab-org/gitlab-pages!116
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/collector.go         |  36
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/desc.go              |   8
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/fnv.go               |  13
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/go_collector.go      |  13
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/histogram.go         | 155
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/http.go              |  29
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go   |  85
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/labels.go            |  13
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/metric.go            |  56
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/process_collector.go | 105
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/registry.go          | 120
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/summary.go           |  17
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/value.go             |  12
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/wrap.go              | 179
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/text_create.go                  | 379
-rw-r--r--  vendor/vendor.json                                                          |  36
16 files changed, 965 insertions, 291 deletions
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
index 3c9bae24..c0d70b2f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -40,7 +40,8 @@ type Collector interface {
// Collector may yield any Metric it sees fit in its Collect method.
//
// This method idempotently sends the same descriptors throughout the
- // lifetime of the Collector.
+ // lifetime of the Collector. It may be called concurrently and
+ // therefore must be implemented in a concurrency safe way.
//
// If a Collector encounters an error while executing this method, it
// must send an invalid descriptor (created with NewInvalidDesc) to
@@ -61,6 +62,39 @@ type Collector interface {
Collect(chan<- Metric)
}
+// DescribeByCollect is a helper to implement the Describe method of a custom
+// Collector. It collects the metrics from the provided Collector and sends
+// their descriptors to the provided channel.
+//
+// If a Collector collects the same metrics throughout its lifetime, its
+// Describe method can simply be implemented as:
+//
+// func (c customCollector) Describe(ch chan<- *Desc) {
+// DescribeByCollect(c, ch)
+// }
+//
+// However, this will not work if the metrics collected change dynamically over
+// the lifetime of the Collector in a way that their combined set of descriptors
+// changes as well. The shortcut implementation will then violate the contract
+// of the Describe method. If a Collector sometimes collects no metrics at all
+// (for example vectors like CounterVec, GaugeVec, etc., which only collect
+// metrics after a metric with a fully specified label set has been accessed),
+// it might even get registered as an unchecked Collector (cf. the Register
+// method of the Registerer interface). Hence, only use this shortcut
+// implementation of Describe if you are certain to fulfill the contract.
+//
+// The Collector example demonstrates a use of DescribeByCollect.
+func DescribeByCollect(c Collector, descs chan<- *Desc) {
+ metrics := make(chan Metric)
+ go func() {
+ c.Collect(metrics)
+ close(metrics)
+ }()
+ for m := range metrics {
+ descs <- m.Desc()
+ }
+}
+
// selfCollector implements Collector for a single Metric so that the Metric
// collects itself. Add it as an anonymous field to a struct that implements
// Metric, and call init with the Metric itself as an argument.
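
The new DescribeByCollect helper lets a custom Collector whose metric set never changes implement Describe by delegating to Collect. A minimal sketch of such a collector; the queueCollector type and its metric name are illustrative and not part of this patch:

package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector reports the current depth of some queue on every scrape.
type queueCollector struct {
	depth *prometheus.Desc
	peek  func() float64 // returns the current queue depth
}

// Describe forwards the descriptors of whatever Collect produces. This is
// safe here because the set of metrics never changes over the collector's
// lifetime.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(c, ch)
}

// Collect emits one gauge sample per scrape.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.depth, prometheus.GaugeValue, c.peek())
}

func main() {
	prometheus.MustRegister(&queueCollector{
		depth: prometheus.NewDesc("queue_depth", "Current queue depth.", nil, nil),
		peek:  func() float64 { return 42 },
	})
}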
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 4a755b0f..7b8827ff 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -67,7 +67,7 @@ type Desc struct {
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported on registration time. variableLabels and constLabels can
-// be nil if no such labels should be set. fqName and help must not be empty.
+// be nil if no such labels should be set. fqName must not be empty.
//
// variableLabels only contain the label names. Their label values are variable
// and therefore not part of the Desc. (They are managed within the Metric.)
@@ -80,10 +80,6 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
help: help,
variableLabels: variableLabels,
}
- if help == "" {
- d.err = errors.New("empty help string")
- return d
- }
if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d
@@ -156,7 +152,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
Value: proto.String(v),
})
}
- sort.Sort(LabelPairSorter(d.constLabelPairs))
+ sort.Sort(labelPairSorter(d.constLabelPairs))
return d
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
index e3b67df8..3d383a73 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package prometheus
// Inline and byte-free variant of hash/fnv's fnv64a.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index 0440bd12..ba3b9333 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package prometheus
import (
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 331783a7..4d7fa976 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -16,7 +16,9 @@ package prometheus
import (
"fmt"
"math"
+ "runtime"
"sort"
+ "sync"
"sync/atomic"
"github.com/golang/protobuf/proto"
@@ -108,8 +110,9 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
}
// HistogramOpts bundles the options for creating a Histogram metric. It is
-// mandatory to set Name and Help to a non-empty string. All other fields are
-// optional and can safely be left at their zero value.
+// mandatory to set Name to a non-empty string. All other fields are optional
+// and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
type HistogramOpts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Histogram (created by joining these components with
@@ -120,7 +123,7 @@ type HistogramOpts struct {
Subsystem string
Name string
- // Help provides information about this Histogram. Mandatory!
+ // Help provides information about this Histogram.
//
// Metrics with the same fully-qualified name must have the same Help
// string.
@@ -184,6 +187,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
desc: desc,
upperBounds: opts.Buckets,
labelPairs: makeLabelPairs(desc, labelValues),
+ counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
@@ -200,28 +204,53 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
}
}
- // Finally we know the final length of h.upperBounds and can make counts.
- h.counts = make([]uint64, len(h.upperBounds))
+ // Finally we know the final length of h.upperBounds and can make counts
+ // for both states:
+ h.counts[0].buckets = make([]uint64, len(h.upperBounds))
+ h.counts[1].buckets = make([]uint64, len(h.upperBounds))
h.init(h) // Init self-collection.
return h
}
-type histogram struct {
+type histogramCounts struct {
// sumBits contains the bits of the float64 representing the sum of all
// observations. sumBits and count have to go first in the struct to
// guarantee alignment for atomic operations.
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
sumBits uint64
count uint64
+ buckets []uint64
+}
- selfCollector
- // Note that there is no mutex required.
+type histogram struct {
+ // countAndHotIdx is a complicated one. For lock-free yet atomic
+ // observations, we need to save the total count of observations again,
+ // combined with the index of the currently-hot counts struct, so that
+ // we can perform the operation on both values atomically. The least
+ // significant bit defines the hot counts struct. The remaining 63 bits
+ // represent the total count of observations. This happens under the
+ // assumption that the 63bit count will never overflow. Rationale: An
+ // observation takes about 30ns. Let's assume it could happen in
+ // 10ns. Overflowing the counter will then take at least (2^63)*10ns,
+ // which is about 3000 years.
+ //
+ // This has to be first in the struct for 64bit alignment. See
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ countAndHotIdx uint64
- desc *Desc
+ selfCollector
+ desc *Desc
+ writeMtx sync.Mutex // Only used in the Write method.
upperBounds []float64
- counts []uint64
+
+ // Two counts, one is "hot" for lock-free observations, the other is
+ // "cold" for writing out a dto.Metric. It has to be an array of
+ // pointers to guarantee 64bit alignment of the histogramCounts, see
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+ counts [2]*histogramCounts
+ hotIdx int // Index of currently-hot counts. Only used within Write.
labelPairs []*dto.LabelPair
}
@@ -241,36 +270,113 @@ func (h *histogram) Observe(v float64) {
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
i := sort.SearchFloat64s(h.upperBounds, v)
- if i < len(h.counts) {
- atomic.AddUint64(&h.counts[i], 1)
+
+ // We increment h.countAndHotIdx by 2 so that the counter in the upper
+ // 63 bits gets incremented by 1. At the same time, we get the new value
+ // back, which we can use to find the currently-hot counts.
+ n := atomic.AddUint64(&h.countAndHotIdx, 2)
+ hotCounts := h.counts[n%2]
+
+ if i < len(h.upperBounds) {
+ atomic.AddUint64(&hotCounts.buckets[i], 1)
}
- atomic.AddUint64(&h.count, 1)
for {
- oldBits := atomic.LoadUint64(&h.sumBits)
+ oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
- if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
+ if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
break
}
}
+ // Increment count last as we take it as a signal that the observation
+ // is complete.
+ atomic.AddUint64(&hotCounts.count, 1)
}
func (h *histogram) Write(out *dto.Metric) error {
- his := &dto.Histogram{}
- buckets := make([]*dto.Bucket, len(h.upperBounds))
+ var (
+ his = &dto.Histogram{}
+ buckets = make([]*dto.Bucket, len(h.upperBounds))
+ hotCounts, coldCounts *histogramCounts
+ count uint64
+ )
- his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
- his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
- var count uint64
+ // For simplicity, we mutex the rest of this method. It is not in the
+ // hot path, i.e. Observe is called much more often than Write. The
+ // complication of making Write lock-free isn't worth it.
+ h.writeMtx.Lock()
+ defer h.writeMtx.Unlock()
+
+ // This is a bit arcane, which is why the following spells out this if
+ // clause in English:
+ //
+ // If the currently-hot counts struct is #0, we atomically increment
+ // h.countAndHotIdx by 1 so that from now on Observe will use the counts
+ // struct #1. Furthermore, the atomic increment gives us the new value,
+ // which, in its most significant 63 bits, tells us the count of
+ // observations done so far up to and including currently ongoing
+ // observations still using the counts struct just changed from hot to
+ // cold. To have a normal uint64 for the count, we bitshift by 1 and
+ // save the result in count. We also set h.hotIdx to 1 for the next
+ // Write call, and we will refer to counts #1 as hotCounts and to counts
+ // #0 as coldCounts.
+ //
+ // If the currently-hot counts struct is #1, we do the corresponding
+ // things the other way round. We have to _decrement_ h.countAndHotIdx
+ // (which is a bit arcane in itself, as we have to express -1 with an
+ // unsigned int...).
+ if h.hotIdx == 0 {
+ count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
+ h.hotIdx = 1
+ hotCounts = h.counts[1]
+ coldCounts = h.counts[0]
+ } else {
+ count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
+ h.hotIdx = 0
+ hotCounts = h.counts[0]
+ coldCounts = h.counts[1]
+ }
+
+ // Now we have to wait for the now-declared-cold counts to actually cool
+ // down, i.e. wait for all observations still using it to finish. That's
+ // the case once the count in the cold counts struct is the same as the
+ // one atomically retrieved from the upper 63bits of h.countAndHotIdx.
+ for {
+ if count == atomic.LoadUint64(&coldCounts.count) {
+ break
+ }
+ runtime.Gosched() // Let observations get work done.
+ }
+
+ his.SampleCount = proto.Uint64(count)
+ his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
+ var cumCount uint64
for i, upperBound := range h.upperBounds {
- count += atomic.LoadUint64(&h.counts[i])
+ cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
buckets[i] = &dto.Bucket{
- CumulativeCount: proto.Uint64(count),
+ CumulativeCount: proto.Uint64(cumCount),
UpperBound: proto.Float64(upperBound),
}
}
+
his.Bucket = buckets
out.Histogram = his
out.Label = h.labelPairs
+
+ // Finally add all the cold counts to the new hot counts and reset the cold counts.
+ atomic.AddUint64(&hotCounts.count, count)
+ atomic.StoreUint64(&coldCounts.count, 0)
+ for {
+ oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
+ if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+ atomic.StoreUint64(&coldCounts.sumBits, 0)
+ break
+ }
+ }
+ for i := range h.upperBounds {
+ atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
+ atomic.StoreUint64(&coldCounts.buckets[i], 0)
+ }
return nil
}
@@ -454,7 +560,7 @@ func (h *constHistogram) Write(out *dto.Metric) error {
// bucket.
//
// NewConstHistogram returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc.
+// consistent with the variable labels in Desc or if Desc is invalid.
func NewConstHistogram(
desc *Desc,
count uint64,
@@ -462,6 +568,9 @@ func NewConstHistogram(
buckets map[float64]uint64,
labelValues ...string,
) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
}
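
The rewritten histogram keeps two histogramCounts structs and packs the index of the currently-hot one into the lowest bit of countAndHotIdx, with the upper 63 bits counting started observations, so Observe stays lock-free while Write swaps the structs and waits for in-flight observations to drain. A stripped-down sketch of the same hot/cold double-buffering idea, independent of the Prometheus types; hotColdCounter and its methods are illustrative names:

package main

import (
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
)

// hotColdCounter mimics the histogram's scheme: the lowest bit of
// countAndHotIdx selects the hot slot, the upper 63 bits count how many
// observations have started.
type hotColdCounter struct {
	countAndHotIdx uint64    // first field, for 64bit alignment of atomics
	slots          [2]uint64 // completed observations per slot
	writeMtx       sync.Mutex
	hotIdx         int // currently-hot slot; only touched under writeMtx
}

// Observe is lock-free: bump the started count and the hot slot.
func (c *hotColdCounter) Observe() {
	n := atomic.AddUint64(&c.countAndHotIdx, 2) // +1 in the upper 63 bits
	atomic.AddUint64(&c.slots[n%2], 1)          // n%2 is the hot slot index
}

// Read swaps hot and cold, waits for in-flight Observe calls to land in the
// now-cold slot, folds it back into the hot slot, and returns the total.
func (c *hotColdCounter) Read() uint64 {
	c.writeMtx.Lock()
	defer c.writeMtx.Unlock()

	var total uint64
	if c.hotIdx == 0 {
		total = atomic.AddUint64(&c.countAndHotIdx, 1) >> 1 // hot becomes slot 1
		c.hotIdx = 1
	} else {
		total = atomic.AddUint64(&c.countAndHotIdx, ^uint64(0)) >> 1 // back to slot 0
		c.hotIdx = 0
	}
	hot, cold := &c.slots[c.hotIdx], &c.slots[1-c.hotIdx]

	for atomic.LoadUint64(cold) != total {
		runtime.Gosched() // let outstanding observations finish
	}
	atomic.AddUint64(hot, atomic.LoadUint64(cold))
	atomic.StoreUint64(cold, 0)
	return total
}

func main() {
	var c hotColdCounter
	for i := 0; i < 1000; i++ {
		c.Observe()
	}
	fmt.Println(c.Read()) // 1000
}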
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
index 3f1fa151..4b8e6027 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -61,15 +61,15 @@ func giveBuf(buf *bytes.Buffer) {
// name).
//
// Deprecated: Please note the issues described in the doc comment of
-// InstrumentHandler. You might want to consider using
-// promhttp.InstrumentedHandler instead.
+// InstrumentHandler. You might want to consider using promhttp.Handler instead.
func Handler() http.Handler {
return InstrumentHandler("prometheus", UninstrumentedHandler())
}
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
//
-// Deprecated: Use promhttp.Handler instead. See there for further documentation.
+// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{})
+// instead. See there for further documentation.
func UninstrumentedHandler() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
mfs, err := DefaultGatherer.Gather()
@@ -149,21 +149,14 @@ var now nower = nowFunc(func() time.Time {
// (label name "method") and HTTP status code (label name "code").
//
// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
-// package promhttp instead. The issues are the following:
-//
-// - It uses Summaries rather than Histograms. Summaries are not useful if
-// aggregation across multiple instances is required.
-//
-// - It uses microseconds as unit, which is deprecated and should be replaced by
-// seconds.
-//
-// - The size of the request is calculated in a separate goroutine. Since this
-// calculator requires access to the request header, it creates a race with
-// any writes to the header performed during request handling.
-// httputil.ReverseProxy is a prominent example for a handler
-// performing such writes.
-//
-// - It has additional issues with HTTP/2, cf.
+// package promhttp instead. The issues are the following: (1) It uses Summaries
+// rather than Histograms. Summaries are not useful if aggregation across
+// multiple instances is required. (2) It uses microseconds as unit, which is
+// deprecated and should be replaced by seconds. (3) The size of the request is
+// calculated in a separate goroutine. Since this calculator requires access to
+// the request header, it creates a race with any writes to the header performed
+// during request handling. httputil.ReverseProxy is a prominent example for a
+// handler performing such writes. (4) It has additional issues with HTTP/2, cf.
// https://github.com/prometheus/client_golang/issues/272.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
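
The deprecation notes point users at package promhttp from the same client library. A possible replacement for the deprecated Handler/InstrumentHandler pair; the handler, metric name, and listen address are illustrative:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A Histogram in seconds instead of the deprecated Summary in microseconds.
	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_duration_seconds",
			Help:    "Request duration in seconds.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(duration)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Replaces InstrumentHandler without its Summary/microsecond/HTTP2 issues.
	http.Handle("/", promhttp.InstrumentHandlerDuration(duration, hello))
	// Replaces the deprecated prometheus.Handler.
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}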
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
new file mode 100644
index 00000000..351c26e1
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
@@ -0,0 +1,85 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "sort"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// metricSorter is a sortable slice of *dto.Metric.
+type metricSorter []*dto.Metric
+
+func (s metricSorter) Len() int {
+ return len(s)
+}
+
+func (s metricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s metricSorter) Less(i, j int) bool {
+ if len(s[i].Label) != len(s[j].Label) {
+ // This should not happen. The metrics are
+ // inconsistent. However, we have to deal with the fact, as
+ // people might use custom collectors or metric family injection
+ // to create inconsistent metrics. So let's simply compare the
+ // number of labels in this case. That will still yield
+ // reproducible sorting.
+ return len(s[i].Label) < len(s[j].Label)
+ }
+ for n, lp := range s[i].Label {
+ vi := lp.GetValue()
+ vj := s[j].Label[n].GetValue()
+ if vi != vj {
+ return vi < vj
+ }
+ }
+
+ // We should never arrive here. Multiple metrics with the same
+ // label set in the same scrape will lead to undefined ingestion
+ // behavior. However, as above, we have to provide stable sorting
+ // here, even for inconsistent metrics. So sort equal metrics
+ // by their timestamp, with missing timestamps (implying "now")
+ // coming last.
+ if s[i].TimestampMs == nil {
+ return false
+ }
+ if s[j].TimestampMs == nil {
+ return true
+ }
+ return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// NormalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+ for _, mf := range metricFamiliesByName {
+ sort.Sort(metricSorter(mf.Metric))
+ }
+ names := make([]string, 0, len(metricFamiliesByName))
+ for name, mf := range metricFamiliesByName {
+ if len(mf.Metric) > 0 {
+ names = append(names, name)
+ }
+ }
+ sort.Strings(names)
+ result := make([]*dto.MetricFamily, 0, len(names))
+ for _, name := range names {
+ result = append(result, metricFamiliesByName[name])
+ }
+ return result
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
index 2502e373..e68f132e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package prometheus
import (
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index 76035bca..55e6d86d 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -15,6 +15,9 @@ package prometheus
import (
"strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
)
@@ -43,9 +46,8 @@ type Metric interface {
// While populating dto.Metric, it is the responsibility of the
// implementation to ensure validity of the Metric protobuf (like valid
// UTF-8 strings or syntactically valid metric and label names). It is
- // recommended to sort labels lexicographically. (Implementers may find
- // LabelPairSorter useful for that.) Callers of Write should still make
- // sure of sorting if they depend on it.
+ // recommended to sort labels lexicographically. Callers of Write should
+ // still make sure of sorting if they depend on it.
Write(*dto.Metric) error
// TODO(beorn7): The original rationale of passing in a pre-allocated
// dto.Metric protobuf to save allocations has disappeared. The
@@ -57,8 +59,9 @@ type Metric interface {
// implementation XXX has its own XXXOpts type, but in most cases, it is just
// an alias of this type (which might change when the requirement arises.)
//
-// It is mandatory to set Name and Help to a non-empty string. All other fields
-// are optional and can safely be left at their zero value.
+// It is mandatory to set Name to a non-empty string. All other fields are
+// optional and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
type Opts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Metric (created by joining these components with
@@ -69,7 +72,7 @@ type Opts struct {
Subsystem string
Name string
- // Help provides information about this metric. Mandatory!
+ // Help provides information about this metric.
//
// Metrics with the same fully-qualified name must have the same Help
// string.
@@ -110,20 +113,19 @@ func BuildFQName(namespace, subsystem, name string) string {
return name
}
-// LabelPairSorter implements sort.Interface. It is used to sort a slice of
-// dto.LabelPair pointers. This is useful for implementing the Write method of
-// custom metrics.
-type LabelPairSorter []*dto.LabelPair
+// labelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type labelPairSorter []*dto.LabelPair
-func (s LabelPairSorter) Len() int {
+func (s labelPairSorter) Len() int {
return len(s)
}
-func (s LabelPairSorter) Swap(i, j int) {
+func (s labelPairSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
-func (s LabelPairSorter) Less(i, j int) bool {
+func (s labelPairSorter) Less(i, j int) bool {
return s[i].GetName() < s[j].GetName()
}
@@ -142,3 +144,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric {
func (m *invalidMetric) Desc() *Desc { return m.desc }
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
+
+type timestampedMetric struct {
+ Metric
+ t time.Time
+}
+
+func (m timestampedMetric) Write(pb *dto.Metric) error {
+ e := m.Metric.Write(pb)
+ pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
+ return e
+}
+
+// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
+// way that it has an explicit timestamp set to the provided Time. This is only
+// useful in rare cases as the timestamp of a Prometheus metric should usually
+// be set by the Prometheus server during scraping. Exceptions include mirroring
+// metrics with given timestamps from other metric
+// sources.
+//
+// NewMetricWithTimestamp works best with MustNewConstMetric,
+// MustNewConstHistogram, and MustNewConstSummary, see example.
+//
+// Currently, the exposition formats used by Prometheus are limited to
+// millisecond resolution. Thus, the provided time will be rounded down to the
+// next full millisecond value.
+func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
+ return timestampedMetric{Metric: m, t: t}
+}
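
NewMetricWithTimestamp covers the rare case of mirroring samples that already carry their own timestamps, typically combined with the MustNewConst* constructors. A small sketch of how it could be used inside a custom collector; the descriptor and the backup scenario are illustrative:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var lastBackupSize = prometheus.NewDesc(
	"backup_size_bytes",
	"Size of the last backup, as reported by the backup system.",
	nil, nil,
)

// collectMirrored emits a sample whose timestamp comes from the mirrored
// source rather than from the scrape.
func collectMirrored(ch chan<- prometheus.Metric, sizeBytes float64, finished time.Time) {
	ch <- prometheus.NewMetricWithTimestamp(
		finished,
		prometheus.MustNewConstMetric(lastBackupSize, prometheus.GaugeValue, sizeBytes),
	)
}

func main() {
	_ = collectMirrored // wired into a custom Collector's Collect method in real code
}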
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
index 5ab2b1c9..55176d58 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -13,11 +13,17 @@
package prometheus
-import "github.com/prometheus/procfs"
+import (
+ "errors"
+ "os"
+
+ "github.com/prometheus/procfs"
+)
type processCollector struct {
collectFn func(chan<- Metric)
pidFn func() (int, error)
+ reportErrors bool
cpuTotal *Desc
openFDs, maxFDs *Desc
vsize, maxVsize *Desc
@@ -25,34 +31,56 @@ type processCollector struct {
startTime *Desc
}
+// ProcessCollectorOpts defines the behavior of a process metrics collector
+// created with NewProcessCollector.
+type ProcessCollectorOpts struct {
+ // PidFn returns the PID of the process the collector collects metrics
+ // for. It is called upon each collection. By default, the PID of the
+ // current process is used, as determined on construction time by
+ // calling os.Getpid().
+ PidFn func() (int, error)
+ // If non-empty, each of the collected metrics is prefixed by the
+ // provided string and an underscore ("_").
+ Namespace string
+ // If true, any error encountered during collection is reported as an
+ // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
+ // and the collected metrics will be incomplete. (Possibly, no metrics
+ // will be collected at all.) While that's usually not desired, it is
+ // appropriate for the common "mix-in" of process metrics, where process
+ // metrics are nice to have, but failing to collect them should not
+ // disrupt the collection of the remaining metrics.
+ ReportErrors bool
+}
+
// NewProcessCollector returns a collector which exports the current state of
// process metrics including CPU, memory and file descriptor usage as well as
-// the process start time for the given process ID under the given namespace.
+// the process start time. The detailed behavior is defined by the provided
+// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
+// collector for the current process with an empty namespace string and no error
+// reporting.
//
// Currently, the collector depends on a Linux-style proc filesystem and
// therefore only exports metrics for Linux.
-func NewProcessCollector(pid int, namespace string) Collector {
- return NewProcessCollectorPIDFn(
- func() (int, error) { return pid, nil },
- namespace,
- )
-}
-
-// NewProcessCollectorPIDFn works like NewProcessCollector but the process ID is
-// determined on each collect anew by calling the given pidFn function.
-func NewProcessCollectorPIDFn(
- pidFn func() (int, error),
- namespace string,
-) Collector {
+//
+// Note: An older version of this function had the following signature:
+//
+// NewProcessCollector(pid int, namespace string) Collector
+//
+// Most commonly, it was called as
+//
+// NewProcessCollector(os.Getpid(), "")
+//
+// The following call of the current version is equivalent to the above:
+//
+// NewProcessCollector(ProcessCollectorOpts{})
+func NewProcessCollector(opts ProcessCollectorOpts) Collector {
ns := ""
- if len(namespace) > 0 {
- ns = namespace + "_"
+ if len(opts.Namespace) > 0 {
+ ns = opts.Namespace + "_"
}
- c := processCollector{
- pidFn: pidFn,
- collectFn: func(chan<- Metric) {},
-
+ c := &processCollector{
+ reportErrors: opts.ReportErrors,
cpuTotal: NewDesc(
ns+"process_cpu_seconds_total",
"Total user and system CPU time spent in seconds.",
@@ -90,12 +118,23 @@ func NewProcessCollectorPIDFn(
),
}
+ if opts.PidFn == nil {
+ pid := os.Getpid()
+ c.pidFn = func() (int, error) { return pid, nil }
+ } else {
+ c.pidFn = opts.PidFn
+ }
+
// Set up process metric collection if supported by the runtime.
if _, err := procfs.NewStat(); err == nil {
c.collectFn = c.processCollect
+ } else {
+ c.collectFn = func(ch chan<- Metric) {
+ c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+ }
}
- return &c
+ return c
}
// Describe returns all descriptions of the collector.
@@ -114,16 +153,16 @@ func (c *processCollector) Collect(ch chan<- Metric) {
c.collectFn(ch)
}
-// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
-// client allows users to configure the error behavior.
func (c *processCollector) processCollect(ch chan<- Metric) {
pid, err := c.pidFn()
if err != nil {
+ c.reportError(ch, nil, err)
return
}
p, err := procfs.NewProc(pid)
if err != nil {
+ c.reportError(ch, nil, err)
return
}
@@ -133,15 +172,33 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
if startTime, err := stat.StartTime(); err == nil {
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+ } else {
+ c.reportError(ch, c.startTime, err)
}
+ } else {
+ c.reportError(ch, nil, err)
}
if fds, err := p.FileDescriptorsLen(); err == nil {
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
+ } else {
+ c.reportError(ch, c.openFDs, err)
}
if limits, err := p.NewLimits(); err == nil {
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
+ } else {
+ c.reportError(ch, nil, err)
+ }
+}
+
+func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
+ if !c.reportErrors {
+ return
+ }
+ if desc == nil {
+ desc = NewInvalidDesc(err)
}
+ ch <- NewInvalidMetric(desc, err)
}
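
NewProcessCollector now takes a ProcessCollectorOpts value; the zero value reproduces the old default behaviour (current PID, no namespace, errors ignored), while the fields allow prefixing the metrics and surfacing collection failures. A sketch with illustrative option values:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	// The zero value of ProcessCollectorOpts reproduces the old defaults.
	// Here the metrics get a "gitlab_pages_" prefix and collection failures
	// are reported as invalid metrics, which turn into scrape errors.
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		Namespace:    "gitlab_pages",
		ReportErrors: true,
	}))
}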
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 896838f1..e422ef38 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -16,7 +16,6 @@ package prometheus
import (
"bytes"
"fmt"
- "os"
"runtime"
"sort"
"strings"
@@ -26,6 +25,8 @@ import (
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
)
const (
@@ -52,7 +53,7 @@ var (
)
func init() {
- MustRegister(NewProcessCollector(os.Getpid(), ""))
+ MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
MustRegister(NewGoCollector())
}
@@ -106,9 +107,6 @@ type Registerer interface {
// Collector, and for providing a Collector that will not cause
// inconsistent metrics on collection. (This would lead to scrape
// errors.)
- //
- // It is in general not safe to register the same Collector multiple
- // times concurrently.
Register(Collector) error
// MustRegister works like Register but registers any number of
// Collectors and panics upon the first registration that causes an
@@ -272,7 +270,12 @@ func (r *Registry) Register(c Collector) error {
close(descChan)
}()
r.mtx.Lock()
- defer r.mtx.Unlock()
+ defer func() {
+ // Drain channel in case of premature return to not leak a goroutine.
+ for range descChan {
+ }
+ r.mtx.Unlock()
+ }()
// Conduct various tests...
for desc := range descChan {
@@ -527,7 +530,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
break
}
}
- return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+ return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
// processMetric is an internal helper method only used by the Gather method.
@@ -538,6 +541,11 @@ func processMetric(
registeredDescIDs map[uint64]struct{},
) error {
desc := metric.Desc()
+ // Wrapped metrics collected by an unchecked Collector can have an
+ // invalid Desc.
+ if desc.err != nil {
+ return desc.err
+ }
dtoMetric := &dto.Metric{}
if err := metric.Write(dtoMetric); err != nil {
return fmt.Errorf("error collecting metric %v: %s", desc, err)
@@ -707,72 +715,7 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
}
}
}
- return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
-}
-
-// metricSorter is a sortable slice of *dto.Metric.
-type metricSorter []*dto.Metric
-
-func (s metricSorter) Len() int {
- return len(s)
-}
-
-func (s metricSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s metricSorter) Less(i, j int) bool {
- if len(s[i].Label) != len(s[j].Label) {
- // This should not happen. The metrics are
- // inconsistent. However, we have to deal with the fact, as
- // people might use custom collectors or metric family injection
- // to create inconsistent metrics. So let's simply compare the
- // number of labels in this case. That will still yield
- // reproducible sorting.
- return len(s[i].Label) < len(s[j].Label)
- }
- for n, lp := range s[i].Label {
- vi := lp.GetValue()
- vj := s[j].Label[n].GetValue()
- if vi != vj {
- return vi < vj
- }
- }
-
- // We should never arrive here. Multiple metrics with the same
- // label set in the same scrape will lead to undefined ingestion
- // behavior. However, as above, we have to provide stable sorting
- // here, even for inconsistent metrics. So sort equal metrics
- // by their timestamp, with missing timestamps (implying "now")
- // coming last.
- if s[i].TimestampMs == nil {
- return false
- }
- if s[j].TimestampMs == nil {
- return true
- }
- return s[i].GetTimestampMs() < s[j].GetTimestampMs()
-}
-
-// normalizeMetricFamilies returns a MetricFamily slice with empty
-// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
-// the slice, with the contained Metrics sorted within each MetricFamily.
-func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
- for _, mf := range metricFamiliesByName {
- sort.Sort(metricSorter(mf.Metric))
- }
- names := make([]string, 0, len(metricFamiliesByName))
- for name, mf := range metricFamiliesByName {
- if len(mf.Metric) > 0 {
- names = append(names, name)
- }
- }
- sort.Strings(names)
- result := make([]*dto.MetricFamily, 0, len(names))
- for _, name := range names {
- result = append(result, metricFamiliesByName[name])
- }
- return result
+ return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
// checkSuffixCollisions checks for collisions with the “magic” suffixes the
@@ -844,6 +787,8 @@ func checkMetricConsistency(
dtoMetric *dto.Metric,
metricHashes map[uint64]struct{},
) error {
+ name := metricFamily.GetName()
+
// Type consistency with metric family.
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
@@ -852,37 +797,46 @@ func checkMetricConsistency(
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
return fmt.Errorf(
"collected metric %q { %s} is not a %s",
- metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ name, dtoMetric, metricFamily.GetType(),
)
}
+ previousLabelName := ""
for _, labelPair := range dtoMetric.GetLabel() {
- if !checkLabelName(labelPair.GetName()) {
+ labelName := labelPair.GetName()
+ if labelName == previousLabelName {
+ return fmt.Errorf(
+ "collected metric %q { %s} has two or more labels with the same name: %s",
+ name, dtoMetric, labelName,
+ )
+ }
+ if !checkLabelName(labelName) {
return fmt.Errorf(
"collected metric %q { %s} has a label with an invalid name: %s",
- metricFamily.GetName(), dtoMetric, labelPair.GetName(),
+ name, dtoMetric, labelName,
)
}
- if dtoMetric.Summary != nil && labelPair.GetName() == quantileLabel {
+ if dtoMetric.Summary != nil && labelName == quantileLabel {
return fmt.Errorf(
"collected metric %q { %s} must not have an explicit %q label",
- metricFamily.GetName(), dtoMetric, quantileLabel,
+ name, dtoMetric, quantileLabel,
)
}
if !utf8.ValidString(labelPair.GetValue()) {
return fmt.Errorf(
"collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
- metricFamily.GetName(), dtoMetric, labelPair.GetName(), labelPair.GetValue())
+ name, dtoMetric, labelName, labelPair.GetValue())
}
+ previousLabelName = labelName
}
// Is the metric unique (i.e. no other metric with the same name and the same labels)?
h := hashNew()
- h = hashAdd(h, metricFamily.GetName())
+ h = hashAdd(h, name)
h = hashAddByte(h, separatorByte)
// Make sure label pairs are sorted. We depend on it for the consistency
// check.
- sort.Sort(LabelPairSorter(dtoMetric.Label))
+ sort.Sort(labelPairSorter(dtoMetric.Label))
for _, lp := range dtoMetric.Label {
h = hashAdd(h, lp.GetName())
h = hashAddByte(h, separatorByte)
@@ -892,7 +846,7 @@ func checkMetricConsistency(
if _, exists := metricHashes[h]; exists {
return fmt.Errorf(
"collected metric %q { %s} was collected before with the same name and label values",
- metricFamily.GetName(), dtoMetric,
+ name, dtoMetric,
)
}
metricHashes[h] = struct{}{}
@@ -926,7 +880,7 @@ func checkDescConsistency(
metricFamily.GetName(), dtoMetric, desc,
)
}
- sort.Sort(LabelPairSorter(lpsFromDesc))
+ sort.Sort(labelPairSorter(lpsFromDesc))
for i, lpFromDesc := range lpsFromDesc {
lpFromMetric := dtoMetric.Label[i]
if lpFromDesc.GetName() != lpFromMetric.GetName() ||
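
Register now drains the descriptor channel before any early return, so the goroutine feeding it can always run to completion. The same drain-on-premature-return pattern in isolation; consume and its budget check are illustrative:

package main

import "fmt"

// consume drains ch even when it bails out early, so the goroutine that is
// still sending on ch can always finish instead of blocking forever.
func consume(ch chan int, budget int) error {
	defer func() {
		// Drain channel in case of premature return to not leak a goroutine.
		for range ch {
		}
	}()
	for v := range ch {
		if v > budget {
			return fmt.Errorf("value %d over budget", v)
		}
	}
	return nil
}

func main() {
	ch := make(chan int)
	go func() {
		for i := 0; i < 10; i++ {
			ch <- i
		}
		close(ch)
	}()
	fmt.Println(consume(ch, 5))
}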
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index 83b403c5..f7e92d82 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -37,7 +37,7 @@ const quantileLabel = "quantile"
// A typical use-case is the observation of request latencies. By default, a
// Summary provides the median, the 90th and the 99th percentile of the latency
// as rank estimations. However, the default behavior will change in the
-// upcoming v0.10 of the library. There will be no rank estiamtions at all by
+// upcoming v0.10 of the library. There will be no rank estimations at all by
// default. For a sane transition, it is recommended to set the desired rank
// estimations explicitly.
//
@@ -81,10 +81,10 @@ const (
)
// SummaryOpts bundles the options for creating a Summary metric. It is
-// mandatory to set Name and Help to a non-empty string. While all other fields
-// are optional and can safely be left at their zero value, it is recommended to
-// explicitly set the Objectives field to the desired value as the default value
-// will change in the upcoming v0.10 of the library.
+// mandatory to set Name to a non-empty string. While all other fields are
+// optional and can safely be left at their zero value, it is recommended to set
+// a help string and to explicitly set the Objectives field to the desired value
+// as the default value will change in the upcoming v0.10 of the library.
type SummaryOpts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Summary (created by joining these components with
@@ -95,7 +95,7 @@ type SummaryOpts struct {
Subsystem string
Name string
- // Help provides information about this Summary. Mandatory!
+ // Help provides information about this Summary.
//
// Metrics with the same fully-qualified name must have the same Help
// string.
@@ -586,7 +586,7 @@ func (s *constSummary) Write(out *dto.Metric) error {
// map[float64]float64{0.5: 0.23, 0.99: 0.56}
//
// NewConstSummary returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc.
+// consistent with the variable labels in Desc or if Desc is invalid.
func NewConstSummary(
desc *Desc,
count uint64,
@@ -594,6 +594,9 @@ func NewConstSummary(
quantiles map[float64]float64,
labelValues ...string,
) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
}
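
The SummaryOpts comment now recommends setting Objectives explicitly because the default rank estimations go away in v0.10. A sketch of a summary with explicit objectives; the metric name and quantiles are illustrative:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "request_duration_seconds",
		Help: "Request latency.",
		// Spell out the quantiles (with their allowed absolute error) rather
		// than relying on the default, which goes away in v0.10.
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
	prometheus.MustRegister(latency)
	latency.Observe(0.042)
}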
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index 9fb7eab0..eb248f10 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -17,9 +17,9 @@ import (
"fmt"
"sort"
- dto "github.com/prometheus/client_model/go"
-
"github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
)
// ValueType is an enumeration of metric types that represent a simple value.
@@ -77,8 +77,12 @@ func (v *valueFunc) Write(out *dto.Metric) error {
// operations. However, when implementing custom Collectors, it is useful as a
// throw-away metric that is generated on the fly to send it to Prometheus in
// the Collect method. NewConstMetric returns an error if the length of
-// labelValues is not consistent with the variable labels in Desc.
+// labelValues is not consistent with the variable labels in Desc or if Desc is
+// invalid.
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
}
@@ -153,6 +157,6 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
})
}
labelPairs = append(labelPairs, desc.constLabelPairs...)
- sort.Sort(LabelPairSorter(labelPairs))
+ sort.Sort(labelPairSorter(labelPairs))
return labelPairs
}
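
NewConstMetric (like the const histogram and summary constructors above) now reports an invalid Desc as an error instead of letting it surface later during gathering. A sketch of that behaviour with an intentionally invalid metric name:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// "0bad" is not a valid metric name, so the Desc carries an error.
	bad := prometheus.NewDesc("0bad", "An intentionally invalid descriptor.", nil, nil)

	// The constructor now returns the Desc's error up front.
	if _, err := prometheus.NewConstMetric(bad, prometheus.GaugeValue, 1); err != nil {
		fmt.Println("NewConstMetric:", err)
	}
}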
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
new file mode 100644
index 00000000..49159bf3
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -0,0 +1,179 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// WrapRegistererWith returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels.
+//
+// WrapRegistererWith provides a way to add fixed labels to a subset of
+// Collectors. It should not be used to add fixed labels to all metrics exposed.
+//
+// The Collector example demonstrates a use of WrapRegistererWith.
+func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
+ return &wrappingRegisterer{
+ wrappedRegisterer: reg,
+ labels: labels,
+ }
+}
+
+// WrapRegistererWithPrefix returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided prefix to the name of all Metrics it collects.
+//
+// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
+// a sub-system. To make this work, register metrics of the sub-system with the
+// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
+// to use the same prefix for all metrics exposed. In particular, do not prefix
+// metric names that are standardized across applications, as that would break
+// horizontal monitoring, for example the metrics provided by the Go collector
+// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
+// fact, those metrics are already prefixed with “go_” or “process_”,
+// respectively.)
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
+ return &wrappingRegisterer{
+ wrappedRegisterer: reg,
+ prefix: prefix,
+ }
+}
+
+type wrappingRegisterer struct {
+ wrappedRegisterer Registerer
+ prefix string
+ labels Labels
+}
+
+func (r *wrappingRegisterer) Register(c Collector) error {
+ return r.wrappedRegisterer.Register(&wrappingCollector{
+ wrappedCollector: c,
+ prefix: r.prefix,
+ labels: r.labels,
+ })
+}
+
+func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
+ for _, c := range cs {
+ if err := r.Register(c); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func (r *wrappingRegisterer) Unregister(c Collector) bool {
+ return r.wrappedRegisterer.Unregister(&wrappingCollector{
+ wrappedCollector: c,
+ prefix: r.prefix,
+ labels: r.labels,
+ })
+}
+
+type wrappingCollector struct {
+ wrappedCollector Collector
+ prefix string
+ labels Labels
+}
+
+func (c *wrappingCollector) Collect(ch chan<- Metric) {
+ wrappedCh := make(chan Metric)
+ go func() {
+ c.wrappedCollector.Collect(wrappedCh)
+ close(wrappedCh)
+ }()
+ for m := range wrappedCh {
+ ch <- &wrappingMetric{
+ wrappedMetric: m,
+ prefix: c.prefix,
+ labels: c.labels,
+ }
+ }
+}
+
+func (c *wrappingCollector) Describe(ch chan<- *Desc) {
+ wrappedCh := make(chan *Desc)
+ go func() {
+ c.wrappedCollector.Describe(wrappedCh)
+ close(wrappedCh)
+ }()
+ for desc := range wrappedCh {
+ ch <- wrapDesc(desc, c.prefix, c.labels)
+ }
+}
+
+type wrappingMetric struct {
+ wrappedMetric Metric
+ prefix string
+ labels Labels
+}
+
+func (m *wrappingMetric) Desc() *Desc {
+ return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
+}
+
+func (m *wrappingMetric) Write(out *dto.Metric) error {
+ if err := m.wrappedMetric.Write(out); err != nil {
+ return err
+ }
+ if len(m.labels) == 0 {
+ // No wrapping labels.
+ return nil
+ }
+ for ln, lv := range m.labels {
+ out.Label = append(out.Label, &dto.LabelPair{
+ Name: proto.String(ln),
+ Value: proto.String(lv),
+ })
+ }
+ sort.Sort(labelPairSorter(out.Label))
+ return nil
+}
+
+func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
+ constLabels := Labels{}
+ for _, lp := range desc.constLabelPairs {
+ constLabels[*lp.Name] = *lp.Value
+ }
+ for ln, lv := range labels {
+ if _, alreadyUsed := constLabels[ln]; alreadyUsed {
+ return &Desc{
+ fqName: desc.fqName,
+ help: desc.help,
+ variableLabels: desc.variableLabels,
+ constLabelPairs: desc.constLabelPairs,
+ err: fmt.Errorf("attempted wrapping with already existing label name %q", ln),
+ }
+ }
+ constLabels[ln] = lv
+ }
+ // NewDesc will do remaining validations.
+ newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+ // Propagate errors if there were any. This will override any error
+ // created by NewDesc above, i.e. earlier errors get precedence.
+ if desc.err != nil {
+ newDesc.err = desc.err
+ }
+ return newDesc
+}
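
The new wrap.go lets a sub-system register its metrics through a wrapping Registerer that adds a name prefix and/or fixed labels. A sketch combining both wrappers; the prefix, label, and metric names are illustrative:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	// Everything registered through wrapped gets a "pages_" name prefix and a
	// fixed host label, without the registering code knowing about either.
	wrapped := prometheus.WrapRegistererWith(
		prometheus.Labels{"host": "example.com"},
		prometheus.WrapRegistererWithPrefix("pages_", reg),
	)

	served := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "requests_total",
		Help: "Requests served.",
	})
	wrapped.MustRegister(served) // exposed as pages_requests_total{host="example.com"}
	served.Inc()
}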
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index f11321cd..46b74364 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -14,13 +14,44 @@
package expfmt
import (
+ "bytes"
"fmt"
"io"
"math"
- "strings"
+ "strconv"
+ "sync"
- dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer
+// implements it.
+type enhancedWriter interface {
+ io.Writer
+ WriteRune(r rune) (n int, err error)
+ WriteString(s string) (n int, err error)
+ WriteByte(c byte) error
+}
+
+const (
+ initialBufSize = 512
+ initialNumBufSize = 24
+)
+
+var (
+ bufPool = sync.Pool{
+ New: func() interface{} {
+ return bytes.NewBuffer(make([]byte, 0, initialBufSize))
+ },
+ }
+ numBufPool = sync.Pool{
+ New: func() interface{} {
+ b := make([]byte, 0, initialNumBufSize)
+ return &b
+ },
+ }
)
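
The text encoder now writes through the enhancedWriter interface and only falls back to a pooled bytes.Buffer when the destination does not already provide those methods. The same interface-upgrade-with-fallback pattern in isolation; stringWriter, writeGreeting, and the pool are illustrative names:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"sync"
)

// stringWriter is the subset of extra methods the encoder wants; bytes.Buffer
// implements it.
type stringWriter interface {
	io.Writer
	WriteString(s string) (int, error)
	WriteByte(c byte) error
}

var pool = sync.Pool{
	New: func() interface{} { return &bytes.Buffer{} },
}

// writeGreeting upgrades out to stringWriter if possible; otherwise it stages
// the output in a pooled buffer and flushes it to out with a single Write.
func writeGreeting(out io.Writer, name string) (written int, err error) {
	w, ok := out.(stringWriter)
	if !ok {
		b := pool.Get().(*bytes.Buffer)
		b.Reset()
		w = b
		defer func() {
			bWritten, bErr := out.Write(b.Bytes())
			written = bWritten
			if err == nil {
				err = bErr
			}
			pool.Put(b)
		}()
	}
	n, err := w.WriteString("hello " + name)
	written += n
	if err != nil {
		return
	}
	err = w.WriteByte('\n')
	written++
	return
}

func main() {
	n, err := writeGreeting(os.Stdout, "world")
	fmt.Println(n, err)
}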
// MetricFamilyToText converts a MetricFamily proto message into text format and
@@ -32,37 +63,92 @@ import (
// will result in invalid text format output.
//
// This method fulfills the type 'prometheus.encoder'.
-func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
- var written int
-
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
// Fail-fast checks.
if len(in.Metric) == 0 {
- return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
}
name := in.GetName()
if name == "" {
- return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ return 0, fmt.Errorf("MetricFamily has no name: %s", in)
}
+ // Try the interface upgrade. If it doesn't work, we'll use a
+ // bytes.Buffer from the sync.Pool and write out its content to out in a
+ // single go in the end.
+ w, ok := out.(enhancedWriter)
+ if !ok {
+ b := bufPool.Get().(*bytes.Buffer)
+ b.Reset()
+ w = b
+ defer func() {
+ bWritten, bErr := out.Write(b.Bytes())
+ written = bWritten
+ if err == nil {
+ err = bErr
+ }
+ bufPool.Put(b)
+ }()
+ }
+
+ var n int
+
// Comments, first HELP, then TYPE.
if in.Help != nil {
- n, err := fmt.Fprintf(
- out, "# HELP %s %s\n",
- name, escapeString(*in.Help, false),
- )
+ n, err = w.WriteString("# HELP ")
written += n
if err != nil {
- return written, err
+ return
+ }
+ n, err = w.WriteString(name)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return
+ }
+ n, err = writeEscapedString(w, *in.Help, false)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return
}
}
+ n, err = w.WriteString("# TYPE ")
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = w.WriteString(name)
+ written += n
+ if err != nil {
+ return
+ }
metricType := in.GetType()
- n, err := fmt.Fprintf(
- out, "# TYPE %s %s\n",
- name, strings.ToLower(metricType.String()),
- )
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ n, err = w.WriteString(" counter\n")
+ case dto.MetricType_GAUGE:
+ n, err = w.WriteString(" gauge\n")
+ case dto.MetricType_SUMMARY:
+ n, err = w.WriteString(" summary\n")
+ case dto.MetricType_UNTYPED:
+ n, err = w.WriteString(" untyped\n")
+ case dto.MetricType_HISTOGRAM:
+ n, err = w.WriteString(" histogram\n")
+ default:
+ return written, fmt.Errorf("unknown metric type %s", metricType.String())
+ }
written += n
if err != nil {
- return written, err
+ return
}
// Finally the samples, one line for each.
@@ -75,9 +161,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
)
}
n, err = writeSample(
- name, metric, "", "",
+ w, name, "", metric, "", 0,
metric.Counter.GetValue(),
- out,
)
case dto.MetricType_GAUGE:
if metric.Gauge == nil {
@@ -86,9 +171,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
)
}
n, err = writeSample(
- name, metric, "", "",
+ w, name, "", metric, "", 0,
metric.Gauge.GetValue(),
- out,
)
case dto.MetricType_UNTYPED:
if metric.Untyped == nil {
@@ -97,9 +181,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
)
}
n, err = writeSample(
- name, metric, "", "",
+ w, name, "", metric, "", 0,
metric.Untyped.GetValue(),
- out,
)
case dto.MetricType_SUMMARY:
if metric.Summary == nil {
@@ -109,29 +192,26 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
}
for _, q := range metric.Summary.Quantile {
n, err = writeSample(
- name, metric,
- model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
+ w, name, "", metric,
+ model.QuantileLabel, q.GetQuantile(),
q.GetValue(),
- out,
)
written += n
if err != nil {
- return written, err
+ return
}
}
n, err = writeSample(
- name+"_sum", metric, "", "",
+ w, name, "_sum", metric, "", 0,
metric.Summary.GetSampleSum(),
- out,
)
+ written += n
if err != nil {
- return written, err
+ return
}
- written += n
n, err = writeSample(
- name+"_count", metric, "", "",
+ w, name, "_count", metric, "", 0,
float64(metric.Summary.GetSampleCount()),
- out,
)
case dto.MetricType_HISTOGRAM:
if metric.Histogram == nil {
@@ -140,46 +220,42 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
)
}
infSeen := false
- for _, q := range metric.Histogram.Bucket {
+ for _, b := range metric.Histogram.Bucket {
n, err = writeSample(
- name+"_bucket", metric,
- model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
- float64(q.GetCumulativeCount()),
- out,
+ w, name, "_bucket", metric,
+ model.BucketLabel, b.GetUpperBound(),
+ float64(b.GetCumulativeCount()),
)
written += n
if err != nil {
- return written, err
+ return
}
- if math.IsInf(q.GetUpperBound(), +1) {
+ if math.IsInf(b.GetUpperBound(), +1) {
infSeen = true
}
}
if !infSeen {
n, err = writeSample(
- name+"_bucket", metric,
- model.BucketLabel, "+Inf",
+ w, name, "_bucket", metric,
+ model.BucketLabel, math.Inf(+1),
float64(metric.Histogram.GetSampleCount()),
- out,
)
+ written += n
if err != nil {
- return written, err
+ return
}
- written += n
}
n, err = writeSample(
- name+"_sum", metric, "", "",
+ w, name, "_sum", metric, "", 0,
metric.Histogram.GetSampleSum(),
- out,
)
+ written += n
if err != nil {
- return written, err
+ return
}
- written += n
n, err = writeSample(
- name+"_count", metric, "", "",
+ w, name, "_count", metric, "", 0,
float64(metric.Histogram.GetSampleCount()),
- out,
)
default:
return written, fmt.Errorf(
@@ -188,116 +264,219 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
}
written += n
if err != nil {
- return written, err
+ return
}
}
- return written, nil
+ return
}
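For orientation, MetricFamilyToText is the exported entry point that this rewrite optimizes; a minimal usage sketch follows (the metric family contents are illustrative only, not taken from this repository):

    package main

    import (
        "os"

        "github.com/golang/protobuf/proto"
        dto "github.com/prometheus/client_model/go"
        "github.com/prometheus/common/expfmt"
    )

    func main() {
        // Build a one-sample counter family; names and values are made up.
        mf := &dto.MetricFamily{
            Name: proto.String("http_requests_total"),
            Help: proto.String("Total number of HTTP requests."),
            Type: dto.MetricType_COUNTER.Enum(),
            Metric: []*dto.Metric{{
                Label:   []*dto.LabelPair{{Name: proto.String("code"), Value: proto.String("200")}},
                Counter: &dto.Counter{Value: proto.Float64(42)},
            }},
        }
        // Writes:
        //   # HELP http_requests_total Total number of HTTP requests.
        //   # TYPE http_requests_total counter
        //   http_requests_total{code="200"} 42
        if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
            panic(err)
        }
    }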
-// writeSample writes a single sample in text format to out, given the metric
+// writeSample writes a single sample in text format to w, given the metric
// name, the metric proto message itself, optionally an additional label name
-// and value (use empty strings if not required), and the value. The function
-// returns the number of bytes written and any error encountered.
+// with a float64 value (use empty string as label name if not required), and
+// the value. The function returns the number of bytes written and any error
+// encountered.
func writeSample(
- name string,
+ w enhancedWriter,
+ name, suffix string,
metric *dto.Metric,
- additionalLabelName, additionalLabelValue string,
+ additionalLabelName string, additionalLabelValue float64,
value float64,
- out io.Writer,
) (int, error) {
var written int
- n, err := fmt.Fprint(out, name)
+ n, err := w.WriteString(name)
written += n
if err != nil {
return written, err
}
- n, err = labelPairsToText(
- metric.Label,
- additionalLabelName, additionalLabelValue,
- out,
+ if suffix != "" {
+ n, err = w.WriteString(suffix)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeLabelPairs(
+ w, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
return written, err
}
- n, err = fmt.Fprintf(out, " %v", value)
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err = writeFloat(w, value)
written += n
if err != nil {
return written, err
}
if metric.TimestampMs != nil {
- n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err = writeInt(w, *metric.TimestampMs)
written += n
if err != nil {
return written, err
}
}
- n, err = out.Write([]byte{'\n'})
- written += n
+ err = w.WriteByte('\n')
+ written++
if err != nil {
return written, err
}
return written, nil
}
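Inside the package, the new signature bundles the name suffix and the numeric extra label into the call; a hedged illustration of what one call emits (names and numbers are examples only, not from this diff):

    // For a *dto.Metric m carrying no labels of its own, a call such as
    //
    //   writeSample(w, "http_request_duration_seconds", "_bucket", m,
    //       model.BucketLabel, 0.25, 1027)
    //
    // appends one text-format line to w:
    //
    //   http_request_duration_seconds_bucket{le="0.25"} 1027
    //
    // (model.BucketLabel is "le"; a non-nil m.TimestampMs would add a trailing
    // millisecond timestamp before the newline.)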
-// labelPairsToText converts a slice of LabelPair proto messages plus the
+// writeLabelPairs converts a slice of LabelPair proto messages plus the
// explicitly given additional label pair into text formatted as required by the
-// text format and writes it to 'out'. An empty slice in combination with an
-// empty string 'additionalLabelName' results in nothing being
-// written. Otherwise, the label pairs are written, escaped as required by the
-// text format, and enclosed in '{...}'. The function returns the number of
-// bytes written and any error encountered.
-func labelPairsToText(
+// text format and writes it to 'w'. An empty slice in combination with an empty
+// string 'additionalLabelName' results in nothing being written. Otherwise, the
+// label pairs are written, escaped as required by the text format, and enclosed
+// in '{...}'. The function returns the number of bytes written and any error
+// encountered.
+func writeLabelPairs(
+ w enhancedWriter,
in []*dto.LabelPair,
- additionalLabelName, additionalLabelValue string,
- out io.Writer,
+ additionalLabelName string, additionalLabelValue float64,
) (int, error) {
if len(in) == 0 && additionalLabelName == "" {
return 0, nil
}
- var written int
- separator := '{'
+ var (
+ written int
+ separator byte = '{'
+ )
for _, lp := range in {
- n, err := fmt.Fprintf(
- out, `%c%s="%s"`,
- separator, lp.GetName(), escapeString(lp.GetValue(), true),
- )
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err := w.WriteString(lp.GetName())
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = w.WriteString(`="`)
written += n
if err != nil {
return written, err
}
+ n, err = writeEscapedString(w, lp.GetValue(), true)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
separator = ','
}
if additionalLabelName != "" {
- n, err := fmt.Fprintf(
- out, `%c%s="%s"`,
- separator, additionalLabelName,
- escapeString(additionalLabelValue, true),
- )
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err := w.WriteString(additionalLabelName)
written += n
if err != nil {
return written, err
}
+ n, err = w.WriteString(`="`)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = writeFloat(w, additionalLabelValue)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
}
- n, err := out.Write([]byte{'}'})
- written += n
+ err := w.WriteByte('}')
+ written++
if err != nil {
return written, err
}
return written, nil
}
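Note that the additional label value is now a float64 rendered through writeFloat, which is what the quantile and bucket labels rely on; a small illustration with assumed inputs:

    // Given labels method="post" and code="200" on the metric, plus the
    // additional pair le=0.5 passed as a float64, writeLabelPairs emits:
    //
    //   {method="post",code="200",le="0.5"}
    //
    // With no label pairs and an empty additionalLabelName, nothing is written.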
-var (
- escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
- escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
-)
-
-// escapeString replaces '\' by '\\', new line character by '\n', and - if
+// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if
// includeDoubleQuote is true - '"' by '\"'.
-func escapeString(v string, includeDoubleQuote bool) string {
- if includeDoubleQuote {
- return escapeWithDoubleQuote.Replace(v)
+func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
+ var (
+ written, n int
+ err error
+ )
+ for _, r := range v {
+ switch r {
+ case '\\':
+ n, err = w.WriteString(`\\`)
+ case '\n':
+ n, err = w.WriteString(`\n`)
+ case '"':
+ if includeDoubleQuote {
+ n, err = w.WriteString(`\"`)
+ } else {
+ n, err = w.WriteRune(r)
+ }
+ default:
+ n, err = w.WriteRune(r)
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
}
+ return written, nil
+}
+
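The per-rune loop replaces the strings.Replacer pair deleted above; a runnable check of the escaping rules it preserves (the replacers are copied verbatim from the removed lines):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Same substitution tables as the pre-0.9 escapeString helper.
        escape := strings.NewReplacer("\\", `\\`, "\n", `\n`)
        escapeWithDoubleQuote := strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)

        fmt.Println(escape.Replace("help text with \\ and \n"))           // help text with \\ and \n
        fmt.Println(escapeWithDoubleQuote.Replace(`label "value"` + "\n")) // label \"value\"\n
    }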
+// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
+// a few common cases for increased efficiency. For non-hardcoded cases, it uses
+// strconv.AppendFloat to avoid allocations, similar to writeInt.
+func writeFloat(w enhancedWriter, f float64) (int, error) {
+ switch {
+ case f == 1:
+ return 1, w.WriteByte('1')
+ case f == 0:
+ return 1, w.WriteByte('0')
+ case f == -1:
+ return w.WriteString("-1")
+ case math.IsNaN(f):
+ return w.WriteString("NaN")
+ case math.IsInf(f, +1):
+ return w.WriteString("+Inf")
+ case math.IsInf(f, -1):
+ return w.WriteString("-Inf")
+ default:
+ bp := numBufPool.Get().(*[]byte)
+ *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+ written, err := w.Write(*bp)
+ numBufPool.Put(bp)
+ return written, err
+ }
+}
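For the non-hardcoded branch, the output matches strconv's shortest 'g' formatting; a quick sketch of the equivalence (values chosen arbitrarily):

    package main

    import (
        "fmt"
        "math"
        "strconv"
    )

    func main() {
        for _, f := range []float64{0.5, 1e-9, math.Inf(+1), math.NaN()} {
            // writeFloat falls back to exactly this formatting for values it
            // does not special-case (1, 0, -1, NaN and ±Inf are short-circuited).
            fmt.Println(strconv.FormatFloat(f, 'g', -1, 64))
        }
    }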
- return escape.Replace(v)
+// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
+// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
+// allocations.
+func writeInt(w enhancedWriter, i int64) (int, error) {
+ bp := numBufPool.Get().(*[]byte)
+ *bp = strconv.AppendInt((*bp)[:0], i, 10)
+ written, err := w.Write(*bp)
+ numBufPool.Put(bp)
+ return written, err
}
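The enhancedWriter interface and the two sync.Pools used above are declared in an earlier hunk of text_create.go that is not shown here; the following is a plausible sketch inferred from the calls in this diff (method set, buffer types and sizes are assumptions, not the vendored definitions):

    package expfmt

    import (
        "bytes"
        "io"
        "sync"
    )

    // Hypothetical reconstruction -- the real declarations live earlier in
    // text_create.go and may differ in detail. *bytes.Buffer satisfies this
    // interface, which is why MetricFamilyToText can pool one as its fallback.
    type enhancedWriter interface {
        io.Writer
        WriteRune(r rune) (n int, err error)
        WriteString(s string) (n int, err error)
        WriteByte(c byte) error
    }

    var (
        // Output buffers reused by MetricFamilyToText when the caller's
        // io.Writer is not already an enhancedWriter.
        bufPool = sync.Pool{
            New: func() interface{} { return new(bytes.Buffer) },
        }
        // Small byte slices reused by writeFloat/writeInt with strconv.Append*
        // to avoid per-number allocations.
        numBufPool = sync.Pool{
            New: func() interface{} {
                b := make([]byte, 0, 24)
                return &b
            },
        }
    )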
diff --git a/vendor/vendor.json b/vendor/vendor.json
index c7357c5c..1df7f7ef 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -203,16 +203,28 @@
"revisionTime": "2018-01-27T01:58:12Z"
},
{
- "checksumSHA1": "vuxZYORrJkB2dB6OXtqVCfT7+0I=",
+ "checksumSHA1": "frS661rlSEZWE9CezHhnFioQK/I=",
"path": "github.com/prometheus/client_golang/prometheus",
- "revision": "5b23715facdef1452016bae512489c3cdf82458c",
- "revisionTime": "2018-08-08T08:05:07Z"
+ "revision": "1cafe34db7fdec6022e17e00e1c1ea501022f3e4",
+ "revisionTime": "2018-10-15T14:52:39Z",
+ "version": "v0.9",
+ "versionExact": "v0.9.0"
+ },
+ {
+ "checksumSHA1": "UBqhkyjCz47+S19MVTigxJ2VjVQ=",
+ "path": "github.com/prometheus/client_golang/prometheus/internal",
+ "revision": "1cafe34db7fdec6022e17e00e1c1ea501022f3e4",
+ "revisionTime": "2018-10-15T14:52:39Z",
+ "version": "v0.9",
+ "versionExact": "v0.9.0"
},
{
"checksumSHA1": "d5BiEvD8MrgpWQ6PQJUvawJsMak=",
"path": "github.com/prometheus/client_golang/prometheus/promhttp",
- "revision": "5b23715facdef1452016bae512489c3cdf82458c",
- "revisionTime": "2018-08-08T08:05:07Z"
+ "revision": "1cafe34db7fdec6022e17e00e1c1ea501022f3e4",
+ "revisionTime": "2018-10-15T14:52:39Z",
+ "version": "v0.9",
+ "versionExact": "v0.9.0"
},
{
"checksumSHA1": "V8xkqgmP66sq2ZW4QO5wi9a4oZE=",
@@ -221,22 +233,22 @@
"revisionTime": "2018-07-12T10:51:10Z"
},
{
- "checksumSHA1": "Q0mjhUEjAklUQvPkrOChWGLpvRY=",
+ "checksumSHA1": "hGf3xT6gRaJh2zAEbWj9YnV+K+0=",
"path": "github.com/prometheus/common/expfmt",
- "revision": "c7de2306084e37d54b8be01f3541a8464345e9a5",
- "revisionTime": "2018-08-01T06:44:54Z"
+ "revision": "bcb74de08d37a417cb6789eec1d6c810040f0470",
+ "revisionTime": "2018-10-15T12:42:27Z"
},
{
"checksumSHA1": "GWlM3d2vPYyNATtTFgftS10/A9w=",
"path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
- "revision": "c7de2306084e37d54b8be01f3541a8464345e9a5",
- "revisionTime": "2018-08-01T06:44:54Z"
+ "revision": "bcb74de08d37a417cb6789eec1d6c810040f0470",
+ "revisionTime": "2018-10-15T12:42:27Z"
},
{
"checksumSHA1": "EXTRY7DL9gFW8c341Dk6LDXCBn8=",
"path": "github.com/prometheus/common/model",
- "revision": "c7de2306084e37d54b8be01f3541a8464345e9a5",
- "revisionTime": "2018-08-01T06:44:54Z"
+ "revision": "bcb74de08d37a417cb6789eec1d6c810040f0470",
+ "revisionTime": "2018-10-15T12:42:27Z"
},
{
"checksumSHA1": "jo/zxF+Pfj5yZjReTKGOACq9IBs=",