mirror of
https://github.com/kubernetes-sigs/prometheus-adapter.git
synced 2026-04-06 17:57:51 +00:00
Add vendor folder to git
This commit is contained in:
parent
66cf5eaafb
commit
183585f56f
6916 changed files with 2629581 additions and 1 deletions
7
vendor/github.com/prometheus/common/.travis.yml
generated
vendored
Normal file
7
vendor/github.com/prometheus/common/.travis.yml
generated
vendored
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
sudo: false
|
||||
|
||||
language: go
|
||||
go:
|
||||
- 1.4.3
|
||||
- 1.5.1
|
||||
- tip
|
||||
11
vendor/github.com/prometheus/common/AUTHORS.md
generated
vendored
Normal file
11
vendor/github.com/prometheus/common/AUTHORS.md
generated
vendored
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
Maintainers of this repository:
|
||||
|
||||
* Fabian Reinartz <fabian@soundcloud.com>
|
||||
|
||||
The following individuals have contributed code to this repository
|
||||
(listed in alphabetical order):
|
||||
|
||||
* Björn Rabenstein <beorn@soundcloud.com>
|
||||
* Fabian Reinartz <fabian@soundcloud.com>
|
||||
* Julius Volz <julius@soundcloud.com>
|
||||
* Miguel Molina <hi@mvader.me>
|
||||
18
vendor/github.com/prometheus/common/CONTRIBUTING.md
generated
vendored
Normal file
18
vendor/github.com/prometheus/common/CONTRIBUTING.md
generated
vendored
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
# Contributing
|
||||
|
||||
Prometheus uses GitHub to manage reviews of pull requests.
|
||||
|
||||
* If you have a trivial fix or improvement, go ahead and create a pull
|
||||
request, addressing (with `@...`) one or more of the maintainers
|
||||
(see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
|
||||
|
||||
* If you plan to do something more involved, first discuss your ideas
|
||||
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
|
||||
This will avoid unnecessary work and surely give you and us a good deal
|
||||
of inspiration.
|
||||
|
||||
* Relevant coding style guidelines are the [Go Code Review
|
||||
Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
|
||||
and the _Formatting and style_ section of Peter Bourgon's [Go: Best
|
||||
Practices for Production
|
||||
Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
|
||||
201
vendor/github.com/prometheus/common/LICENSE
generated
vendored
Normal file
201
vendor/github.com/prometheus/common/LICENSE
generated
vendored
Normal file
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
5
vendor/github.com/prometheus/common/NOTICE
generated
vendored
Normal file
5
vendor/github.com/prometheus/common/NOTICE
generated
vendored
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
Common libraries shared by Prometheus Go components.
|
||||
Copyright 2015 The Prometheus Authors
|
||||
|
||||
This product includes software developed at
|
||||
SoundCloud Ltd. (http://soundcloud.com/).
|
||||
8
vendor/github.com/prometheus/common/README.md
generated
vendored
Normal file
8
vendor/github.com/prometheus/common/README.md
generated
vendored
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
# Common
|
||||
[](https://travis-ci.org/prometheus/common)
|
||||
|
||||
This repository contains Go libraries that are shared across Prometheus
|
||||
components and libraries.
|
||||
|
||||
* **model**: Shared data structures
|
||||
* **expfmt**: Decoding and encoding for the exposition format
|
||||
171
vendor/github.com/prometheus/common/expfmt/bench_test.go
generated
vendored
Normal file
171
vendor/github.com/prometheus/common/expfmt/bench_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,171 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/matttproud/golang_protobuf_extensions/pbutil"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
var parser TextParser
|
||||
|
||||
// Benchmarks to show how much penalty text format parsing actually inflicts.
|
||||
//
|
||||
// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4.
|
||||
//
|
||||
// BenchmarkParseText 1000 1188535 ns/op 205085 B/op 6135 allocs/op
|
||||
// BenchmarkParseTextGzip 1000 1376567 ns/op 246224 B/op 6151 allocs/op
|
||||
// BenchmarkParseProto 10000 172790 ns/op 52258 B/op 1160 allocs/op
|
||||
// BenchmarkParseProtoGzip 5000 324021 ns/op 94931 B/op 1211 allocs/op
|
||||
// BenchmarkParseProtoMap 10000 187946 ns/op 58714 B/op 1203 allocs/op
|
||||
//
|
||||
// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations.
|
||||
// Without compression, it needs ~7x longer, but with compression (the more relevant scenario),
|
||||
// the difference becomes less relevant, only ~4x.
|
||||
//
|
||||
// The test data contains 248 samples.
|
||||
//
|
||||
// BenchmarkProcessor002ParseOnly in the extraction package is not quite
|
||||
// comparable to the benchmarks here, but it gives an idea: JSON parsing is even
|
||||
// slower than text parsing and needs a comparable amount of allocs.
|
||||
|
||||
// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric
|
||||
// family DTOs.
|
||||
func BenchmarkParseText(b *testing.B) {
|
||||
b.StopTimer()
|
||||
data, err := ioutil.ReadFile("testdata/text")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
if _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape
|
||||
// into metric family DTOs.
|
||||
func BenchmarkParseTextGzip(b *testing.B) {
|
||||
b.StopTimer()
|
||||
data, err := ioutil.ReadFile("testdata/text.gz")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
in, err := gzip.NewReader(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
if _, err := parser.TextToMetricFamilies(in); err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into
|
||||
// metric family DTOs. Note that this does not build a map of metric families
|
||||
// (as the text version does), because it is not required for Prometheus
|
||||
// ingestion either. (However, it is required for the text-format parsing, as
|
||||
// the metric family might be sprinkled all over the text, while the
|
||||
// protobuf-format guarantees bundling at one place.)
|
||||
func BenchmarkParseProto(b *testing.B) {
|
||||
b.StopTimer()
|
||||
data, err := ioutil.ReadFile("testdata/protobuf")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
family := &dto.MetricFamily{}
|
||||
in := bytes.NewReader(data)
|
||||
for {
|
||||
family.Reset()
|
||||
if _, err := pbutil.ReadDelimited(in, family); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped
|
||||
// protobuf format.
|
||||
func BenchmarkParseProtoGzip(b *testing.B) {
|
||||
b.StopTimer()
|
||||
data, err := ioutil.ReadFile("testdata/protobuf.gz")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
family := &dto.MetricFamily{}
|
||||
in, err := gzip.NewReader(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
for {
|
||||
family.Reset()
|
||||
if _, err := pbutil.ReadDelimited(in, family); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed
|
||||
// metric family DTOs into a map. This is not happening during Prometheus
|
||||
// ingestion. It is just here to measure the overhead of that map creation and
|
||||
// separate it from the overhead of the text format parsing.
|
||||
func BenchmarkParseProtoMap(b *testing.B) {
|
||||
b.StopTimer()
|
||||
data, err := ioutil.ReadFile("testdata/protobuf")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.StartTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
families := map[string]*dto.MetricFamily{}
|
||||
in := bytes.NewReader(data)
|
||||
for {
|
||||
family := &dto.MetricFamily{}
|
||||
if _, err := pbutil.ReadDelimited(in, family); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
b.Fatal(err)
|
||||
}
|
||||
families[family.GetName()] = family
|
||||
}
|
||||
}
|
||||
}
|
||||
411
vendor/github.com/prometheus/common/expfmt/decode.go
generated
vendored
Normal file
411
vendor/github.com/prometheus/common/expfmt/decode.go
generated
vendored
Normal file
|
|
@ -0,0 +1,411 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"mime"
|
||||
"net/http"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/matttproud/golang_protobuf_extensions/pbutil"
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// Decoder types decode an input stream into metric families.
|
||||
type Decoder interface {
|
||||
Decode(*dto.MetricFamily) error
|
||||
}
|
||||
|
||||
type DecodeOptions struct {
|
||||
// Timestamp is added to each value from the stream that has no explicit timestamp set.
|
||||
Timestamp model.Time
|
||||
}
|
||||
|
||||
// ResponseFormat extracts the correct format from a HTTP response header.
|
||||
// If no matching format can be found FormatUnknown is returned.
|
||||
func ResponseFormat(h http.Header) Format {
|
||||
ct := h.Get(hdrContentType)
|
||||
|
||||
mediatype, params, err := mime.ParseMediaType(ct)
|
||||
if err != nil {
|
||||
return FmtUnknown
|
||||
}
|
||||
|
||||
const (
|
||||
textType = "text/plain"
|
||||
jsonType = "application/json"
|
||||
)
|
||||
|
||||
switch mediatype {
|
||||
case ProtoType:
|
||||
if p, ok := params["proto"]; ok && p != ProtoProtocol {
|
||||
return FmtUnknown
|
||||
}
|
||||
if e, ok := params["encoding"]; ok && e != "delimited" {
|
||||
return FmtUnknown
|
||||
}
|
||||
return FmtProtoDelim
|
||||
|
||||
case textType:
|
||||
if v, ok := params["version"]; ok && v != TextVersion {
|
||||
return FmtUnknown
|
||||
}
|
||||
return FmtText
|
||||
|
||||
case jsonType:
|
||||
var prometheusAPIVersion string
|
||||
|
||||
if params["schema"] == "prometheus/telemetry" && params["version"] != "" {
|
||||
prometheusAPIVersion = params["version"]
|
||||
} else {
|
||||
prometheusAPIVersion = h.Get("X-Prometheus-API-Version")
|
||||
}
|
||||
|
||||
switch prometheusAPIVersion {
|
||||
case "0.0.2", "":
|
||||
return fmtJSON2
|
||||
default:
|
||||
return FmtUnknown
|
||||
}
|
||||
}
|
||||
|
||||
return FmtUnknown
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder based on the given input format.
|
||||
// If the input format does not imply otherwise, a text format decoder is returned.
|
||||
func NewDecoder(r io.Reader, format Format) Decoder {
|
||||
switch format {
|
||||
case FmtProtoDelim:
|
||||
return &protoDecoder{r: r}
|
||||
case fmtJSON2:
|
||||
return newJSON2Decoder(r)
|
||||
}
|
||||
return &textDecoder{r: r}
|
||||
}
|
||||
|
||||
// protoDecoder implements the Decoder interface for protocol buffers.
|
||||
type protoDecoder struct {
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
// Decode implements the Decoder interface.
|
||||
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
|
||||
_, err := pbutil.ReadDelimited(d.r, v)
|
||||
return err
|
||||
}
|
||||
|
||||
// textDecoder implements the Decoder interface for the text protcol.
|
||||
type textDecoder struct {
|
||||
r io.Reader
|
||||
p TextParser
|
||||
fams []*dto.MetricFamily
|
||||
}
|
||||
|
||||
// Decode implements the Decoder interface.
|
||||
func (d *textDecoder) Decode(v *dto.MetricFamily) error {
|
||||
// TODO(fabxc): Wrap this as a line reader to make streaming safer.
|
||||
if len(d.fams) == 0 {
|
||||
// No cached metric families, read everything and parse metrics.
|
||||
fams, err := d.p.TextToMetricFamilies(d.r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(fams) == 0 {
|
||||
return io.EOF
|
||||
}
|
||||
d.fams = make([]*dto.MetricFamily, 0, len(fams))
|
||||
for _, f := range fams {
|
||||
d.fams = append(d.fams, f)
|
||||
}
|
||||
}
|
||||
|
||||
*v = *d.fams[0]
|
||||
d.fams = d.fams[1:]
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type SampleDecoder struct {
|
||||
Dec Decoder
|
||||
Opts *DecodeOptions
|
||||
|
||||
f dto.MetricFamily
|
||||
}
|
||||
|
||||
func (sd *SampleDecoder) Decode(s *model.Vector) error {
|
||||
if err := sd.Dec.Decode(&sd.f); err != nil {
|
||||
return err
|
||||
}
|
||||
*s = extractSamples(&sd.f, sd.Opts)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Extract samples builds a slice of samples from the provided metric families.
|
||||
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
|
||||
var all model.Vector
|
||||
for _, f := range fams {
|
||||
all = append(all, extractSamples(f, o)...)
|
||||
}
|
||||
return all
|
||||
}
|
||||
|
||||
func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
|
||||
switch f.GetType() {
|
||||
case dto.MetricType_COUNTER:
|
||||
return extractCounter(o, f)
|
||||
case dto.MetricType_GAUGE:
|
||||
return extractGauge(o, f)
|
||||
case dto.MetricType_SUMMARY:
|
||||
return extractSummary(o, f)
|
||||
case dto.MetricType_UNTYPED:
|
||||
return extractUntyped(o, f)
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
return extractHistogram(o, f)
|
||||
}
|
||||
panic("expfmt.extractSamples: unknown metric family type")
|
||||
}
|
||||
|
||||
func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
|
||||
samples := make(model.Vector, 0, len(f.Metric))
|
||||
|
||||
for _, m := range f.Metric {
|
||||
if m.Counter == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
lset := make(model.LabelSet, len(m.Label)+1)
|
||||
for _, p := range m.Label {
|
||||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||
}
|
||||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
|
||||
|
||||
smpl := &model.Sample{
|
||||
Metric: model.Metric(lset),
|
||||
Value: model.SampleValue(m.Counter.GetValue()),
|
||||
}
|
||||
|
||||
if m.TimestampMs != nil {
|
||||
smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
|
||||
} else {
|
||||
smpl.Timestamp = o.Timestamp
|
||||
}
|
||||
|
||||
samples = append(samples, smpl)
|
||||
}
|
||||
|
||||
return samples
|
||||
}
|
||||
|
||||
func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
|
||||
samples := make(model.Vector, 0, len(f.Metric))
|
||||
|
||||
for _, m := range f.Metric {
|
||||
if m.Gauge == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
lset := make(model.LabelSet, len(m.Label)+1)
|
||||
for _, p := range m.Label {
|
||||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||
}
|
||||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
|
||||
|
||||
smpl := &model.Sample{
|
||||
Metric: model.Metric(lset),
|
||||
Value: model.SampleValue(m.Gauge.GetValue()),
|
||||
}
|
||||
|
||||
if m.TimestampMs != nil {
|
||||
smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
|
||||
} else {
|
||||
smpl.Timestamp = o.Timestamp
|
||||
}
|
||||
|
||||
samples = append(samples, smpl)
|
||||
}
|
||||
|
||||
return samples
|
||||
}
|
||||
|
||||
func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
|
||||
samples := make(model.Vector, 0, len(f.Metric))
|
||||
|
||||
for _, m := range f.Metric {
|
||||
if m.Untyped == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
lset := make(model.LabelSet, len(m.Label)+1)
|
||||
for _, p := range m.Label {
|
||||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||
}
|
||||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
|
||||
|
||||
smpl := &model.Sample{
|
||||
Metric: model.Metric(lset),
|
||||
Value: model.SampleValue(m.Untyped.GetValue()),
|
||||
}
|
||||
|
||||
if m.TimestampMs != nil {
|
||||
smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
|
||||
} else {
|
||||
smpl.Timestamp = o.Timestamp
|
||||
}
|
||||
|
||||
samples = append(samples, smpl)
|
||||
}
|
||||
|
||||
return samples
|
||||
}
|
||||
|
||||
func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
|
||||
samples := make(model.Vector, 0, len(f.Metric))
|
||||
|
||||
for _, m := range f.Metric {
|
||||
if m.Summary == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
timestamp := o.Timestamp
|
||||
if m.TimestampMs != nil {
|
||||
timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
|
||||
}
|
||||
|
||||
for _, q := range m.Summary.Quantile {
|
||||
lset := make(model.LabelSet, len(m.Label)+2)
|
||||
for _, p := range m.Label {
|
||||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||
}
|
||||
// BUG(matt): Update other names to "quantile".
|
||||
lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
|
||||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
|
||||
|
||||
samples = append(samples, &model.Sample{
|
||||
Metric: model.Metric(lset),
|
||||
Value: model.SampleValue(q.GetValue()),
|
||||
Timestamp: timestamp,
|
||||
})
|
||||
}
|
||||
|
||||
lset := make(model.LabelSet, len(m.Label)+1)
|
||||
for _, p := range m.Label {
|
||||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||
}
|
||||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
|
||||
|
||||
samples = append(samples, &model.Sample{
|
||||
Metric: model.Metric(lset),
|
||||
Value: model.SampleValue(m.Summary.GetSampleSum()),
|
||||
Timestamp: timestamp,
|
||||
})
|
||||
|
||||
lset = make(model.LabelSet, len(m.Label)+1)
|
||||
for _, p := range m.Label {
|
||||
lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
|
||||
}
|
||||
lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
|
||||
|
||||
samples = append(samples, &model.Sample{
|
||||
Metric: model.Metric(lset),
|
||||
Value: model.SampleValue(m.Summary.GetSampleCount()),
|
||||
Timestamp: timestamp,
|
||||
})
|
||||
}
|
||||
|
||||
return samples
|
||||
}
|
||||
|
||||
// extractHistogram converts each histogram metric in f into model samples:
// one "<name>_bucket" sample per bucket (carrying an "le" label), plus
// synthetic "<name>_sum" and "<name>_count" series. If the exposition lacks
// an explicit +Inf bucket, one is synthesized from the sample count.
func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
	samples := make(model.Vector, 0, len(f.Metric))

	for _, m := range f.Metric {
		if m.Histogram == nil {
			continue
		}

		// Prefer the metric's own timestamp (milliseconds) over the
		// decode-time default supplied in the options.
		timestamp := o.Timestamp
		if m.TimestampMs != nil {
			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
		}

		// Tracks whether the exposition already contains a +Inf bucket,
		// so we know whether to append a synthetic one below.
		infSeen := false

		for _, q := range m.Histogram.Bucket {
			lset := make(model.LabelSet, len(m.Label)+2)
			for _, p := range m.Label {
				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
			}
			lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")

			if math.IsInf(q.GetUpperBound(), +1) {
				infSeen = true
			}

			samples = append(samples, &model.Sample{
				Metric:    model.Metric(lset),
				Value:     model.SampleValue(q.GetCumulativeCount()),
				Timestamp: timestamp,
			})
		}

		// Synthesize the <name>_sum series from the histogram's sample sum.
		lset := make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")

		samples = append(samples, &model.Sample{
			Metric:    model.Metric(lset),
			Value:     model.SampleValue(m.Histogram.GetSampleSum()),
			Timestamp: timestamp,
		})

		// Synthesize the <name>_count series; kept in a named variable so
		// its value can be reused for the synthetic +Inf bucket below.
		lset = make(model.LabelSet, len(m.Label)+1)
		for _, p := range m.Label {
			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
		}
		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")

		count := &model.Sample{
			Metric:    model.Metric(lset),
			Value:     model.SampleValue(m.Histogram.GetSampleCount()),
			Timestamp: timestamp,
		}
		samples = append(samples, count)

		if !infSeen {
			// Append an infinity bucket sample.
			lset := make(model.LabelSet, len(m.Label)+2)
			for _, p := range m.Label {
				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
			}
			lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")

			samples = append(samples, &model.Sample{
				Metric:    model.Metric(lset),
				Value:     count.Value,
				Timestamp: timestamp,
			})
		}
	}

	return samples
}
|
||||
356
vendor/github.com/prometheus/common/expfmt/decode_test.go
generated
vendored
Normal file
356
vendor/github.com/prometheus/common/expfmt/decode_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,356 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
func TestTextDecoder(t *testing.T) {
|
||||
var (
|
||||
ts = model.Now()
|
||||
in = `
|
||||
# Only a quite simple scenario with two metric families.
|
||||
# More complicated tests of the parser itself can be found in the text package.
|
||||
# TYPE mf2 counter
|
||||
mf2 3
|
||||
mf1{label="value1"} -3.14 123456
|
||||
mf1{label="value2"} 42
|
||||
mf2 4
|
||||
`
|
||||
out = model.Vector{
|
||||
&model.Sample{
|
||||
Metric: model.Metric{
|
||||
model.MetricNameLabel: "mf1",
|
||||
"label": "value1",
|
||||
},
|
||||
Value: -3.14,
|
||||
Timestamp: 123456,
|
||||
},
|
||||
&model.Sample{
|
||||
Metric: model.Metric{
|
||||
model.MetricNameLabel: "mf1",
|
||||
"label": "value2",
|
||||
},
|
||||
Value: 42,
|
||||
Timestamp: ts,
|
||||
},
|
||||
&model.Sample{
|
||||
Metric: model.Metric{
|
||||
model.MetricNameLabel: "mf2",
|
||||
},
|
||||
Value: 3,
|
||||
Timestamp: ts,
|
||||
},
|
||||
&model.Sample{
|
||||
Metric: model.Metric{
|
||||
model.MetricNameLabel: "mf2",
|
||||
},
|
||||
Value: 4,
|
||||
Timestamp: ts,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
dec := &SampleDecoder{
|
||||
Dec: &textDecoder{r: strings.NewReader(in)},
|
||||
Opts: &DecodeOptions{
|
||||
Timestamp: ts,
|
||||
},
|
||||
}
|
||||
var all model.Vector
|
||||
for {
|
||||
var smpls model.Vector
|
||||
err := dec.Decode(&smpls)
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
all = append(all, smpls...)
|
||||
}
|
||||
sort.Sort(all)
|
||||
sort.Sort(out)
|
||||
if !reflect.DeepEqual(all, out) {
|
||||
t.Fatalf("output does not match")
|
||||
}
|
||||
}
|
||||
|
||||
// TestProtoDecoder checks the delimited-protobuf decoder against hand-crafted
// wire-format fixtures: an empty stream, an untyped family, a summary family
// (expanded into quantile/_sum/_count samples), and a histogram family
// (expanded into _bucket/_sum/_count samples, including the +Inf bucket).
func TestProtoDecoder(t *testing.T) {

	var testTime = model.Now()

	scenarios := []struct {
		in       string // raw delimited protobuf bytes fed to the decoder
		expected model.Vector
	}{
		{
			// An empty input must decode to no samples.
			in: "",
		},
		{
			in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_label_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@",
			expected: model.Vector{
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count",
						"some_label_name":     "some_label_value",
					},
					Value:     -42,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count",
						"another_label_name":  "another_label_value",
					},
					Value:     84,
					Timestamp: testTime,
				},
			},
		},
		{
			// A summary family: expands into _count, _sum, and one sample
			// per quantile for each label set.
			in: "\xb9\x01\n\rrequest_count\x12\x12Number of requests\x18\x02\"O\n#\n\x0fsome_label_name\x12\x10some_label_value\"(\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00\x00\x00\x00E\xc0\x1a\x12\t+\x87\x16\xd9\xce\xf7\xef?\x11\x00\x00\x00\x00\x00\x00U\xc0\"A\n)\n\x12another_label_name\x12\x13another_label_value\"\x14\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00\x00\x00\x00\x00\x00$@",
			expected: model.Vector{
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count_count",
						"some_label_name":     "some_label_value",
					},
					Value:     0,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count_sum",
						"some_label_name":     "some_label_value",
					},
					Value:     0,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count",
						"some_label_name":     "some_label_value",
						"quantile":            "0.99",
					},
					Value:     -42,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count",
						"some_label_name":     "some_label_value",
						"quantile":            "0.999",
					},
					Value:     -84,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count_count",
						"another_label_name":  "another_label_value",
					},
					Value:     0,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count_sum",
						"another_label_name":  "another_label_value",
					},
					Value:     0,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count",
						"another_label_name":  "another_label_value",
						"quantile":            "0.5",
					},
					Value:     10,
					Timestamp: testTime,
				},
			},
		},
		{
			// A histogram family: expands into per-bucket samples (le label),
			// _sum, and _count; the fixture already contains a +Inf bucket.
			in: "\x8d\x01\n\x1drequest_duration_microseconds\x12\x15The response latency.\x18\x04\"S:Q\b\x85\x15\x11\xcd\xcc\xccL\x8f\xcb:A\x1a\v\b{\x11\x00\x00\x00\x00\x00\x00Y@\x1a\f\b\x9c\x03\x11\x00\x00\x00\x00\x00\x00^@\x1a\f\b\xd0\x04\x11\x00\x00\x00\x00\x00\x00b@\x1a\f\b\xf4\v\x11\x9a\x99\x99\x99\x99\x99e@\x1a\f\b\x85\x15\x11\x00\x00\x00\x00\x00\x00\xf0\u007f",
			expected: model.Vector{
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_duration_microseconds_bucket",
						"le":                  "100",
					},
					Value:     123,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_duration_microseconds_bucket",
						"le":                  "120",
					},
					Value:     412,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_duration_microseconds_bucket",
						"le":                  "144",
					},
					Value:     592,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_duration_microseconds_bucket",
						"le":                  "172.8",
					},
					Value:     1524,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_duration_microseconds_bucket",
						"le":                  "+Inf",
					},
					Value:     2693,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_duration_microseconds_sum",
					},
					Value:     1756047.3,
					Timestamp: testTime,
				},
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_duration_microseconds_count",
					},
					Value:     2693,
					Timestamp: testTime,
				},
			},
		},
		{
			// The metric type is unset in this protobuf, which needs to be handled
			// correctly by the decoder.
			in: "\x1c\n\rrequest_count\"\v\x1a\t\t\x00\x00\x00\x00\x00\x00\xf0?",
			expected: model.Vector{
				&model.Sample{
					Metric: model.Metric{
						model.MetricNameLabel: "request_count",
					},
					Value:     1,
					Timestamp: testTime,
				},
			},
		},
	}

	for i, scenario := range scenarios {
		dec := &SampleDecoder{
			Dec: &protoDecoder{r: strings.NewReader(scenario.in)},
			Opts: &DecodeOptions{
				Timestamp: testTime,
			},
		}

		// Drain the decoder until EOF, accumulating all samples.
		var all model.Vector
		for {
			var smpls model.Vector
			err := dec.Decode(&smpls)
			if err == io.EOF {
				break
			}
			if err != nil {
				t.Fatal(err)
			}
			all = append(all, smpls...)
		}
		// Ordering is not guaranteed; sort both sides before comparing.
		sort.Sort(all)
		sort.Sort(scenario.expected)
		if !reflect.DeepEqual(all, scenario.expected) {
			t.Fatalf("%d. output does not match, want: %#v, got %#v", i, scenario.expected, all)
		}
	}
}
|
||||
|
||||
func testDiscriminatorHTTPHeader(t testing.TB) {
|
||||
var scenarios = []struct {
|
||||
input map[string]string
|
||||
output Format
|
||||
err error
|
||||
}{
|
||||
{
|
||||
input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="delimited"`},
|
||||
output: FmtProtoDelim,
|
||||
},
|
||||
{
|
||||
input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="illegal"; encoding="delimited"`},
|
||||
output: FmtUnknown,
|
||||
},
|
||||
{
|
||||
input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="illegal"`},
|
||||
output: FmtUnknown,
|
||||
},
|
||||
{
|
||||
input: map[string]string{"Content-Type": `text/plain; version=0.0.4`},
|
||||
output: FmtText,
|
||||
},
|
||||
{
|
||||
input: map[string]string{"Content-Type": `text/plain`},
|
||||
output: FmtText,
|
||||
},
|
||||
{
|
||||
input: map[string]string{"Content-Type": `text/plain; version=0.0.3`},
|
||||
output: FmtUnknown,
|
||||
},
|
||||
}
|
||||
|
||||
for i, scenario := range scenarios {
|
||||
var header http.Header
|
||||
|
||||
if len(scenario.input) > 0 {
|
||||
header = http.Header{}
|
||||
}
|
||||
|
||||
for key, value := range scenario.input {
|
||||
header.Add(key, value)
|
||||
}
|
||||
|
||||
actual := ResponseFormat(header)
|
||||
|
||||
if scenario.output != actual {
|
||||
t.Errorf("%d. expected %s, got %s", i, scenario.output, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestDiscriminatorHTTPHeader runs the shared header-discrimination
// scenarios as a regular test.
func TestDiscriminatorHTTPHeader(t *testing.T) {
	testDiscriminatorHTTPHeader(t)
}
|
||||
|
||||
// BenchmarkDiscriminatorHTTPHeader measures content-type discrimination by
// running the shared scenarios b.N times.
func BenchmarkDiscriminatorHTTPHeader(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testDiscriminatorHTTPHeader(b)
	}
}
|
||||
88
vendor/github.com/prometheus/common/expfmt/encode.go
generated
vendored
Normal file
88
vendor/github.com/prometheus/common/expfmt/encode.go
generated
vendored
Normal file
|
|
@ -0,0 +1,88 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"bitbucket.org/ww/goautoneg"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/matttproud/golang_protobuf_extensions/pbutil"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// Encoder types encode metric families into an underlying wire protocol.
type Encoder interface {
	// Encode writes a single metric family to the underlying writer.
	Encode(*dto.MetricFamily) error
}

// encoder adapts a plain function to the Encoder interface.
type encoder func(*dto.MetricFamily) error

// Encode implements Encoder by invoking the wrapped function.
func (e encoder) Encode(v *dto.MetricFamily) error {
	return e(v)
}
|
||||
|
||||
// Negotiate returns the Content-Type based on the given Accept header.
|
||||
// If no appropriate accepted type is found, FmtText is returned.
|
||||
func Negotiate(h http.Header) Format {
|
||||
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
|
||||
// Check for protocol buffer
|
||||
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
|
||||
switch ac.Params["encoding"] {
|
||||
case "delimited":
|
||||
return FmtProtoDelim
|
||||
case "text":
|
||||
return FmtProtoText
|
||||
case "compact-text":
|
||||
return FmtProtoCompact
|
||||
}
|
||||
}
|
||||
// Check for text format.
|
||||
ver := ac.Params["version"]
|
||||
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
|
||||
return FmtText
|
||||
}
|
||||
}
|
||||
return FmtText
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder based on content type negotiation.
|
||||
func NewEncoder(w io.Writer, format Format) Encoder {
|
||||
switch format {
|
||||
case FmtProtoDelim:
|
||||
return encoder(func(v *dto.MetricFamily) error {
|
||||
_, err := pbutil.WriteDelimited(w, v)
|
||||
return err
|
||||
})
|
||||
case FmtProtoCompact:
|
||||
return encoder(func(v *dto.MetricFamily) error {
|
||||
_, err := fmt.Fprintln(w, v.String())
|
||||
return err
|
||||
})
|
||||
case FmtProtoText:
|
||||
return encoder(func(v *dto.MetricFamily) error {
|
||||
_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
|
||||
return err
|
||||
})
|
||||
case FmtText:
|
||||
return encoder(func(v *dto.MetricFamily) error {
|
||||
_, err := MetricFamilyToText(w, v)
|
||||
return err
|
||||
})
|
||||
}
|
||||
panic("expfmt.NewEncoder: unknown format")
|
||||
}
|
||||
40
vendor/github.com/prometheus/common/expfmt/expfmt.go
generated
vendored
Normal file
40
vendor/github.com/prometheus/common/expfmt/expfmt.go
generated
vendored
Normal file
|
|
@ -0,0 +1,40 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// A package for reading and writing Prometheus metrics.
|
||||
package expfmt
|
||||
|
||||
// Format names a supported metrics exposition wire format, expressed as an
// HTTP Content-Type value.
type Format string

const (
	// TextVersion is the version of the plain-text exposition format.
	TextVersion = "0.0.4"

	// ProtoType and ProtoProtocol identify the protobuf-based format.
	ProtoType     = `application/vnd.google.protobuf`
	ProtoProtocol = `io.prometheus.client.MetricFamily`
	// ProtoFmt is the common Content-Type prefix shared by all protobuf
	// encodings; an "encoding=..." parameter is appended per format below.
	ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"

	// The Content-Type values for the different wire protocols.
	FmtUnknown      Format = `<unknown>`
	FmtText         Format = `text/plain; version=` + TextVersion
	FmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`
	FmtProtoText    Format = ProtoFmt + ` encoding=text`
	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`

	// fmtJSON2 is hidden as it is deprecated.
	fmtJSON2 Format = `application/json; version=0.0.2`
)

// HTTP header names used during content-type negotiation.
const (
	hdrContentType = "Content-Type"
	hdrAccept      = "Accept"
)
|
||||
36
vendor/github.com/prometheus/common/expfmt/fuzz.go
generated
vendored
Normal file
36
vendor/github.com/prometheus/common/expfmt/fuzz.go
generated
vendored
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Build only when actually fuzzing
|
||||
// +build gofuzz
|
||||
|
||||
package expfmt
|
||||
|
||||
import "bytes"
|
||||
|
||||
// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
//
//     go-fuzz-build github.com/prometheus/client_golang/text
//     go-fuzz -bin text-fuzz.zip -workdir fuzz
//
// Further input samples should go in the folder fuzz/corpus.
func Fuzz(in []byte) int {
	parser := TextParser{}
	_, err := parser.TextToMetricFamilies(bytes.NewReader(in))

	// Returning 0 tells go-fuzz the input failed to parse (uninteresting);
	// 1 marks it as a valid corpus candidate.
	if err != nil {
		return 0
	}

	return 1
}
|
||||
2
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0
generated
vendored
Normal file
2
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_0
generated
vendored
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
|
||||
|
||||
6
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1
generated
vendored
Normal file
6
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_1
generated
vendored
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
|
||||
minimal_metric 1.234
|
||||
another_metric -3e3 103948
|
||||
# Even that:
|
||||
no_labels{} 3
|
||||
# HELP line for non-existing metric will be ignored.
|
||||
12
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2
generated
vendored
Normal file
12
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_2
generated
vendored
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
|
||||
# A normal comment.
|
||||
#
|
||||
# TYPE name counter
|
||||
name{labelname="val1",basename="basevalue"} NaN
|
||||
name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890
|
||||
# HELP name two-line\n doc str\\ing
|
||||
|
||||
# HELP name2 doc str"ing 2
|
||||
# TYPE name2 gauge
|
||||
name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321
|
||||
name2{ labelname = "val1" , }-Inf
|
||||
22
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3
generated
vendored
Normal file
22
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_3
generated
vendored
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
|
||||
# TYPE my_summary summary
|
||||
my_summary{n1="val1",quantile="0.5"} 110
|
||||
decoy -1 -2
|
||||
my_summary{n1="val1",quantile="0.9"} 140 1
|
||||
my_summary_count{n1="val1"} 42
|
||||
# Latest timestamp wins in case of a summary.
|
||||
my_summary_sum{n1="val1"} 4711 2
|
||||
fake_sum{n1="val1"} 2001
|
||||
# TYPE another_summary summary
|
||||
another_summary_count{n2="val2",n1="val1"} 20
|
||||
my_summary_count{n2="val2",n1="val1"} 5 5
|
||||
another_summary{n1="val1",n2="val2",quantile=".3"} -1.2
|
||||
my_summary_sum{n1="val2"} 08 15
|
||||
my_summary{n1="val3", quantile="0.2"} 4711
|
||||
my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN
|
||||
# some
|
||||
# funny comments
|
||||
# HELP
|
||||
# HELP
|
||||
# HELP my_summary
|
||||
# HELP my_summary
|
||||
10
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4
generated
vendored
Normal file
10
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_4
generated
vendored
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
|
||||
# HELP request_duration_microseconds The response latency.
|
||||
# TYPE request_duration_microseconds histogram
|
||||
request_duration_microseconds_bucket{le="100"} 123
|
||||
request_duration_microseconds_bucket{le="120"} 412
|
||||
request_duration_microseconds_bucket{le="144"} 592
|
||||
request_duration_microseconds_bucket{le="172.8"} 1524
|
||||
request_duration_microseconds_bucket{le="+Inf"} 2693
|
||||
request_duration_microseconds_sum 1.7560473e+06
|
||||
request_duration_microseconds_count 2693
|
||||
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0
generated
vendored
Normal file
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_0
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
bla 3.14
|
||||
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1
generated
vendored
Normal file
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_1
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
metric{label="\t"} 3.14
|
||||
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10
generated
vendored
Normal file
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_10
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
metric{label="bla"} 3.14 2 3
|
||||
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11
generated
vendored
Normal file
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_11
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
metric{label="bla"} blubb
|
||||
3
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12
generated
vendored
Normal file
3
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_12
generated
vendored
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
|
||||
# HELP metric one
|
||||
# HELP metric two
|
||||
3
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13
generated
vendored
Normal file
3
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_13
generated
vendored
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
|
||||
# TYPE metric counter
|
||||
# TYPE metric untyped
|
||||
3
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14
generated
vendored
Normal file
3
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_14
generated
vendored
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
|
||||
metric 4.12
|
||||
# TYPE metric counter
|
||||
2
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15
generated
vendored
Normal file
2
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_15
generated
vendored
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
|
||||
# TYPE metric bla
|
||||
2
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16
generated
vendored
Normal file
2
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_16
generated
vendored
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
|
||||
# TYPE met-ric
|
||||
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17
generated
vendored
Normal file
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_17
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
@invalidmetric{label="bla"} 3.14 2
|
||||
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18
generated
vendored
Normal file
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_18
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
{label="bla"} 3.14 2
|
||||
3
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19
generated
vendored
Normal file
3
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_19
generated
vendored
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
|
||||
# TYPE metric histogram
|
||||
metric_bucket{le="bla"} 3.14
|
||||
3
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2
generated
vendored
Normal file
3
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_2
generated
vendored
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
|
||||
metric{label="new
|
||||
line"} 3.14
|
||||
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3
generated
vendored
Normal file
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_3
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
metric{@="bla"} 3.14
|
||||
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4
generated
vendored
Normal file
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_4
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
metric{__name__="bla"} 3.14
|
||||
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5
generated
vendored
Normal file
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_5
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
metric{label+="bla"} 3.14
|
||||
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6
generated
vendored
Normal file
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_6
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
metric{label=bla} 3.14
|
||||
3
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7
generated
vendored
Normal file
3
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_7
generated
vendored
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
|
||||
# TYPE metric summary
|
||||
metric{quantile="bla"} 3.14
|
||||
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8
generated
vendored
Normal file
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_8
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
metric{label="bla"+} 3.14
|
||||
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9
generated
vendored
Normal file
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/from_test_parse_error_9
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
metric{label="bla"} 3.14 2.72
|
||||
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/minimal
generated
vendored
Normal file
1
vendor/github.com/prometheus/common/expfmt/fuzz/corpus/minimal
generated
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
m{} 0
|
||||
162
vendor/github.com/prometheus/common/expfmt/json_decode.go
generated
vendored
Normal file
162
vendor/github.com/prometheus/common/expfmt/json_decode.go
generated
vendored
Normal file
|
|
@ -0,0 +1,162 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// json2Decoder decodes the deprecated JSON exposition format version 0.0.2
// into protobuf MetricFamily messages.
type json2Decoder struct {
	dec  *json.Decoder       // streaming JSON decoder over the input
	fams []*dto.MetricFamily // families decoded but not yet handed out
}
||||
|
||||
// newJSON2Decoder returns a Decoder reading the deprecated JSON format
// version 0.0.2 from r.
func newJSON2Decoder(r io.Reader) Decoder {
	return &json2Decoder{
		dec: json.NewDecoder(r),
	}
}
|
||||
|
||||
// histogram002 is the JSON 0.0.2 representation of a histogram sample:
// a label set plus a map from quantile (as a string key) to value.
type histogram002 struct {
	Labels model.LabelSet     `json:"labels"`
	Values map[string]float64 `json:"value"`
}
|
||||
|
||||
// counter002 is the JSON 0.0.2 representation of a counter or gauge sample.
type counter002 struct {
	Labels model.LabelSet `json:"labels"`
	Value  float64        `json:"value"`
}
|
||||
|
||||
func protoLabelSet(base, ext model.LabelSet) []*dto.LabelPair {
|
||||
labels := base.Clone().Merge(ext)
|
||||
delete(labels, model.MetricNameLabel)
|
||||
|
||||
names := make([]string, 0, len(labels))
|
||||
for ln := range labels {
|
||||
names = append(names, string(ln))
|
||||
}
|
||||
sort.Strings(names)
|
||||
|
||||
pairs := make([]*dto.LabelPair, 0, len(labels))
|
||||
|
||||
for _, ln := range names {
|
||||
lv := labels[model.LabelName(ln)]
|
||||
|
||||
pairs = append(pairs, &dto.LabelPair{
|
||||
Name: proto.String(ln),
|
||||
Value: proto.String(string(lv)),
|
||||
})
|
||||
}
|
||||
|
||||
return pairs
|
||||
}
|
||||
|
||||
func (d *json2Decoder) more() error {
|
||||
var entities []struct {
|
||||
BaseLabels model.LabelSet `json:"baseLabels"`
|
||||
Docstring string `json:"docstring"`
|
||||
Metric struct {
|
||||
Type string `json:"type"`
|
||||
Values json.RawMessage `json:"value"`
|
||||
} `json:"metric"`
|
||||
}
|
||||
|
||||
if err := d.dec.Decode(&entities); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, e := range entities {
|
||||
f := &dto.MetricFamily{
|
||||
Name: proto.String(string(e.BaseLabels[model.MetricNameLabel])),
|
||||
Help: proto.String(e.Docstring),
|
||||
Type: dto.MetricType_UNTYPED.Enum(),
|
||||
Metric: []*dto.Metric{},
|
||||
}
|
||||
|
||||
d.fams = append(d.fams, f)
|
||||
|
||||
switch e.Metric.Type {
|
||||
case "counter", "gauge":
|
||||
var values []counter002
|
||||
|
||||
if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
|
||||
return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
|
||||
}
|
||||
|
||||
for _, ctr := range values {
|
||||
f.Metric = append(f.Metric, &dto.Metric{
|
||||
Label: protoLabelSet(e.BaseLabels, ctr.Labels),
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(ctr.Value),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
case "histogram":
|
||||
var values []histogram002
|
||||
|
||||
if err := json.Unmarshal(e.Metric.Values, &values); err != nil {
|
||||
return fmt.Errorf("could not extract %s value: %s", e.Metric.Type, err)
|
||||
}
|
||||
|
||||
for _, hist := range values {
|
||||
quants := make([]string, 0, len(values))
|
||||
for q := range hist.Values {
|
||||
quants = append(quants, q)
|
||||
}
|
||||
|
||||
sort.Strings(quants)
|
||||
|
||||
for _, q := range quants {
|
||||
value := hist.Values[q]
|
||||
// The correct label is "quantile" but to not break old expressions
|
||||
// this remains "percentile"
|
||||
hist.Labels["percentile"] = model.LabelValue(q)
|
||||
|
||||
f.Metric = append(f.Metric, &dto.Metric{
|
||||
Label: protoLabelSet(e.BaseLabels, hist.Labels),
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(value),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown metric type %q", e.Metric.Type)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode implements the Decoder interface.
|
||||
func (d *json2Decoder) Decode(v *dto.MetricFamily) error {
|
||||
if len(d.fams) == 0 {
|
||||
if err := d.more(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
*v = *d.fams[0]
|
||||
d.fams = d.fams[1:]
|
||||
|
||||
return nil
|
||||
}
|
||||
124
vendor/github.com/prometheus/common/expfmt/json_decode_test.go
generated
vendored
Normal file
124
vendor/github.com/prometheus/common/expfmt/json_decode_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,124 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
func TestJSON2Decode(t *testing.T) {
|
||||
f, err := os.Open("testdata/json2")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
dec := newJSON2Decoder(f)
|
||||
|
||||
var v1 dto.MetricFamily
|
||||
if err := dec.Decode(&v1); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
exp1 := dto.MetricFamily{
|
||||
Type: dto.MetricType_UNTYPED.Enum(),
|
||||
Help: proto.String("RPC calls."),
|
||||
Name: proto.String("rpc_calls_total"),
|
||||
Metric: []*dto.Metric{
|
||||
{
|
||||
Label: []*dto.LabelPair{
|
||||
{
|
||||
Name: proto.String("job"),
|
||||
Value: proto.String("batch_job"),
|
||||
}, {
|
||||
Name: proto.String("service"),
|
||||
Value: proto.String("zed"),
|
||||
},
|
||||
},
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(25),
|
||||
},
|
||||
},
|
||||
{
|
||||
Label: []*dto.LabelPair{
|
||||
{
|
||||
Name: proto.String("job"),
|
||||
Value: proto.String("batch_job"),
|
||||
}, {
|
||||
Name: proto.String("service"),
|
||||
Value: proto.String("bar"),
|
||||
},
|
||||
},
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(24),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(v1, exp1) {
|
||||
t.Fatalf("Expected %v, got %v", exp1, v1)
|
||||
}
|
||||
|
||||
var v2 dto.MetricFamily
|
||||
if err := dec.Decode(&v2); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
exp2 := dto.MetricFamily{
|
||||
Type: dto.MetricType_UNTYPED.Enum(),
|
||||
Help: proto.String("RPC latency."),
|
||||
Name: proto.String("rpc_latency_microseconds"),
|
||||
Metric: []*dto.Metric{
|
||||
{
|
||||
Label: []*dto.LabelPair{
|
||||
{
|
||||
Name: proto.String("percentile"),
|
||||
Value: proto.String("0.010000"),
|
||||
}, {
|
||||
Name: proto.String("service"),
|
||||
Value: proto.String("foo"),
|
||||
},
|
||||
},
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(15),
|
||||
},
|
||||
},
|
||||
{
|
||||
Label: []*dto.LabelPair{
|
||||
{
|
||||
Name: proto.String("percentile"),
|
||||
Value: proto.String("0.990000"),
|
||||
}, {
|
||||
Name: proto.String("service"),
|
||||
Value: proto.String("foo"),
|
||||
},
|
||||
},
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(17),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(v2, exp2) {
|
||||
t.Fatalf("Expected %v, got %v", exp2, v2)
|
||||
}
|
||||
|
||||
}
|
||||
46
vendor/github.com/prometheus/common/expfmt/testdata/json2
generated
vendored
Normal file
46
vendor/github.com/prometheus/common/expfmt/testdata/json2
generated
vendored
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
[
|
||||
{
|
||||
"baseLabels": {
|
||||
"__name__": "rpc_calls_total",
|
||||
"job": "batch_job"
|
||||
},
|
||||
"docstring": "RPC calls.",
|
||||
"metric": {
|
||||
"type": "counter",
|
||||
"value": [
|
||||
{
|
||||
"labels": {
|
||||
"service": "zed"
|
||||
},
|
||||
"value": 25
|
||||
},
|
||||
{
|
||||
"labels": {
|
||||
"service": "bar"
|
||||
},
|
||||
"value": 24
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"baseLabels": {
|
||||
"__name__": "rpc_latency_microseconds"
|
||||
},
|
||||
"docstring": "RPC latency.",
|
||||
"metric": {
|
||||
"type": "histogram",
|
||||
"value": [
|
||||
{
|
||||
"labels": {
|
||||
"service": "foo"
|
||||
},
|
||||
"value": {
|
||||
"0.010000": 15,
|
||||
"0.990000": 17
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
516
vendor/github.com/prometheus/common/expfmt/testdata/protobuf
generated
vendored
Normal file
516
vendor/github.com/prometheus/common/expfmt/testdata/protobuf
generated
vendored
Normal file
|
|
@ -0,0 +1,516 @@
|
|||
fc08 0a22 6874 7470 5f72 6571 7565 7374
|
||||
5f64 7572 6174 696f 6e5f 6d69 6372 6f73
|
||||
6563 6f6e 6473 122b 5468 6520 4854 5450
|
||||
2072 6571 7565 7374 206c 6174 656e 6369
|
||||
6573 2069 6e20 6d69 6372 6f73 6563 6f6e
|
||||
6473 2e18 0222 570a 0c0a 0768 616e 646c
|
||||
6572 1201 2f22 4708 0011 0000 0000 0000
|
||||
0000 1a12 0900 0000 0000 00e0 3f11 0000
|
||||
0000 0000 0000 1a12 09cd cccc cccc ccec
|
||||
3f11 0000 0000 0000 0000 1a12 09ae 47e1
|
||||
7a14 aeef 3f11 0000 0000 0000 0000 225d
|
||||
0a12 0a07 6861 6e64 6c65 7212 072f 616c
|
||||
6572 7473 2247 0800 1100 0000 0000 0000
|
||||
001a 1209 0000 0000 0000 e03f 1100 0000
|
||||
0000 0000 001a 1209 cdcc cccc cccc ec3f
|
||||
1100 0000 0000 0000 001a 1209 ae47 e17a
|
||||
14ae ef3f 1100 0000 0000 0000 0022 620a
|
||||
170a 0768 616e 646c 6572 120c 2f61 7069
|
||||
2f6d 6574 7269 6373 2247 0800 1100 0000
|
||||
0000 0000 001a 1209 0000 0000 0000 e03f
|
||||
1100 0000 0000 0000 001a 1209 cdcc cccc
|
||||
cccc ec3f 1100 0000 0000 0000 001a 1209
|
||||
ae47 e17a 14ae ef3f 1100 0000 0000 0000
|
||||
0022 600a 150a 0768 616e 646c 6572 120a
|
||||
2f61 7069 2f71 7565 7279 2247 0800 1100
|
||||
0000 0000 0000 001a 1209 0000 0000 0000
|
||||
e03f 1100 0000 0000 0000 001a 1209 cdcc
|
||||
cccc cccc ec3f 1100 0000 0000 0000 001a
|
||||
1209 ae47 e17a 14ae ef3f 1100 0000 0000
|
||||
0000 0022 660a 1b0a 0768 616e 646c 6572
|
||||
1210 2f61 7069 2f71 7565 7279 5f72 616e
|
||||
6765 2247 0800 1100 0000 0000 0000 001a
|
||||
1209 0000 0000 0000 e03f 1100 0000 0000
|
||||
0000 001a 1209 cdcc cccc cccc ec3f 1100
|
||||
0000 0000 0000 001a 1209 ae47 e17a 14ae
|
||||
ef3f 1100 0000 0000 0000 0022 620a 170a
|
||||
0768 616e 646c 6572 120c 2f61 7069 2f74
|
||||
6172 6765 7473 2247 0800 1100 0000 0000
|
||||
0000 001a 1209 0000 0000 0000 e03f 1100
|
||||
0000 0000 0000 001a 1209 cdcc cccc cccc
|
||||
ec3f 1100 0000 0000 0000 001a 1209 ae47
|
||||
e17a 14ae ef3f 1100 0000 0000 0000 0022
|
||||
600a 150a 0768 616e 646c 6572 120a 2f63
|
||||
6f6e 736f 6c65 732f 2247 0800 1100 0000
|
||||
0000 0000 001a 1209 0000 0000 0000 e03f
|
||||
1100 0000 0000 0000 001a 1209 cdcc cccc
|
||||
cccc ec3f 1100 0000 0000 0000 001a 1209
|
||||
ae47 e17a 14ae ef3f 1100 0000 0000 0000
|
||||
0022 5c0a 110a 0768 616e 646c 6572 1206
|
||||
2f67 7261 7068 2247 0800 1100 0000 0000
|
||||
0000 001a 1209 0000 0000 0000 e03f 1100
|
||||
0000 0000 0000 001a 1209 cdcc cccc cccc
|
||||
ec3f 1100 0000 0000 0000 001a 1209 ae47
|
||||
e17a 14ae ef3f 1100 0000 0000 0000 0022
|
||||
5b0a 100a 0768 616e 646c 6572 1205 2f68
|
||||
6561 7022 4708 0011 0000 0000 0000 0000
|
||||
1a12 0900 0000 0000 00e0 3f11 0000 0000
|
||||
0000 0000 1a12 09cd cccc cccc ccec 3f11
|
||||
0000 0000 0000 0000 1a12 09ae 47e1 7a14
|
||||
aeef 3f11 0000 0000 0000 0000 225e 0a13
|
||||
0a07 6861 6e64 6c65 7212 082f 7374 6174
|
||||
6963 2f22 4708 0011 0000 0000 0000 0000
|
||||
1a12 0900 0000 0000 00e0 3f11 0000 0000
|
||||
0000 0000 1a12 09cd cccc cccc ccec 3f11
|
||||
0000 0000 0000 0000 1a12 09ae 47e1 7a14
|
||||
aeef 3f11 0000 0000 0000 0000 2260 0a15
|
||||
0a07 6861 6e64 6c65 7212 0a70 726f 6d65
|
||||
7468 6575 7322 4708 3b11 5b8f c2f5 083f
|
||||
f440 1a12 0900 0000 0000 00e0 3f11 e17a
|
||||
14ae c7af 9340 1a12 09cd cccc cccc ccec
|
||||
3f11 2fdd 2406 81f0 9640 1a12 09ae 47e1
|
||||
7a14 aeef 3f11 3d0a d7a3 b095 a740 e608
|
||||
0a17 6874 7470 5f72 6571 7565 7374 5f73
|
||||
697a 655f 6279 7465 7312 2054 6865 2048
|
||||
5454 5020 7265 7175 6573 7420 7369 7a65
|
||||
7320 696e 2062 7974 6573 2e18 0222 570a
|
||||
0c0a 0768 616e 646c 6572 1201 2f22 4708
|
||||
0011 0000 0000 0000 0000 1a12 0900 0000
|
||||
0000 00e0 3f11 0000 0000 0000 0000 1a12
|
||||
09cd cccc cccc ccec 3f11 0000 0000 0000
|
||||
0000 1a12 09ae 47e1 7a14 aeef 3f11 0000
|
||||
0000 0000 0000 225d 0a12 0a07 6861 6e64
|
||||
6c65 7212 072f 616c 6572 7473 2247 0800
|
||||
1100 0000 0000 0000 001a 1209 0000 0000
|
||||
0000 e03f 1100 0000 0000 0000 001a 1209
|
||||
cdcc cccc cccc ec3f 1100 0000 0000 0000
|
||||
001a 1209 ae47 e17a 14ae ef3f 1100 0000
|
||||
0000 0000 0022 620a 170a 0768 616e 646c
|
||||
6572 120c 2f61 7069 2f6d 6574 7269 6373
|
||||
2247 0800 1100 0000 0000 0000 001a 1209
|
||||
0000 0000 0000 e03f 1100 0000 0000 0000
|
||||
001a 1209 cdcc cccc cccc ec3f 1100 0000
|
||||
0000 0000 001a 1209 ae47 e17a 14ae ef3f
|
||||
1100 0000 0000 0000 0022 600a 150a 0768
|
||||
616e 646c 6572 120a 2f61 7069 2f71 7565
|
||||
7279 2247 0800 1100 0000 0000 0000 001a
|
||||
1209 0000 0000 0000 e03f 1100 0000 0000
|
||||
0000 001a 1209 cdcc cccc cccc ec3f 1100
|
||||
0000 0000 0000 001a 1209 ae47 e17a 14ae
|
||||
ef3f 1100 0000 0000 0000 0022 660a 1b0a
|
||||
0768 616e 646c 6572 1210 2f61 7069 2f71
|
||||
7565 7279 5f72 616e 6765 2247 0800 1100
|
||||
0000 0000 0000 001a 1209 0000 0000 0000
|
||||
e03f 1100 0000 0000 0000 001a 1209 cdcc
|
||||
cccc cccc ec3f 1100 0000 0000 0000 001a
|
||||
1209 ae47 e17a 14ae ef3f 1100 0000 0000
|
||||
0000 0022 620a 170a 0768 616e 646c 6572
|
||||
120c 2f61 7069 2f74 6172 6765 7473 2247
|
||||
0800 1100 0000 0000 0000 001a 1209 0000
|
||||
0000 0000 e03f 1100 0000 0000 0000 001a
|
||||
1209 cdcc cccc cccc ec3f 1100 0000 0000
|
||||
0000 001a 1209 ae47 e17a 14ae ef3f 1100
|
||||
0000 0000 0000 0022 600a 150a 0768 616e
|
||||
646c 6572 120a 2f63 6f6e 736f 6c65 732f
|
||||
2247 0800 1100 0000 0000 0000 001a 1209
|
||||
0000 0000 0000 e03f 1100 0000 0000 0000
|
||||
001a 1209 cdcc cccc cccc ec3f 1100 0000
|
||||
0000 0000 001a 1209 ae47 e17a 14ae ef3f
|
||||
1100 0000 0000 0000 0022 5c0a 110a 0768
|
||||
616e 646c 6572 1206 2f67 7261 7068 2247
|
||||
0800 1100 0000 0000 0000 001a 1209 0000
|
||||
0000 0000 e03f 1100 0000 0000 0000 001a
|
||||
1209 cdcc cccc cccc ec3f 1100 0000 0000
|
||||
0000 001a 1209 ae47 e17a 14ae ef3f 1100
|
||||
0000 0000 0000 0022 5b0a 100a 0768 616e
|
||||
646c 6572 1205 2f68 6561 7022 4708 0011
|
||||
0000 0000 0000 0000 1a12 0900 0000 0000
|
||||
00e0 3f11 0000 0000 0000 0000 1a12 09cd
|
||||
cccc cccc ccec 3f11 0000 0000 0000 0000
|
||||
1a12 09ae 47e1 7a14 aeef 3f11 0000 0000
|
||||
0000 0000 225e 0a13 0a07 6861 6e64 6c65
|
||||
7212 082f 7374 6174 6963 2f22 4708 0011
|
||||
0000 0000 0000 0000 1a12 0900 0000 0000
|
||||
00e0 3f11 0000 0000 0000 0000 1a12 09cd
|
||||
cccc cccc ccec 3f11 0000 0000 0000 0000
|
||||
1a12 09ae 47e1 7a14 aeef 3f11 0000 0000
|
||||
0000 0000 2260 0a15 0a07 6861 6e64 6c65
|
||||
7212 0a70 726f 6d65 7468 6575 7322 4708
|
||||
3b11 0000 0000 40c4 d040 1a12 0900 0000
|
||||
0000 00e0 3f11 0000 0000 0030 7240 1a12
|
||||
09cd cccc cccc ccec 3f11 0000 0000 0030
|
||||
7240 1a12 09ae 47e1 7a14 aeef 3f11 0000
|
||||
0000 0030 7240 7c0a 1368 7474 705f 7265
|
||||
7175 6573 7473 5f74 6f74 616c 1223 546f
|
||||
7461 6c20 6e75 6d62 6572 206f 6620 4854
|
||||
5450 2072 6571 7565 7374 7320 6d61 6465
|
||||
2e18 0022 3e0a 0b0a 0463 6f64 6512 0332
|
||||
3030 0a15 0a07 6861 6e64 6c65 7212 0a70
|
||||
726f 6d65 7468 6575 730a 0d0a 066d 6574
|
||||
686f 6412 0367 6574 1a09 0900 0000 0000
|
||||
804d 40e8 080a 1868 7474 705f 7265 7370
|
||||
6f6e 7365 5f73 697a 655f 6279 7465 7312
|
||||
2154 6865 2048 5454 5020 7265 7370 6f6e
|
||||
7365 2073 697a 6573 2069 6e20 6279 7465
|
||||
732e 1802 2257 0a0c 0a07 6861 6e64 6c65
|
||||
7212 012f 2247 0800 1100 0000 0000 0000
|
||||
001a 1209 0000 0000 0000 e03f 1100 0000
|
||||
0000 0000 001a 1209 cdcc cccc cccc ec3f
|
||||
1100 0000 0000 0000 001a 1209 ae47 e17a
|
||||
14ae ef3f 1100 0000 0000 0000 0022 5d0a
|
||||
120a 0768 616e 646c 6572 1207 2f61 6c65
|
||||
7274 7322 4708 0011 0000 0000 0000 0000
|
||||
1a12 0900 0000 0000 00e0 3f11 0000 0000
|
||||
0000 0000 1a12 09cd cccc cccc ccec 3f11
|
||||
0000 0000 0000 0000 1a12 09ae 47e1 7a14
|
||||
aeef 3f11 0000 0000 0000 0000 2262 0a17
|
||||
0a07 6861 6e64 6c65 7212 0c2f 6170 692f
|
||||
6d65 7472 6963 7322 4708 0011 0000 0000
|
||||
0000 0000 1a12 0900 0000 0000 00e0 3f11
|
||||
0000 0000 0000 0000 1a12 09cd cccc cccc
|
||||
ccec 3f11 0000 0000 0000 0000 1a12 09ae
|
||||
47e1 7a14 aeef 3f11 0000 0000 0000 0000
|
||||
2260 0a15 0a07 6861 6e64 6c65 7212 0a2f
|
||||
6170 692f 7175 6572 7922 4708 0011 0000
|
||||
0000 0000 0000 1a12 0900 0000 0000 00e0
|
||||
3f11 0000 0000 0000 0000 1a12 09cd cccc
|
||||
cccc ccec 3f11 0000 0000 0000 0000 1a12
|
||||
09ae 47e1 7a14 aeef 3f11 0000 0000 0000
|
||||
0000 2266 0a1b 0a07 6861 6e64 6c65 7212
|
||||
102f 6170 692f 7175 6572 795f 7261 6e67
|
||||
6522 4708 0011 0000 0000 0000 0000 1a12
|
||||
0900 0000 0000 00e0 3f11 0000 0000 0000
|
||||
0000 1a12 09cd cccc cccc ccec 3f11 0000
|
||||
0000 0000 0000 1a12 09ae 47e1 7a14 aeef
|
||||
3f11 0000 0000 0000 0000 2262 0a17 0a07
|
||||
6861 6e64 6c65 7212 0c2f 6170 692f 7461
|
||||
7267 6574 7322 4708 0011 0000 0000 0000
|
||||
0000 1a12 0900 0000 0000 00e0 3f11 0000
|
||||
0000 0000 0000 1a12 09cd cccc cccc ccec
|
||||
3f11 0000 0000 0000 0000 1a12 09ae 47e1
|
||||
7a14 aeef 3f11 0000 0000 0000 0000 2260
|
||||
0a15 0a07 6861 6e64 6c65 7212 0a2f 636f
|
||||
6e73 6f6c 6573 2f22 4708 0011 0000 0000
|
||||
0000 0000 1a12 0900 0000 0000 00e0 3f11
|
||||
0000 0000 0000 0000 1a12 09cd cccc cccc
|
||||
ccec 3f11 0000 0000 0000 0000 1a12 09ae
|
||||
47e1 7a14 aeef 3f11 0000 0000 0000 0000
|
||||
225c 0a11 0a07 6861 6e64 6c65 7212 062f
|
||||
6772 6170 6822 4708 0011 0000 0000 0000
|
||||
0000 1a12 0900 0000 0000 00e0 3f11 0000
|
||||
0000 0000 0000 1a12 09cd cccc cccc ccec
|
||||
3f11 0000 0000 0000 0000 1a12 09ae 47e1
|
||||
7a14 aeef 3f11 0000 0000 0000 0000 225b
|
||||
0a10 0a07 6861 6e64 6c65 7212 052f 6865
|
||||
6170 2247 0800 1100 0000 0000 0000 001a
|
||||
1209 0000 0000 0000 e03f 1100 0000 0000
|
||||
0000 001a 1209 cdcc cccc cccc ec3f 1100
|
||||
0000 0000 0000 001a 1209 ae47 e17a 14ae
|
||||
ef3f 1100 0000 0000 0000 0022 5e0a 130a
|
||||
0768 616e 646c 6572 1208 2f73 7461 7469
|
||||
632f 2247 0800 1100 0000 0000 0000 001a
|
||||
1209 0000 0000 0000 e03f 1100 0000 0000
|
||||
0000 001a 1209 cdcc cccc cccc ec3f 1100
|
||||
0000 0000 0000 001a 1209 ae47 e17a 14ae
|
||||
ef3f 1100 0000 0000 0000 0022 600a 150a
|
||||
0768 616e 646c 6572 120a 7072 6f6d 6574
|
||||
6865 7573 2247 083b 1100 0000 00e0 b4fc
|
||||
401a 1209 0000 0000 0000 e03f 1100 0000
|
||||
0000 349f 401a 1209 cdcc cccc cccc ec3f
|
||||
1100 0000 0000 08a0 401a 1209 ae47 e17a
|
||||
14ae ef3f 1100 0000 0000 0aa0 405c 0a19
|
||||
7072 6f63 6573 735f 6370 755f 7365 636f
|
||||
6e64 735f 746f 7461 6c12 3054 6f74 616c
|
||||
2075 7365 7220 616e 6420 7379 7374 656d
|
||||
2043 5055 2074 696d 6520 7370 656e 7420
|
||||
696e 2073 6563 6f6e 6473 2e18 0022 0b1a
|
||||
0909 a470 3d0a d7a3 d03f 4f0a 1270 726f
|
||||
6365 7373 5f67 6f72 6f75 7469 6e65 7312
|
||||
2a4e 756d 6265 7220 6f66 2067 6f72 6f75
|
||||
7469 6e65 7320 7468 6174 2063 7572 7265
|
||||
6e74 6c79 2065 7869 7374 2e18 0122 0b12
|
||||
0909 0000 0000 0000 5140 4a0a 0f70 726f
|
||||
6365 7373 5f6d 6178 5f66 6473 1228 4d61
|
||||
7869 6d75 6d20 6e75 6d62 6572 206f 6620
|
||||
6f70 656e 2066 696c 6520 6465 7363 7269
|
||||
7074 6f72 732e 1801 220b 1209 0900 0000
|
||||
0000 00c0 4043 0a10 7072 6f63 6573 735f
|
||||
6f70 656e 5f66 6473 1220 4e75 6d62 6572
|
||||
206f 6620 6f70 656e 2066 696c 6520 6465
|
||||
7363 7269 7074 6f72 732e 1801 220b 1209
|
||||
0900 0000 0000 003d 404e 0a1d 7072 6f63
|
||||
6573 735f 7265 7369 6465 6e74 5f6d 656d
|
||||
6f72 795f 6279 7465 7312 1e52 6573 6964
|
||||
656e 7420 6d65 6d6f 7279 2073 697a 6520
|
||||
696e 2062 7974 6573 2e18 0122 0b12 0909
|
||||
0000 0000 004b 8841 630a 1a70 726f 6365
|
||||
7373 5f73 7461 7274 5f74 696d 655f 7365
|
||||
636f 6e64 7312 3653 7461 7274 2074 696d
|
||||
6520 6f66 2074 6865 2070 726f 6365 7373
|
||||
2073 696e 6365 2075 6e69 7820 6570 6f63
|
||||
6820 696e 2073 6563 6f6e 6473 2e18 0122
|
||||
0b12 0909 3d0a 172d e831 d541 4c0a 1c70
|
||||
726f 6365 7373 5f76 6972 7475 616c 5f6d
|
||||
656d 6f72 795f 6279 7465 7312 1d56 6972
|
||||
7475 616c 206d 656d 6f72 7920 7369 7a65
|
||||
2069 6e20 6279 7465 732e 1801 220b 1209
|
||||
0900 0000 0020 12c0 415f 0a27 7072 6f6d
|
||||
6574 6865 7573 5f64 6e73 5f73 645f 6c6f
|
||||
6f6b 7570 5f66 6169 6c75 7265 735f 746f
|
||||
7461 6c12 2554 6865 206e 756d 6265 7220
|
||||
6f66 2044 4e53 2d53 4420 6c6f 6f6b 7570
|
||||
2066 6169 6c75 7265 732e 1800 220b 1a09
|
||||
0900 0000 0000 0000 004f 0a1f 7072 6f6d
|
||||
6574 6865 7573 5f64 6e73 5f73 645f 6c6f
|
||||
6f6b 7570 735f 746f 7461 6c12 1d54 6865
|
||||
206e 756d 6265 7220 6f66 2044 4e53 2d53
|
||||
4420 6c6f 6f6b 7570 732e 1800 220b 1a09
|
||||
0900 0000 0000 0008 40cf 010a 2a70 726f
|
||||
6d65 7468 6575 735f 6576 616c 7561 746f
|
||||
725f 6475 7261 7469 6f6e 5f6d 696c 6c69
|
||||
7365 636f 6e64 7312 2c54 6865 2064 7572
|
||||
6174 696f 6e20 666f 7220 616c 6c20 6576
|
||||
616c 7561 7469 6f6e 7320 746f 2065 7865
|
||||
6375 7465 2e18 0222 7122 6f08 0b11 0000
|
||||
0000 0000 2240 1a12 097b 14ae 47e1 7a84
|
||||
3f11 0000 0000 0000 0000 1a12 099a 9999
|
||||
9999 99a9 3f11 0000 0000 0000 0000 1a12
|
||||
0900 0000 0000 00e0 3f11 0000 0000 0000
|
||||
0000 1a12 09cd cccc cccc ccec 3f11 0000
|
||||
0000 0000 f03f 1a12 09ae 47e1 7a14 aeef
|
||||
3f11 0000 0000 0000 f03f a301 0a39 7072
|
||||
6f6d 6574 6865 7573 5f6c 6f63 616c 5f73
|
||||
746f 7261 6765 5f63 6865 636b 706f 696e
|
||||
745f 6475 7261 7469 6f6e 5f6d 696c 6c69
|
||||
7365 636f 6e64 7312 5754 6865 2064 7572
|
||||
6174 696f 6e20 2869 6e20 6d69 6c6c 6973
|
||||
6563 6f6e 6473 2920 6974 2074 6f6f 6b20
|
||||
746f 2063 6865 636b 706f 696e 7420 696e
|
||||
2d6d 656d 6f72 7920 6d65 7472 6963 7320
|
||||
616e 6420 6865 6164 2063 6875 6e6b 732e
|
||||
1801 220b 1209 0900 0000 0000 0000 00f2
|
||||
010a 2870 726f 6d65 7468 6575 735f 6c6f
|
||||
6361 6c5f 7374 6f72 6167 655f 6368 756e
|
||||
6b5f 6f70 735f 746f 7461 6c12 3354 6865
|
||||
2074 6f74 616c 206e 756d 6265 7220 6f66
|
||||
2063 6875 6e6b 206f 7065 7261 7469 6f6e
|
||||
7320 6279 2074 6865 6972 2074 7970 652e
|
||||
1800 221b 0a0e 0a04 7479 7065 1206 6372
|
||||
6561 7465 1a09 0900 0000 0000 b880 4022
|
||||
1c0a 0f0a 0474 7970 6512 0770 6572 7369
|
||||
7374 1a09 0900 0000 0000 c05b 4022 180a
|
||||
0b0a 0474 7970 6512 0370 696e 1a09 0900
|
||||
0000 0000 807b 4022 1e0a 110a 0474 7970
|
||||
6512 0974 7261 6e73 636f 6465 1a09 0900
|
||||
0000 0000 a06b 4022 1a0a 0d0a 0474 7970
|
||||
6512 0575 6e70 696e 1a09 0900 0000 0000
|
||||
807b 40c4 010a 3c70 726f 6d65 7468 6575
|
||||
735f 6c6f 6361 6c5f 7374 6f72 6167 655f
|
||||
696e 6465 7869 6e67 5f62 6174 6368 5f6c
|
||||
6174 656e 6379 5f6d 696c 6c69 7365 636f
|
||||
6e64 7312 3751 7561 6e74 696c 6573 2066
|
||||
6f72 2062 6174 6368 2069 6e64 6578 696e
|
||||
6720 6c61 7465 6e63 6965 7320 696e 206d
|
||||
696c 6c69 7365 636f 6e64 732e 1802 2249
|
||||
2247 0801 1100 0000 0000 0000 001a 1209
|
||||
0000 0000 0000 e03f 1100 0000 0000 0000
|
||||
001a 1209 cdcc cccc cccc ec3f 1100 0000
|
||||
0000 0000 001a 1209 ae47 e17a 14ae ef3f
|
||||
1100 0000 0000 0000 00bf 010a 2d70 726f
|
||||
6d65 7468 6575 735f 6c6f 6361 6c5f 7374
|
||||
6f72 6167 655f 696e 6465 7869 6e67 5f62
|
||||
6174 6368 5f73 697a 6573 1241 5175 616e
|
||||
7469 6c65 7320 666f 7220 696e 6465 7869
|
||||
6e67 2062 6174 6368 2073 697a 6573 2028
|
||||
6e75 6d62 6572 206f 6620 6d65 7472 6963
|
||||
7320 7065 7220 6261 7463 6829 2e18 0222
|
||||
4922 4708 0111 0000 0000 0000 0040 1a12
|
||||
0900 0000 0000 00e0 3f11 0000 0000 0000
|
||||
0040 1a12 09cd cccc cccc ccec 3f11 0000
|
||||
0000 0000 0040 1a12 09ae 47e1 7a14 aeef
|
||||
3f11 0000 0000 0000 0040 660a 3070 726f
|
||||
6d65 7468 6575 735f 6c6f 6361 6c5f 7374
|
||||
6f72 6167 655f 696e 6465 7869 6e67 5f71
|
||||
7565 7565 5f63 6170 6163 6974 7912 2354
|
||||
6865 2063 6170 6163 6974 7920 6f66 2074
|
||||
6865 2069 6e64 6578 696e 6720 7175 6575
|
||||
652e 1801 220b 1209 0900 0000 0000 00d0
|
||||
406d 0a2e 7072 6f6d 6574 6865 7573 5f6c
|
||||
6f63 616c 5f73 746f 7261 6765 5f69 6e64
|
||||
6578 696e 675f 7175 6575 655f 6c65 6e67
|
||||
7468 122c 5468 6520 6e75 6d62 6572 206f
|
||||
6620 6d65 7472 6963 7320 7761 6974 696e
|
||||
6720 746f 2062 6520 696e 6465 7865 642e
|
||||
1801 220b 1209 0900 0000 0000 0000 0067
|
||||
0a2f 7072 6f6d 6574 6865 7573 5f6c 6f63
|
||||
616c 5f73 746f 7261 6765 5f69 6e67 6573
|
||||
7465 645f 7361 6d70 6c65 735f 746f 7461
|
||||
6c12 2554 6865 2074 6f74 616c 206e 756d
|
||||
6265 7220 6f66 2073 616d 706c 6573 2069
|
||||
6e67 6573 7465 642e 1800 220b 1a09 0900
|
||||
0000 0080 27cd 40c3 010a 3770 726f 6d65
|
||||
7468 6575 735f 6c6f 6361 6c5f 7374 6f72
|
||||
6167 655f 696e 7661 6c69 645f 7072 656c
|
||||
6f61 645f 7265 7175 6573 7473 5f74 6f74
|
||||
616c 1279 5468 6520 746f 7461 6c20 6e75
|
||||
6d62 6572 206f 6620 7072 656c 6f61 6420
|
||||
7265 7175 6573 7473 2072 6566 6572 7269
|
||||
6e67 2074 6f20 6120 6e6f 6e2d 6578 6973
|
||||
7465 6e74 2073 6572 6965 732e 2054 6869
|
||||
7320 6973 2061 6e20 696e 6469 6361 7469
|
||||
6f6e 206f 6620 6f75 7464 6174 6564 206c
|
||||
6162 656c 2069 6e64 6578 6573 2e18 0022
|
||||
0b1a 0909 0000 0000 0000 0000 6f0a 2a70
|
||||
726f 6d65 7468 6575 735f 6c6f 6361 6c5f
|
||||
7374 6f72 6167 655f 6d65 6d6f 7279 5f63
|
||||
6875 6e6b 6465 7363 7312 3254 6865 2063
|
||||
7572 7265 6e74 206e 756d 6265 7220 6f66
|
||||
2063 6875 6e6b 2064 6573 6372 6970 746f
|
||||
7273 2069 6e20 6d65 6d6f 7279 2e18 0122
|
||||
0b12 0909 0000 0000 0020 8f40 9c01 0a26
|
||||
7072 6f6d 6574 6865 7573 5f6c 6f63 616c
|
||||
5f73 746f 7261 6765 5f6d 656d 6f72 795f
|
||||
6368 756e 6b73 1263 5468 6520 6375 7272
|
||||
656e 7420 6e75 6d62 6572 206f 6620 6368
|
||||
756e 6b73 2069 6e20 6d65 6d6f 7279 2c20
|
||||
6578 636c 7564 696e 6720 636c 6f6e 6564
|
||||
2063 6875 6e6b 7320 2869 2e65 2e20 6368
|
||||
756e 6b73 2077 6974 686f 7574 2061 2064
|
||||
6573 6372 6970 746f 7229 2e18 0122 0b12
|
||||
0909 0000 0000 00e8 8d40 600a 2670 726f
|
||||
6d65 7468 6575 735f 6c6f 6361 6c5f 7374
|
||||
6f72 6167 655f 6d65 6d6f 7279 5f73 6572
|
||||
6965 7312 2754 6865 2063 7572 7265 6e74
|
||||
206e 756d 6265 7220 6f66 2073 6572 6965
|
||||
7320 696e 206d 656d 6f72 792e 1801 220b
|
||||
1209 0900 0000 0000 807a 40b7 010a 3570
|
||||
726f 6d65 7468 6575 735f 6c6f 6361 6c5f
|
||||
7374 6f72 6167 655f 7065 7273 6973 745f
|
||||
6c61 7465 6e63 795f 6d69 6372 6f73 6563
|
||||
6f6e 6473 1231 4120 7375 6d6d 6172 7920
|
||||
6f66 206c 6174 656e 6369 6573 2066 6f72
|
||||
2070 6572 7369 7374 696e 6720 6561 6368
|
||||
2063 6875 6e6b 2e18 0222 4922 4708 6f11
|
||||
1c2f dd24 e68c cc40 1a12 0900 0000 0000
|
||||
00e0 3f11 8d97 6e12 8360 3e40 1a12 09cd
|
||||
cccc cccc ccec 3f11 0ad7 a370 3d62 6b40
|
||||
1a12 09ae 47e1 7a14 aeef 3f11 7b14 ae47
|
||||
e1b6 7240 6a0a 2f70 726f 6d65 7468 6575
|
||||
735f 6c6f 6361 6c5f 7374 6f72 6167 655f
|
||||
7065 7273 6973 745f 7175 6575 655f 6361
|
||||
7061 6369 7479 1228 5468 6520 746f 7461
|
||||
6c20 6361 7061 6369 7479 206f 6620 7468
|
||||
6520 7065 7273 6973 7420 7175 6575 652e
|
||||
1801 220b 1209 0900 0000 0000 0090 407a
|
||||
0a2d 7072 6f6d 6574 6865 7573 5f6c 6f63
|
||||
616c 5f73 746f 7261 6765 5f70 6572 7369
|
||||
7374 5f71 7565 7565 5f6c 656e 6774 6812
|
||||
3a54 6865 2063 7572 7265 6e74 206e 756d
|
||||
6265 7220 6f66 2063 6875 6e6b 7320 7761
|
||||
6974 696e 6720 696e 2074 6865 2070 6572
|
||||
7369 7374 2071 7565 7565 2e18 0122 0b12
|
||||
0909 0000 0000 0000 0000 ac01 0a29 7072
|
||||
6f6d 6574 6865 7573 5f6c 6f63 616c 5f73
|
||||
746f 7261 6765 5f73 6572 6965 735f 6f70
|
||||
735f 746f 7461 6c12 3454 6865 2074 6f74
|
||||
616c 206e 756d 6265 7220 6f66 2073 6572
|
||||
6965 7320 6f70 6572 6174 696f 6e73 2062
|
||||
7920 7468 6569 7220 7479 7065 2e18 0022
|
||||
1b0a 0e0a 0474 7970 6512 0663 7265 6174
|
||||
651a 0909 0000 0000 0000 0040 222a 0a1d
|
||||
0a04 7479 7065 1215 6d61 696e 7465 6e61
|
||||
6e63 655f 696e 5f6d 656d 6f72 791a 0909
|
||||
0000 0000 0000 1440 d601 0a2d 7072 6f6d
|
||||
6574 6865 7573 5f6e 6f74 6966 6963 6174
|
||||
696f 6e73 5f6c 6174 656e 6379 5f6d 696c
|
||||
6c69 7365 636f 6e64 7312 584c 6174 656e
|
||||
6379 2071 7561 6e74 696c 6573 2066 6f72
|
||||
2073 656e 6469 6e67 2061 6c65 7274 206e
|
||||
6f74 6966 6963 6174 696f 6e73 2028 6e6f
|
||||
7420 696e 636c 7564 696e 6720 6472 6f70
|
||||
7065 6420 6e6f 7469 6669 6361 7469 6f6e
|
||||
7329 2e18 0222 4922 4708 0011 0000 0000
|
||||
0000 0000 1a12 0900 0000 0000 00e0 3f11
|
||||
0000 0000 0000 0000 1a12 09cd cccc cccc
|
||||
ccec 3f11 0000 0000 0000 0000 1a12 09ae
|
||||
47e1 7a14 aeef 3f11 0000 0000 0000 0000
|
||||
680a 2770 726f 6d65 7468 6575 735f 6e6f
|
||||
7469 6669 6361 7469 6f6e 735f 7175 6575
|
||||
655f 6361 7061 6369 7479 122e 5468 6520
|
||||
6361 7061 6369 7479 206f 6620 7468 6520
|
||||
616c 6572 7420 6e6f 7469 6669 6361 7469
|
||||
6f6e 7320 7175 6575 652e 1801 220b 1209
|
||||
0900 0000 0000 0059 4067 0a25 7072 6f6d
|
||||
6574 6865 7573 5f6e 6f74 6966 6963 6174
|
||||
696f 6e73 5f71 7565 7565 5f6c 656e 6774
|
||||
6812 2f54 6865 206e 756d 6265 7220 6f66
|
||||
2061 6c65 7274 206e 6f74 6966 6963 6174
|
||||
696f 6e73 2069 6e20 7468 6520 7175 6575
|
||||
652e 1801 220b 1209 0900 0000 0000 0000
|
||||
009e 020a 3070 726f 6d65 7468 6575 735f
|
||||
7275 6c65 5f65 7661 6c75 6174 696f 6e5f
|
||||
6475 7261 7469 6f6e 5f6d 696c 6c69 7365
|
||||
636f 6e64 7312 2354 6865 2064 7572 6174
|
||||
696f 6e20 666f 7220 6120 7275 6c65 2074
|
||||
6f20 6578 6563 7574 652e 1802 2260 0a15
|
||||
0a09 7275 6c65 5f74 7970 6512 0861 6c65
|
||||
7274 696e 6722 4708 3711 0000 0000 0000
|
||||
2840 1a12 0900 0000 0000 00e0 3f11 0000
|
||||
0000 0000 0000 1a12 09cd cccc cccc ccec
|
||||
3f11 0000 0000 0000 0000 1a12 09ae 47e1
|
||||
7a14 aeef 3f11 0000 0000 0000 0840 2261
|
||||
0a16 0a09 7275 6c65 5f74 7970 6512 0972
|
||||
6563 6f72 6469 6e67 2247 0837 1100 0000
|
||||
0000 002e 401a 1209 0000 0000 0000 e03f
|
||||
1100 0000 0000 0000 001a 1209 cdcc cccc
|
||||
cccc ec3f 1100 0000 0000 0000 001a 1209
|
||||
ae47 e17a 14ae ef3f 1100 0000 0000 0008
|
||||
4069 0a29 7072 6f6d 6574 6865 7573 5f72
|
||||
756c 655f 6576 616c 7561 7469 6f6e 5f66
|
||||
6169 6c75 7265 735f 746f 7461 6c12 2d54
|
||||
6865 2074 6f74 616c 206e 756d 6265 7220
|
||||
6f66 2072 756c 6520 6576 616c 7561 7469
|
||||
6f6e 2066 6169 6c75 7265 732e 1800 220b
|
||||
1a09 0900 0000 0000 0000 0060 0a21 7072
|
||||
6f6d 6574 6865 7573 5f73 616d 706c 6573
|
||||
5f71 7565 7565 5f63 6170 6163 6974 7912
|
||||
2c43 6170 6163 6974 7920 6f66 2074 6865
|
||||
2071 7565 7565 2066 6f72 2075 6e77 7269
|
||||
7474 656e 2073 616d 706c 6573 2e18 0122
|
||||
0b12 0909 0000 0000 0000 b040 da01 0a1f
|
||||
7072 6f6d 6574 6865 7573 5f73 616d 706c
|
||||
6573 5f71 7565 7565 5f6c 656e 6774 6812
|
||||
a701 4375 7272 656e 7420 6e75 6d62 6572
|
||||
206f 6620 6974 656d 7320 696e 2074 6865
|
||||
2071 7565 7565 2066 6f72 2075 6e77 7269
|
||||
7474 656e 2073 616d 706c 6573 2e20 4561
|
||||
6368 2069 7465 6d20 636f 6d70 7269 7365
|
||||
7320 616c 6c20 7361 6d70 6c65 7320 6578
|
||||
706f 7365 6420 6279 206f 6e65 2074 6172
|
||||
6765 7420 6173 206f 6e65 206d 6574 7269
|
||||
6320 6661 6d69 6c79 2028 692e 652e 206d
|
||||
6574 7269 6373 206f 6620 7468 6520 7361
|
||||
6d65 206e 616d 6529 2e18 0122 0b12 0909
|
||||
0000 0000 0000 0000 d902 0a29 7072 6f6d
|
||||
6574 6865 7573 5f74 6172 6765 745f 696e
|
||||
7465 7276 616c 5f6c 656e 6774 685f 7365
|
||||
636f 6e64 7312 2141 6374 7561 6c20 696e
|
||||
7465 7276 616c 7320 6265 7477 6565 6e20
|
||||
7363 7261 7065 732e 1802 2282 010a 0f0a
|
||||
0869 6e74 6572 7661 6c12 0331 3573 226f
|
||||
0804 1100 0000 0000 804d 401a 1209 7b14
|
||||
ae47 e17a 843f 1100 0000 0000 002c 401a
|
||||
1209 9a99 9999 9999 a93f 1100 0000 0000
|
||||
002c 401a 1209 0000 0000 0000 e03f 1100
|
||||
0000 0000 002e 401a 1209 cdcc cccc cccc
|
||||
ec3f 1100 0000 0000 002e 401a 1209 ae47
|
||||
e17a 14ae ef3f 1100 0000 0000 002e 4022
|
||||
8101 0a0e 0a08 696e 7465 7276 616c 1202
|
||||
3173 226f 083a 1100 0000 0000 003c 401a
|
||||
1209 7b14 ae47 e17a 843f 1100 0000 0000
|
||||
0000 001a 1209 9a99 9999 9999 a93f 1100
|
||||
0000 0000 0000 001a 1209 0000 0000 0000
|
||||
e03f 1100 0000 0000 0000 001a 1209 cdcc
|
||||
cccc cccc ec3f 1100 0000 0000 00f0 3f1a
|
||||
1209 ae47 e17a 14ae ef3f 1100 0000 0000
|
||||
00f0 3f
|
||||
129
vendor/github.com/prometheus/common/expfmt/testdata/protobuf.gz
generated
vendored
Normal file
129
vendor/github.com/prometheus/common/expfmt/testdata/protobuf.gz
generated
vendored
Normal file
|
|
@ -0,0 +1,129 @@
|
|||
1f8b 0808 efa0 c754 0003 7072 6f74 6f62
|
||||
7566 00ed 594d 8c1c c515 9eb1 8d3d 5b86
|
||||
6037 265e 8c4d ca03 c4bb ceee cc9a 9f58
|
||||
01cc f6ca 4424 041b 8837 21c8 24ed daee
|
||||
9a99 cef6 1f55 d578 c7e4 b004 0e39 8088
|
||||
8448 048a 124b 4442 9110 e110 25b9 c54a
|
||||
9072 01c5 9724 4a24 2472 413e 448a 8592
|
||||
1b87 bcea aeda eeea 99d9 3530 49a4 68e7
|
||||
b0bb 5355 fdde abf7 bef7 bdf7 7a3f 6ca0
|
||||
664f 88c4 61f4 8994 72e1 7829 23c2 8f23
|
||||
27f4 5d16 73ea c691 c7ad cf2d f628 fed2
|
||||
e2e2 c358 9dc3 0111 3472 7dca b11f e1f2
|
||||
d9d6 e496 e6a3 e86a b4a3 4722 2fa0 ccaa
|
||||
b79b f737 6abb 6bea b3cf 9ac8 ff78 6fbe
|
||||
bcf6 cedb f2f3 7763 ed8d fbff 766e cf1b
|
||||
ff28 d69a df44 5621 7847 9bc0 2fc1 c727
|
||||
7e09 ed2d c45f dd26 89df 0ea9 60be 3b46
|
||||
1d67 d0f5 850e 94e9 008f b2fe f834 74d0
|
||||
8d85 865d 8506 8791 a84b ffa3 de12 8475
|
||||
e938 2352 f116 208c c701 e563 84d4 e368
|
||||
77a1 617b bbcb 48d2 1b9f f4d3 6857 21fd
|
||||
aa76 8f92 647c c2bf 85ae 2b84 37da 5c40
|
||||
e6ba 6374 8de9 fc84 c590 0c3d 9aca f0de
|
||||
bdfb f40b bffd 5763 fe9f 7659 8314 f0fb
|
||||
9fbf 6897 35b4 dfbd 65fb d397 7f60 9735
|
||||
1c43 7f7e f5cd 975e b3df 6fa0 bd06 fb70
|
||||
ff1c 7596 fa82 720b 0f50 8edc cce8 263b
|
||||
b0c9 339b 3cb3 c933 5afa ff2f cfc8 13f6
|
||||
5b17 ed01 0d73 cc1e d090 af99 1a60 ed3b
|
||||
e8ba 32cd 7047 c482 04d6 cd8b f217 8ed2
|
||||
7089 321c 770c bae1 3824 1e6d 4dd6 9af7
|
||||
a29d 689b 1b7b d4da 7adb dcdc 085b d135
|
||||
68bb fc33 f6ac ad00 cd7d 13b9 b5ab 27ec
|
||||
4b0d 34a9 b4f3 0470 45cb 2c77 b0c4 72f9
|
||||
ee26 cd7d 02ec 6cd2 dc26 cd7d 6ce1 ff73
|
||||
9a7b ef17 1f0e d2dc 1d3f 19a4 b9c6 f941
|
||||
9a43 e7ed c7d1 0d20 d5a5 9c3b 6e92 3a6a
|
||||
2053 6437 9793 5dca 81ea c006 ccfb 5cd0
|
||||
101f 7ff8 6b58 f821 d04e 4223 2169 676d
|
||||
8eab 3577 028d fd34 91dd dac5 f987 90a5
|
||||
8577 6316 a7c2 8f80 bf0e 9f5c 23cf 6215
|
||||
8b1e 11d8 4d19 0391 411f d315 9f8b d664
|
||||
bdb9 d352 b458 7bc4 7e00 5dab e585 64c5
|
||||
e9c0 9439 7582 acf8 611a 9618 3906 ab70
|
||||
c70f 28f6 2877 999f 8898 7153 d405 fb38
|
||||
daa5 45c9 f399 2c7c f2a3 c838 669f 4407
|
||||
b40c 6062 df03 cb9d 9086 31e4 79ce d437
|
||||
7d55 2de3 7c39 e3e9 124d 97c4 7de5 7b0b
|
||||
2eda a7c5 018e 9870 a48f 7544 accf 9f92
|
||||
6bb9 dfc1 4040 0156 a741 6ae4 529c 46fe
|
||||
0aa6 49ec f68c 88e4 3a8e a1bd b397 8efc
|
||||
71e1 41b4 5feb 78d2 6722 2581 69f1 81af
|
||||
e7ab 1b1a 8cad 0b0b 0e3a 5420 d2f1 22b0
|
||||
db73 8238 5e4e 13a7 43fc 2005 af28 24dd
|
||||
2a6b 5611 a2fb 4e9e 9a3d 751f cecf 627d
|
||||
56c3 47a3 ff21 f499 51f2 b5dc 03eb c8ad
|
||||
c86b d87f a8a3 c325 81f4 4912 a404 025b
|
||||
7e81 1104 bef6 f88c 94ad b770 2786 1c08
|
||||
02ac 9e82 25c0 6c0c 38a5 6e2a a82c b94f
|
||||
34e3 c64e 95ba 4d99 6c4f ed91 e9f6 ac91
|
||||
e2af bc2c 3f3f 9bff 88f4 7079 7e90 1e2e
|
||||
cfbf 5a47 5f28 5d28 885d 8827 871b 912e
|
||||
75dc 1e75 9793 d88f c488 fb3d 6adc 6f2a
|
||||
7b27 536c 4f63 1fd0 068e 94b7 2c64 0118
|
||||
6615 3654 5dce 9801 58d5 8353 69b4 5cc9
|
||||
925a ed83 3a9a 5ac7 4878 0432 50c7 f376
|
||||
6993 a8b4 58d9 2199 924c f97d a92f f1ef
|
||||
332c fa49 d66e dd88 3e85 b6c9 2fd6 7697
|
||||
5122 a88e faaf 57ed e67e 74ad dadc 0122
|
||||
38f0 8ade bd70 da6e 4eca 4e2d dbdd 9af8
|
||||
d15a 0ff6 94dd bc09 ca52 be33 21a0 6e73
|
||||
d9ce e9fd f3cb 7673 1ff4 6ff9 fe55 6964
|
||||
3efb 561d dd33 f2ce 7ee4 01bb 455d 6789
|
||||
08b7 e7e4 6fc5 fa66 6c8e 3e92 9248 00ff
|
||||
f00c 78d9 49ac 1fac be48 2b9e 9330 fc32
|
||||
d486 fa58 aacf 6fea 68f6 4a6f 9175 a0d6
|
||||
8269 f69a c1b9 fd79 973a 5504 5623 08c2
|
||||
921f 991e b8c0 6071 cbd7 aa17 182c 6eb0
|
||||
d641 731b db0f 8d59 0a40 2409 717d d187
|
||||
061f 10a8 bf69 a65d bb48 76d8 44f8 453b
|
||||
44ad 2b55 13d0 a82b 7a39 b50c fae1 2cf1
|
||||
85d4 0219 b7a4 9452 af9a 4f5d d45e 475b
|
||||
17c6 10ea 399c 8449 60b2 6f35 abd4 11ac
|
||||
9f29 b3e5 eaa1 77ec dfd5 d1d1 7514 010d
|
||||
fa9e 9330 1ac4 c4ab 4e49 fd61 0ad5 d962
|
||||
5862 b443 1953 1726 388a a3d9 acec cb82
|
||||
092d 07e0 bb85 177b 3e98 2849 46fa c377
|
||||
73b2 9215 3a15 1ea4 8107 c9b0 4403 e5ac
|
||||
8112 121b 8c6f de41 15be 8c5d 6495 e7d6
|
||||
6d59 ecf3 1e64 807f 4a8d 4096 76d9 d346
|
||||
70f0 0bf6 8fea e8b3 57a4 905b ee3a ca4a
|
||||
1a66 a0c4 b841 ea49 37b9 411c 51cd b3c0
|
||||
d82d dad2 5fce fa30 47a6 02dc 58d8 396d
|
||||
5877 e979 fbcc c6c6 e57e b70e 0d37 2edf
|
||||
1d71 fdd5 73f6 afea e8ce 911a 14f9 9608
|
||||
aff4 df82 230b 98a7 6148 5896 7305 c149
|
||||
1a51 0f4a 0f50 023c 925d 5933 45bc 7b7f
|
||||
fbdd 5bde 7fee 6d83 299e ff61 643d 73e6
|
||||
5e83 29a0 254d 8e2d 2d1b 4c91 95e8 5f32
|
||||
fbdb eb24 95b6 bb42 1453 05c6 ab74 a19e
|
||||
18c6 16df b7cf ad43 aaa6 2a45 1677 ad0b
|
||||
14cd 1910 930d 54d7 6aaf d7d1 f448 dd79
|
||||
6c4b b5f8 8ea1 ac91 23e0 6315 6360 e4e6
|
||||
6174 406d 5e1f 12e8 2768 44a0 7905 3e51
|
||||
005c 3bbb c7fe 9359 7ea2 58f8 1d45 007c
|
||||
78d5 fcc6 83f9 2adc be5c 8638 8db2 f4c9
|
||||
de55 6043 0e54 a358 f634 3ac3 3c16 2709
|
||||
a498 7168 ad2a 8d67 a8eb 196d b379 ad0a
|
||||
c65a c38a d1b0 6b0c 09f7 6376 17dd ba81
|
||||
2285 b0b6 598e 8629 50f0 1a0a ab1f 6f31
|
||||
ea2c 4b03 ea14 6df2 88ee f3e6 c1ee 1acb
|
||||
272b 4db5 1c80 2732 8919 681a 996d 1029
|
||||
88c6 51e5 d1a9 613d c215 46a3 6137 09fa
|
||||
7459 c304 0303 9967 aa68 7d22 15be 9175
|
||||
55f7 5426 a5d9 6159 9739 a678 66e4 c474
|
||||
061d 2c69 d24d 4005 5433 c72b 80ca f6b3
|
||||
10a4 d159 e60b c821 dd1d 98a1 7ed3 fe6b
|
||||
dd98 c94c 0d0a 4daf d58f 0f90 952f 6868
|
||||
8268 843e fc45 c9f0 f238 76e3 3061 8017
|
||||
9ecd 5dba 5da1 2b09 140d 4fd2 0e14 439c
|
||||
bfee c284 67df f246 0adc 0350 ebab 02a9
|
||||
9b2b 7559 9003 5887 1fd3 5518 ff65 8b11
|
||||
a75c b223 398a 81e7 d5ed d6e6 f183 0b6e
|
||||
3628 eb7d 2042 2ace 5279 1597 9124 7f0b
|
||||
fbdd 3acc 1e0d 7dc4 da7a e44e 0e43 e2b6
|
||||
1c19 ab27 860c 8933 f6e0 9038 3304 7dad
|
||||
214d 706b 4813 dcb2 9b4f d781 900b 23b6
|
||||
1c91 36dc a5f6 eff9 af0c aaff 06f1 48e5
|
||||
4433 2000 00
|
||||
163
vendor/github.com/prometheus/common/expfmt/testdata/test.gz
generated
vendored
Normal file
163
vendor/github.com/prometheus/common/expfmt/testdata/test.gz
generated
vendored
Normal file
|
|
@ -0,0 +1,163 @@
|
|||
1f8b 0808 2aa1 c754 0003 7465 7874 00b5
|
||||
5b5d 939b 3816 7def 5fa1 ea79 99a9 4d3c
|
||||
601b db3c f4c3 5426 55f3 309b ca6e 7ab7
|
||||
6a9e 281a d436 150c 04c4 a4bd 5df3 dff7
|
||||
4a88 361f 025d 094f 1e92 34e8 1cae 8ea4
|
||||
ab7b 04fd 03f9 ede3 ef9f c989 b122 28e9
|
||||
b79a 562c 88eb 3264 499e 05e7 242a f38a
|
||||
4679 1657 e4f1 44c9 6f8f 8f9f 896c 46d2
|
||||
90d1 2c4a 6845 928c 749b aeee 7e20 8f7f
|
||||
7cfe 8861 adea f339 2c2f 77fa a6af a730
|
||||
8b53 5a3e dcff 7cff ee5b 1d66 2c49 e9c3
|
||||
bdb3 f2ee ff22 ce12 027f 3101 9621 80ee
|
||||
7659 90a8 28af 3366 8eeb 2042 f887 558b
|
||||
7553 d158 a8a7 a4b1 d450 7259 2a69 84ee
|
||||
e28a e4e7 3365 6512 dd40 d429 2e1b 6527
|
||||
b96c e5ed 10da 6a6c 4c31 0043 cbf2 7213
|
||||
9915 4c96 22ab 9816 48dc d02d 10d8 8440
|
||||
050d ca30 3bd2 db89 ace2 5b22 b592 6fa9
|
||||
e092 74a9 ec46 3403 0216 9647 7a8b cc3c
|
||||
c565 29ba 9a6b 81e0 2de1 02b1 cd28 3a60
|
||||
f8b9 ca53 5a2d 2f1c 2698 2c44 9e62 b294
|
||||
f84a 6729 b029 4107 7a2c c3e2 b458 5a05
|
||||
8b85 ac2a 164b 491b 2a4b 394d c01d d889
|
||||
86c5 6225 c724 1642 2a48 2c75 144c 9632
|
||||
1a60 3ba8 8ac1 ed68 f96a 57f2 5868 a9e6
|
||||
b194 b325 b354 d40c 7e05 1665 0e45 dc89
|
||||
d68a bdca dd38 fbd5 7aef dd84 90cb e21e
|
||||
bcc3 6ab7 59df 8690 336e 9cc3 7eb5 396c
|
||||
8df5 eeb0 425c 7bff 70d8 ad3c 47fe 712d
|
||||
46a0 4fe8 fa60 96c7 16bc 4afe 4783 a70b
|
||||
a30a dfcd ef09 cf2d eeab cd76 07af 74d8
|
||||
d7fb 26b6 1a81 524c 6a0c 6a16 a675 cd9d
|
||||
a67a abac 0c07 e98f d158 ac0c 5827 3c29
|
||||
c694 819d 9144 0fb1 34ba 6604 6889 4c2c
|
||||
edb4 4e73 2674 4e2c 1cce cab1 9ac0 4dd4
|
||||
427a d359 ad26 fca4 4629 2d6a 81f5 3427
|
||||
31d6 0c6b 32f5 ca4d 5942 8c7e 7aac a587
|
||||
3423 3051 0fed 1667 959b f477 1ad5 1038
|
||||
2b33 6802 c7aa 6560 fb26 b59a b16a 334a
|
||||
a150 c6ae 0e0b c5ea 83f4 6f93 da4c f8ae
|
||||
195d b408 537b 8644 6215 c119 b149 41d4
|
||||
0e6a 460f 1dc0 c267 e1c1 5851 d08e 6a52
|
||||
9749 1f34 230d 0283 334c 6bdf b527 f017
|
||||
1368 1866 0cd0 66bb 3d1c b07a 619c 4e15
|
||||
b09c 8529 7914 7f67 f5f9 8996 247f ee39
|
||||
9e8a 9cc3 982a 8d4e 0b17 4fa6 e59d e2de
|
||||
6b94 c7d0 edb5 e3dc bf53 4ac3 ff93 c70f
|
||||
f7b0 8728 e3ac 0ac8 9c74 c292 3537 359e
|
||||
6ccc 3030 65a3 0638 5786 87f9 96b0 79dc
|
||||
8c31 1bb7 9d73 6673 1169 ad99 2918 ad85
|
||||
de9c e914 195b 2dbd 2e08 8cb1 3fb3 62c0
|
||||
eb84 7368 5ab1 d456 0ba1 1812 6868 d22c
|
||||
f046 9269 6d1a 46b0 91e3 c2c9 a587 5939
|
||||
356b 1673 e1f4 5e0d 2ddf d870 1988 8800
|
||||
1bdb 352b 0623 0911 860d 239f c279 e1a4
|
||||
c300 0d3d 9b05 1e2d 19ca b5e9 0453 1a30
|
||||
bd5c 3898 8171 33c4 a245 d25a 379d 4023
|
||||
27a6 1747 0fc1 bb37 3328 5a16 9d7f d3a9
|
||||
32f4 637a 51b4 0823 0b67 8c46 2b83 3071
|
||||
3a71 148e 4caf 0f06 84f4 71ce d65f 4021
|
||||
7c98 e31d 9650 341c bb2d 52b1 9e27 5b6f
|
||||
f79d 7758 5ae1 a6fc 1c5c 8f68 05cd 8b3a
|
||||
685f 7a75 5d5d 5d81 a703 1252 5d2a 46cf
|
||||
e4c3 e7ff 1096 9cc1 3515 3463 dc35 0d3f
|
||||
1c9d 666c 8dde 740b 1819 6f18 d931 2ff3
|
||||
9a25 1938 af4f 6f16 b373 919d 4246 a2ba
|
||||
2c21 9ef4 42e8 4b52 b151 309d f6c7 b03e
|
||||
d23b c58d bd33 7cf4 397c 099e e38a fc33
|
||||
7c49 cef5 b963 7173 e83d 7986 7124 31ad
|
||||
a232 2958 5e8e 2568 f1fd 47b6 570f aebf
|
||||
1e3e 91f3 8a9b 9f0c 1ff5 06ec 3feb edf2
|
||||
7a34 e230 6992 1834 0bce f49c 432d d498
|
||||
db7f cbab a4b9 2acc f1d8 1bcf 73f4 4350
|
||||
b7f1 569b c3de f1fc 35fd 87b3 1f86 068b
|
||||
bc64 019f 66ed fc20 5ff8 a566 e681 2630
|
||||
91db c610 6116 5152 67c9 0ba1 451e 9de6
|
||||
e6a4 82b8 1fac a281 bbda aed7 9bdd c1df
|
||||
1e36 3b88 7624 e49f 49c9 ea30 edf7 efbf
|
||||
cd45 9c8c 4a86 7e60 ca26 de6a eb6e f707
|
||||
dfe5 2a1e 3a71 c9a5 1ec4 1974 290e d23c
|
||||
ff5a 17c1 7398 a435 0c47 bbc0 41c4 eb8c
|
||||
fef5 d397 f75f 7e25 4d53 d236 ed86 8a22
|
||||
edac 7154 7b47 1735 225a 7d94 d8e8 da76
|
||||
7b45 54f4 cf30 ad43 587c dd4f 05d2 34e9
|
||||
7e63 dfde 21cf 3964 cd34 2512 0497 2051
|
||||
e590 9c68 5433 aa8a 5747 df9e 3ae1 21af
|
||||
ddbd c671 c596 698b f696 a017 81c5 2725
|
||||
d660 5334 df70 89bb 3641 8839 45d6 1bc5
|
||||
9449 f308 966c 05d8 f048 83e8 44a3 af45
|
||||
9e64 0c33 837e 14bf 9871 bdfb 1349 20ff
|
||||
c12c e5f3 e84a 0549 e5bd cc31 f218 45ec
|
||||
d650 46c6 d0aa cebe 2a17 8761 606f a9c8
|
||||
12af 5ae4 430a 0815 76ab ee6a 6783 6365
|
||||
d186 6f87 a55c 504f 17be 1124 2561 9742
|
||||
b9a6 e69f a148 06b3 8057 fe98 87fb a8a4
|
||||
21e3 8706 9e7f 30c5 42ec 1594 27e2 6ba4
|
||||
ad31 38c9 00e8 af1d 5320 2bc3 ace2 27e9
|
||||
00df ba9e 29bc ceae 4fd6 8d63 92c5 5080
|
||||
65c7 e029 64d1 2968 7ecd e8d2 9f0d ff92
|
||||
0bb4 1259 5234 242d 6ef8 8b49 5798 7e7c
|
||||
31cf 5664 5163 92f9 dcb6 8cce bf31 dd72
|
||||
3e91 1117 5234 29d2 359d 3dcd 8b99 fe74
|
||||
799b 28cd bc69 9afc 784d 126d 1284 95d6
|
||||
34f9 c978 e234 9ca6 3345 a046 5363 bd00
|
||||
ef2f c55b 1088 d136 c518 0fef b79a d690
|
||||
6dc2 228c 1276 11c9 feed 0759 ddbf 8db3
|
||||
686b 3086 036e cdd6 3505 7377 fc7b 53c3
|
||||
0ea5 343b b2d3 a052 6d27 e4f7 3061 bc3f
|
||||
b07b 3fc9 eed1 d8b8 5ff2 1166 bd92 204c
|
||||
f63e 5270 f971 5085 e722 a573 9bb1 6c41
|
||||
5a08 a627 4a72 ed2e 3c81 db38 dbbd bee6
|
||||
4a32 a8de 9238 284a 9ae6 613c 7a73 ade8
|
||||
996c 7a7d 815d d267 5a96 72ec 4292 e5d9
|
||||
7b71 c8c0 5d72 454b d8ab 5640 9480 16bc
|
||||
f6e2 439b 444d 0dc7 dd7b cd62 4889 316c
|
||||
6c4f 3495 e38e dacc 6603 47a8 368b d7cf
|
||||
0569 3445 49c0 0f1e 9af2 549e b38c aab2
|
||||
ced1 84d8 b805 58df cbf1 4334 337b 0c70
|
||||
1dcf 37ea cc6c 473a d1bf 03b7 16a5 75cc
|
||||
073e 4af3 8cb6 0535 94e6 2bba 6a7f f89e
|
||||
b013 0c32 4c8c ab06 883d a71f 9141 af79
|
||||
8f11 8598 8434 f373 a2c7 f2a6 f978 4920
|
||||
2e6a d978 bbd6 e753 591e 778a 88ce 6f9b
|
||||
ffd2 6ec9 3cf4 6b99 c88b 0289 e323 4543
|
||||
a80a 8450 fade cc3e 4ebb ffcf a147 75c0
|
||||
c659 6df6 fb1b 9035 47c6 9b95 b7f1 6fc1
|
||||
26e8 76eb dd6a bbdb d8f1 3515 8303 c3bb
|
||||
9af5 16b3 1cb2 82d8 e3a7 88a2 8490 9971
|
||||
5048 4800 b68e 98e0 d74c f509 14ac 54d3
|
||||
1e75 6a88 c914 d596 12b0 7017 f710 5750
|
||||
2831 fa24 d42c 7d8d ad97 f9c1 ded7 8f9e
|
||||
a2dd 1c87 88a1 b39f 2980 27a0 e730 8147
|
||||
6661 16f1 ad57 a63e f1a6 4521 5296 b3e4
|
||||
59d6 0895 daa7 fede 5c24 df7a e6a7 a299
|
||||
d88e c467 46a4 4703 1e28 e787 41ed 8e15
|
||||
9779 51c0 96d5 6ba4 dc97 10d1 2872 a11e
|
||||
356f 930d f123 1f6b 8ab7 2018 3b5f 04a6
|
||||
c964 aaa5 d107 232c 906a 9427 d7f8 2cfb
|
||||
6875 cfb6 761d 6cf8 4ac3 a30a 5b66 2aa3
|
||||
e8a7 32d3 4c5b 55dc 659d d2e0 7a0c 8f3e
|
||||
bc27 1ca8 39b3 c771 2b56 0f0a f82a 5a35
|
||||
f945 880a eb5a f5ae fff6 bca3 c572 2bde
|
||||
d189 048a 58bc 0557 91ff 3538 aac7 b135
|
||||
6fc6 27f8 fa25 8c71 bf4b b854 c67f c340
|
||||
4d10 2f1f a929 62f1 8bb7 8b87 eaca 0eda
|
||||
9a4b 3b1e ab1e a1eb 2116 bce2 ade7 b004
|
||||
114b fd0a 997d fba9 a157 d41e 1a84 2a69
|
||||
b547 1d83 ccfc 61b0 4388 db22 5dd5 d9f7
|
||||
3261 b01f b507 33aa d027 5847 1976 a2dd
|
||||
d6f1 77da 5865 26fe 30aa 5d13 46cf fd8d
|
||||
6022 70f2 915b 38de 1cc4 3c17 25cc 854a
|
||||
bc4b 6d8f 9ce8 4b01 c621 e665 22b8 72d2
|
||||
7c8e 48c2 4afc d41c b7c1 08c2 34ba 48a7
|
||||
de1e c149 d580 07f6 2bf8 4b59 0e29 bba3
|
||||
9168 66fb 69a2 0b78 7558 c214 904d df3e
|
||||
2ef8 2512 5f09 b4b7 a1f6 a5ec 3be5 6a44
|
||||
6558 a887 5143 a9d8 6ee6 11af edf5 877b
|
||||
d71b 7ca2 245e 1bbb db1b 9179 3724 f346
|
||||
19c5 9ecb bf25 9729 9948 997d 42fe 7ad0
|
||||
84a1 c992 238e b55d 8f54 53c0 b90d d568
|
||||
1fb4 a6ba 1dd3 e813 017b 2643 aae1 c8f3
|
||||
41f3 168d 7bf3 71df feee ff2d f9e8 431a
|
||||
5200 00
|
||||
322
vendor/github.com/prometheus/common/expfmt/testdata/text
generated
vendored
Normal file
322
vendor/github.com/prometheus/common/expfmt/testdata/text
generated
vendored
Normal file
|
|
@ -0,0 +1,322 @@
|
|||
# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
|
||||
# TYPE http_request_duration_microseconds summary
|
||||
http_request_duration_microseconds{handler="/",quantile="0.5"} 0
|
||||
http_request_duration_microseconds{handler="/",quantile="0.9"} 0
|
||||
http_request_duration_microseconds{handler="/",quantile="0.99"} 0
|
||||
http_request_duration_microseconds_sum{handler="/"} 0
|
||||
http_request_duration_microseconds_count{handler="/"} 0
|
||||
http_request_duration_microseconds{handler="/alerts",quantile="0.5"} 0
|
||||
http_request_duration_microseconds{handler="/alerts",quantile="0.9"} 0
|
||||
http_request_duration_microseconds{handler="/alerts",quantile="0.99"} 0
|
||||
http_request_duration_microseconds_sum{handler="/alerts"} 0
|
||||
http_request_duration_microseconds_count{handler="/alerts"} 0
|
||||
http_request_duration_microseconds{handler="/api/metrics",quantile="0.5"} 0
|
||||
http_request_duration_microseconds{handler="/api/metrics",quantile="0.9"} 0
|
||||
http_request_duration_microseconds{handler="/api/metrics",quantile="0.99"} 0
|
||||
http_request_duration_microseconds_sum{handler="/api/metrics"} 0
|
||||
http_request_duration_microseconds_count{handler="/api/metrics"} 0
|
||||
http_request_duration_microseconds{handler="/api/query",quantile="0.5"} 0
|
||||
http_request_duration_microseconds{handler="/api/query",quantile="0.9"} 0
|
||||
http_request_duration_microseconds{handler="/api/query",quantile="0.99"} 0
|
||||
http_request_duration_microseconds_sum{handler="/api/query"} 0
|
||||
http_request_duration_microseconds_count{handler="/api/query"} 0
|
||||
http_request_duration_microseconds{handler="/api/query_range",quantile="0.5"} 0
|
||||
http_request_duration_microseconds{handler="/api/query_range",quantile="0.9"} 0
|
||||
http_request_duration_microseconds{handler="/api/query_range",quantile="0.99"} 0
|
||||
http_request_duration_microseconds_sum{handler="/api/query_range"} 0
|
||||
http_request_duration_microseconds_count{handler="/api/query_range"} 0
|
||||
http_request_duration_microseconds{handler="/api/targets",quantile="0.5"} 0
|
||||
http_request_duration_microseconds{handler="/api/targets",quantile="0.9"} 0
|
||||
http_request_duration_microseconds{handler="/api/targets",quantile="0.99"} 0
|
||||
http_request_duration_microseconds_sum{handler="/api/targets"} 0
|
||||
http_request_duration_microseconds_count{handler="/api/targets"} 0
|
||||
http_request_duration_microseconds{handler="/consoles/",quantile="0.5"} 0
|
||||
http_request_duration_microseconds{handler="/consoles/",quantile="0.9"} 0
|
||||
http_request_duration_microseconds{handler="/consoles/",quantile="0.99"} 0
|
||||
http_request_duration_microseconds_sum{handler="/consoles/"} 0
|
||||
http_request_duration_microseconds_count{handler="/consoles/"} 0
|
||||
http_request_duration_microseconds{handler="/graph",quantile="0.5"} 0
|
||||
http_request_duration_microseconds{handler="/graph",quantile="0.9"} 0
|
||||
http_request_duration_microseconds{handler="/graph",quantile="0.99"} 0
|
||||
http_request_duration_microseconds_sum{handler="/graph"} 0
|
||||
http_request_duration_microseconds_count{handler="/graph"} 0
|
||||
http_request_duration_microseconds{handler="/heap",quantile="0.5"} 0
|
||||
http_request_duration_microseconds{handler="/heap",quantile="0.9"} 0
|
||||
http_request_duration_microseconds{handler="/heap",quantile="0.99"} 0
|
||||
http_request_duration_microseconds_sum{handler="/heap"} 0
|
||||
http_request_duration_microseconds_count{handler="/heap"} 0
|
||||
http_request_duration_microseconds{handler="/static/",quantile="0.5"} 0
|
||||
http_request_duration_microseconds{handler="/static/",quantile="0.9"} 0
|
||||
http_request_duration_microseconds{handler="/static/",quantile="0.99"} 0
|
||||
http_request_duration_microseconds_sum{handler="/static/"} 0
|
||||
http_request_duration_microseconds_count{handler="/static/"} 0
|
||||
http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1307.275
|
||||
http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1858.632
|
||||
http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 3087.384
|
||||
http_request_duration_microseconds_sum{handler="prometheus"} 179886.5000000001
|
||||
http_request_duration_microseconds_count{handler="prometheus"} 119
|
||||
# HELP http_request_size_bytes The HTTP request sizes in bytes.
|
||||
# TYPE http_request_size_bytes summary
|
||||
http_request_size_bytes{handler="/",quantile="0.5"} 0
|
||||
http_request_size_bytes{handler="/",quantile="0.9"} 0
|
||||
http_request_size_bytes{handler="/",quantile="0.99"} 0
|
||||
http_request_size_bytes_sum{handler="/"} 0
|
||||
http_request_size_bytes_count{handler="/"} 0
|
||||
http_request_size_bytes{handler="/alerts",quantile="0.5"} 0
|
||||
http_request_size_bytes{handler="/alerts",quantile="0.9"} 0
|
||||
http_request_size_bytes{handler="/alerts",quantile="0.99"} 0
|
||||
http_request_size_bytes_sum{handler="/alerts"} 0
|
||||
http_request_size_bytes_count{handler="/alerts"} 0
|
||||
http_request_size_bytes{handler="/api/metrics",quantile="0.5"} 0
|
||||
http_request_size_bytes{handler="/api/metrics",quantile="0.9"} 0
|
||||
http_request_size_bytes{handler="/api/metrics",quantile="0.99"} 0
|
||||
http_request_size_bytes_sum{handler="/api/metrics"} 0
|
||||
http_request_size_bytes_count{handler="/api/metrics"} 0
|
||||
http_request_size_bytes{handler="/api/query",quantile="0.5"} 0
|
||||
http_request_size_bytes{handler="/api/query",quantile="0.9"} 0
|
||||
http_request_size_bytes{handler="/api/query",quantile="0.99"} 0
|
||||
http_request_size_bytes_sum{handler="/api/query"} 0
|
||||
http_request_size_bytes_count{handler="/api/query"} 0
|
||||
http_request_size_bytes{handler="/api/query_range",quantile="0.5"} 0
|
||||
http_request_size_bytes{handler="/api/query_range",quantile="0.9"} 0
|
||||
http_request_size_bytes{handler="/api/query_range",quantile="0.99"} 0
|
||||
http_request_size_bytes_sum{handler="/api/query_range"} 0
|
||||
http_request_size_bytes_count{handler="/api/query_range"} 0
|
||||
http_request_size_bytes{handler="/api/targets",quantile="0.5"} 0
|
||||
http_request_size_bytes{handler="/api/targets",quantile="0.9"} 0
|
||||
http_request_size_bytes{handler="/api/targets",quantile="0.99"} 0
|
||||
http_request_size_bytes_sum{handler="/api/targets"} 0
|
||||
http_request_size_bytes_count{handler="/api/targets"} 0
|
||||
http_request_size_bytes{handler="/consoles/",quantile="0.5"} 0
|
||||
http_request_size_bytes{handler="/consoles/",quantile="0.9"} 0
|
||||
http_request_size_bytes{handler="/consoles/",quantile="0.99"} 0
|
||||
http_request_size_bytes_sum{handler="/consoles/"} 0
|
||||
http_request_size_bytes_count{handler="/consoles/"} 0
|
||||
http_request_size_bytes{handler="/graph",quantile="0.5"} 0
|
||||
http_request_size_bytes{handler="/graph",quantile="0.9"} 0
|
||||
http_request_size_bytes{handler="/graph",quantile="0.99"} 0
|
||||
http_request_size_bytes_sum{handler="/graph"} 0
|
||||
http_request_size_bytes_count{handler="/graph"} 0
|
||||
http_request_size_bytes{handler="/heap",quantile="0.5"} 0
|
||||
http_request_size_bytes{handler="/heap",quantile="0.9"} 0
|
||||
http_request_size_bytes{handler="/heap",quantile="0.99"} 0
|
||||
http_request_size_bytes_sum{handler="/heap"} 0
|
||||
http_request_size_bytes_count{handler="/heap"} 0
|
||||
http_request_size_bytes{handler="/static/",quantile="0.5"} 0
|
||||
http_request_size_bytes{handler="/static/",quantile="0.9"} 0
|
||||
http_request_size_bytes{handler="/static/",quantile="0.99"} 0
|
||||
http_request_size_bytes_sum{handler="/static/"} 0
|
||||
http_request_size_bytes_count{handler="/static/"} 0
|
||||
http_request_size_bytes{handler="prometheus",quantile="0.5"} 291
|
||||
http_request_size_bytes{handler="prometheus",quantile="0.9"} 291
|
||||
http_request_size_bytes{handler="prometheus",quantile="0.99"} 291
|
||||
http_request_size_bytes_sum{handler="prometheus"} 34488
|
||||
http_request_size_bytes_count{handler="prometheus"} 119
|
||||
# HELP http_requests_total Total number of HTTP requests made.
|
||||
# TYPE http_requests_total counter
|
||||
http_requests_total{code="200",handler="prometheus",method="get"} 119
|
||||
# HELP http_response_size_bytes The HTTP response sizes in bytes.
|
||||
# TYPE http_response_size_bytes summary
|
||||
http_response_size_bytes{handler="/",quantile="0.5"} 0
|
||||
http_response_size_bytes{handler="/",quantile="0.9"} 0
|
||||
http_response_size_bytes{handler="/",quantile="0.99"} 0
|
||||
http_response_size_bytes_sum{handler="/"} 0
|
||||
http_response_size_bytes_count{handler="/"} 0
|
||||
http_response_size_bytes{handler="/alerts",quantile="0.5"} 0
|
||||
http_response_size_bytes{handler="/alerts",quantile="0.9"} 0
|
||||
http_response_size_bytes{handler="/alerts",quantile="0.99"} 0
|
||||
http_response_size_bytes_sum{handler="/alerts"} 0
|
||||
http_response_size_bytes_count{handler="/alerts"} 0
|
||||
http_response_size_bytes{handler="/api/metrics",quantile="0.5"} 0
|
||||
http_response_size_bytes{handler="/api/metrics",quantile="0.9"} 0
|
||||
http_response_size_bytes{handler="/api/metrics",quantile="0.99"} 0
|
||||
http_response_size_bytes_sum{handler="/api/metrics"} 0
|
||||
http_response_size_bytes_count{handler="/api/metrics"} 0
|
||||
http_response_size_bytes{handler="/api/query",quantile="0.5"} 0
|
||||
http_response_size_bytes{handler="/api/query",quantile="0.9"} 0
|
||||
http_response_size_bytes{handler="/api/query",quantile="0.99"} 0
|
||||
http_response_size_bytes_sum{handler="/api/query"} 0
|
||||
http_response_size_bytes_count{handler="/api/query"} 0
|
||||
http_response_size_bytes{handler="/api/query_range",quantile="0.5"} 0
|
||||
http_response_size_bytes{handler="/api/query_range",quantile="0.9"} 0
|
||||
http_response_size_bytes{handler="/api/query_range",quantile="0.99"} 0
|
||||
http_response_size_bytes_sum{handler="/api/query_range"} 0
|
||||
http_response_size_bytes_count{handler="/api/query_range"} 0
|
||||
http_response_size_bytes{handler="/api/targets",quantile="0.5"} 0
|
||||
http_response_size_bytes{handler="/api/targets",quantile="0.9"} 0
|
||||
http_response_size_bytes{handler="/api/targets",quantile="0.99"} 0
|
||||
http_response_size_bytes_sum{handler="/api/targets"} 0
|
||||
http_response_size_bytes_count{handler="/api/targets"} 0
|
||||
http_response_size_bytes{handler="/consoles/",quantile="0.5"} 0
|
||||
http_response_size_bytes{handler="/consoles/",quantile="0.9"} 0
|
||||
http_response_size_bytes{handler="/consoles/",quantile="0.99"} 0
|
||||
http_response_size_bytes_sum{handler="/consoles/"} 0
|
||||
http_response_size_bytes_count{handler="/consoles/"} 0
|
||||
http_response_size_bytes{handler="/graph",quantile="0.5"} 0
|
||||
http_response_size_bytes{handler="/graph",quantile="0.9"} 0
|
||||
http_response_size_bytes{handler="/graph",quantile="0.99"} 0
|
||||
http_response_size_bytes_sum{handler="/graph"} 0
|
||||
http_response_size_bytes_count{handler="/graph"} 0
|
||||
http_response_size_bytes{handler="/heap",quantile="0.5"} 0
|
||||
http_response_size_bytes{handler="/heap",quantile="0.9"} 0
|
||||
http_response_size_bytes{handler="/heap",quantile="0.99"} 0
|
||||
http_response_size_bytes_sum{handler="/heap"} 0
|
||||
http_response_size_bytes_count{handler="/heap"} 0
|
||||
http_response_size_bytes{handler="/static/",quantile="0.5"} 0
|
||||
http_response_size_bytes{handler="/static/",quantile="0.9"} 0
|
||||
http_response_size_bytes{handler="/static/",quantile="0.99"} 0
|
||||
http_response_size_bytes_sum{handler="/static/"} 0
|
||||
http_response_size_bytes_count{handler="/static/"} 0
|
||||
http_response_size_bytes{handler="prometheus",quantile="0.5"} 2049
|
||||
http_response_size_bytes{handler="prometheus",quantile="0.9"} 2058
|
||||
http_response_size_bytes{handler="prometheus",quantile="0.99"} 2064
|
||||
http_response_size_bytes_sum{handler="prometheus"} 247001
|
||||
http_response_size_bytes_count{handler="prometheus"} 119
|
||||
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
|
||||
# TYPE process_cpu_seconds_total counter
|
||||
process_cpu_seconds_total 0.55
|
||||
# HELP go_goroutines Number of goroutines that currently exist.
|
||||
# TYPE go_goroutines gauge
|
||||
go_goroutines 70
|
||||
# HELP process_max_fds Maximum number of open file descriptors.
|
||||
# TYPE process_max_fds gauge
|
||||
process_max_fds 8192
|
||||
# HELP process_open_fds Number of open file descriptors.
|
||||
# TYPE process_open_fds gauge
|
||||
process_open_fds 29
|
||||
# HELP process_resident_memory_bytes Resident memory size in bytes.
|
||||
# TYPE process_resident_memory_bytes gauge
|
||||
process_resident_memory_bytes 5.3870592e+07
|
||||
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
|
||||
# TYPE process_start_time_seconds gauge
|
||||
process_start_time_seconds 1.42236894836e+09
|
||||
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
|
||||
# TYPE process_virtual_memory_bytes gauge
|
||||
process_virtual_memory_bytes 5.41478912e+08
|
||||
# HELP prometheus_dns_sd_lookup_failures_total The number of DNS-SD lookup failures.
|
||||
# TYPE prometheus_dns_sd_lookup_failures_total counter
|
||||
prometheus_dns_sd_lookup_failures_total 0
|
||||
# HELP prometheus_dns_sd_lookups_total The number of DNS-SD lookups.
|
||||
# TYPE prometheus_dns_sd_lookups_total counter
|
||||
prometheus_dns_sd_lookups_total 7
|
||||
# HELP prometheus_evaluator_duration_milliseconds The duration for all evaluations to execute.
|
||||
# TYPE prometheus_evaluator_duration_milliseconds summary
|
||||
prometheus_evaluator_duration_milliseconds{quantile="0.01"} 0
|
||||
prometheus_evaluator_duration_milliseconds{quantile="0.05"} 0
|
||||
prometheus_evaluator_duration_milliseconds{quantile="0.5"} 0
|
||||
prometheus_evaluator_duration_milliseconds{quantile="0.9"} 1
|
||||
prometheus_evaluator_duration_milliseconds{quantile="0.99"} 1
|
||||
prometheus_evaluator_duration_milliseconds_sum 12
|
||||
prometheus_evaluator_duration_milliseconds_count 23
|
||||
# HELP prometheus_local_storage_checkpoint_duration_milliseconds The duration (in milliseconds) it took to checkpoint in-memory metrics and head chunks.
|
||||
# TYPE prometheus_local_storage_checkpoint_duration_milliseconds gauge
|
||||
prometheus_local_storage_checkpoint_duration_milliseconds 0
|
||||
# HELP prometheus_local_storage_chunk_ops_total The total number of chunk operations by their type.
|
||||
# TYPE prometheus_local_storage_chunk_ops_total counter
|
||||
prometheus_local_storage_chunk_ops_total{type="create"} 598
|
||||
prometheus_local_storage_chunk_ops_total{type="persist"} 174
|
||||
prometheus_local_storage_chunk_ops_total{type="pin"} 920
|
||||
prometheus_local_storage_chunk_ops_total{type="transcode"} 415
|
||||
prometheus_local_storage_chunk_ops_total{type="unpin"} 920
|
||||
# HELP prometheus_local_storage_indexing_batch_latency_milliseconds Quantiles for batch indexing latencies in milliseconds.
|
||||
# TYPE prometheus_local_storage_indexing_batch_latency_milliseconds summary
|
||||
prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.5"} 0
|
||||
prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.9"} 0
|
||||
prometheus_local_storage_indexing_batch_latency_milliseconds{quantile="0.99"} 0
|
||||
prometheus_local_storage_indexing_batch_latency_milliseconds_sum 0
|
||||
prometheus_local_storage_indexing_batch_latency_milliseconds_count 1
|
||||
# HELP prometheus_local_storage_indexing_batch_sizes Quantiles for indexing batch sizes (number of metrics per batch).
|
||||
# TYPE prometheus_local_storage_indexing_batch_sizes summary
|
||||
prometheus_local_storage_indexing_batch_sizes{quantile="0.5"} 2
|
||||
prometheus_local_storage_indexing_batch_sizes{quantile="0.9"} 2
|
||||
prometheus_local_storage_indexing_batch_sizes{quantile="0.99"} 2
|
||||
prometheus_local_storage_indexing_batch_sizes_sum 2
|
||||
prometheus_local_storage_indexing_batch_sizes_count 1
|
||||
# HELP prometheus_local_storage_indexing_queue_capacity The capacity of the indexing queue.
|
||||
# TYPE prometheus_local_storage_indexing_queue_capacity gauge
|
||||
prometheus_local_storage_indexing_queue_capacity 16384
|
||||
# HELP prometheus_local_storage_indexing_queue_length The number of metrics waiting to be indexed.
|
||||
# TYPE prometheus_local_storage_indexing_queue_length gauge
|
||||
prometheus_local_storage_indexing_queue_length 0
|
||||
# HELP prometheus_local_storage_ingested_samples_total The total number of samples ingested.
|
||||
# TYPE prometheus_local_storage_ingested_samples_total counter
|
||||
prometheus_local_storage_ingested_samples_total 30473
|
||||
# HELP prometheus_local_storage_invalid_preload_requests_total The total number of preload requests referring to a non-existent series. This is an indication of outdated label indexes.
|
||||
# TYPE prometheus_local_storage_invalid_preload_requests_total counter
|
||||
prometheus_local_storage_invalid_preload_requests_total 0
|
||||
# HELP prometheus_local_storage_memory_chunkdescs The current number of chunk descriptors in memory.
|
||||
# TYPE prometheus_local_storage_memory_chunkdescs gauge
|
||||
prometheus_local_storage_memory_chunkdescs 1059
|
||||
# HELP prometheus_local_storage_memory_chunks The current number of chunks in memory, excluding cloned chunks (i.e. chunks without a descriptor).
|
||||
# TYPE prometheus_local_storage_memory_chunks gauge
|
||||
prometheus_local_storage_memory_chunks 1020
|
||||
# HELP prometheus_local_storage_memory_series The current number of series in memory.
|
||||
# TYPE prometheus_local_storage_memory_series gauge
|
||||
prometheus_local_storage_memory_series 424
|
||||
# HELP prometheus_local_storage_persist_latency_microseconds A summary of latencies for persisting each chunk.
|
||||
# TYPE prometheus_local_storage_persist_latency_microseconds summary
|
||||
prometheus_local_storage_persist_latency_microseconds{quantile="0.5"} 30.377
|
||||
prometheus_local_storage_persist_latency_microseconds{quantile="0.9"} 203.539
|
||||
prometheus_local_storage_persist_latency_microseconds{quantile="0.99"} 2626.463
|
||||
prometheus_local_storage_persist_latency_microseconds_sum 20424.415
|
||||
prometheus_local_storage_persist_latency_microseconds_count 174
|
||||
# HELP prometheus_local_storage_persist_queue_capacity The total capacity of the persist queue.
|
||||
# TYPE prometheus_local_storage_persist_queue_capacity gauge
|
||||
prometheus_local_storage_persist_queue_capacity 1024
|
||||
# HELP prometheus_local_storage_persist_queue_length The current number of chunks waiting in the persist queue.
|
||||
# TYPE prometheus_local_storage_persist_queue_length gauge
|
||||
prometheus_local_storage_persist_queue_length 0
|
||||
# HELP prometheus_local_storage_series_ops_total The total number of series operations by their type.
|
||||
# TYPE prometheus_local_storage_series_ops_total counter
|
||||
prometheus_local_storage_series_ops_total{type="create"} 2
|
||||
prometheus_local_storage_series_ops_total{type="maintenance_in_memory"} 11
|
||||
# HELP prometheus_notifications_latency_milliseconds Latency quantiles for sending alert notifications (not including dropped notifications).
|
||||
# TYPE prometheus_notifications_latency_milliseconds summary
|
||||
prometheus_notifications_latency_milliseconds{quantile="0.5"} 0
|
||||
prometheus_notifications_latency_milliseconds{quantile="0.9"} 0
|
||||
prometheus_notifications_latency_milliseconds{quantile="0.99"} 0
|
||||
prometheus_notifications_latency_milliseconds_sum 0
|
||||
prometheus_notifications_latency_milliseconds_count 0
|
||||
# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
|
||||
# TYPE prometheus_notifications_queue_capacity gauge
|
||||
prometheus_notifications_queue_capacity 100
|
||||
# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
|
||||
# TYPE prometheus_notifications_queue_length gauge
|
||||
prometheus_notifications_queue_length 0
|
||||
# HELP prometheus_rule_evaluation_duration_milliseconds The duration for a rule to execute.
|
||||
# TYPE prometheus_rule_evaluation_duration_milliseconds summary
|
||||
prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.5"} 0
|
||||
prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.9"} 0
|
||||
prometheus_rule_evaluation_duration_milliseconds{rule_type="alerting",quantile="0.99"} 2
|
||||
prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="alerting"} 12
|
||||
prometheus_rule_evaluation_duration_milliseconds_count{rule_type="alerting"} 115
|
||||
prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.5"} 0
|
||||
prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.9"} 0
|
||||
prometheus_rule_evaluation_duration_milliseconds{rule_type="recording",quantile="0.99"} 3
|
||||
prometheus_rule_evaluation_duration_milliseconds_sum{rule_type="recording"} 15
|
||||
prometheus_rule_evaluation_duration_milliseconds_count{rule_type="recording"} 115
|
||||
# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
|
||||
# TYPE prometheus_rule_evaluation_failures_total counter
|
||||
prometheus_rule_evaluation_failures_total 0
|
||||
# HELP prometheus_samples_queue_capacity Capacity of the queue for unwritten samples.
|
||||
# TYPE prometheus_samples_queue_capacity gauge
|
||||
prometheus_samples_queue_capacity 4096
|
||||
# HELP prometheus_samples_queue_length Current number of items in the queue for unwritten samples. Each item comprises all samples exposed by one target as one metric family (i.e. metrics of the same name).
|
||||
# TYPE prometheus_samples_queue_length gauge
|
||||
prometheus_samples_queue_length 0
|
||||
# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
|
||||
# TYPE prometheus_target_interval_length_seconds summary
|
||||
prometheus_target_interval_length_seconds{interval="15s",quantile="0.01"} 14
|
||||
prometheus_target_interval_length_seconds{interval="15s",quantile="0.05"} 14
|
||||
prometheus_target_interval_length_seconds{interval="15s",quantile="0.5"} 15
|
||||
prometheus_target_interval_length_seconds{interval="15s",quantile="0.9"} 15
|
||||
prometheus_target_interval_length_seconds{interval="15s",quantile="0.99"} 15
|
||||
prometheus_target_interval_length_seconds_sum{interval="15s"} 175
|
||||
prometheus_target_interval_length_seconds_count{interval="15s"} 12
|
||||
prometheus_target_interval_length_seconds{interval="1s",quantile="0.01"} 0
|
||||
prometheus_target_interval_length_seconds{interval="1s",quantile="0.05"} 0
|
||||
prometheus_target_interval_length_seconds{interval="1s",quantile="0.5"} 0
|
||||
prometheus_target_interval_length_seconds{interval="1s",quantile="0.9"} 1
|
||||
prometheus_target_interval_length_seconds{interval="1s",quantile="0.99"} 1
|
||||
prometheus_target_interval_length_seconds_sum{interval="1s"} 55
|
||||
prometheus_target_interval_length_seconds_count{interval="1s"} 117
|
||||
305
vendor/github.com/prometheus/common/expfmt/text_create.go
generated
vendored
Normal file
305
vendor/github.com/prometheus/common/expfmt/text_create.go
generated
vendored
Normal file
|
|
@ -0,0 +1,305 @@
|
|||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"strings"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// MetricFamilyToText converts a MetricFamily proto message into text format and
// writes the resulting lines to 'out'. It returns the number of bytes written
// and any error encountered. This function does not perform checks on the
// content of the metric and label names, i.e. invalid metric or label names
// will result in invalid text format output.
// This method fulfills the type 'prometheus.encoder'.
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
	// written accumulates bytes across all partial writes so callers get an
	// accurate count even when an error aborts the encoding midway.
	var written int

	// Fail-fast checks.
	if len(in.Metric) == 0 {
		return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
	}
	name := in.GetName()
	if name == "" {
		return written, fmt.Errorf("MetricFamily has no name: %s", in)
	}

	// Comments, first HELP, then TYPE.
	if in.Help != nil {
		// HELP text escapes backslashes and newlines only; double quotes are
		// legal in a HELP line, hence includeDoubleQuote=false.
		n, err := fmt.Fprintf(
			out, "# HELP %s %s\n",
			name, escapeString(*in.Help, false),
		)
		written += n
		if err != nil {
			return written, err
		}
	}
	metricType := in.GetType()
	n, err := fmt.Fprintf(
		out, "# TYPE %s %s\n",
		name, strings.ToLower(metricType.String()),
	)
	written += n
	if err != nil {
		return written, err
	}

	// Finally the samples, one line for each.
	// Note: n and err declared above are deliberately reused by every case;
	// the shared bookkeeping (written += n; err check) for the last write of
	// each case happens once at the bottom of the loop.
	for _, metric := range in.Metric {
		switch metricType {
		case dto.MetricType_COUNTER:
			if metric.Counter == nil {
				return written, fmt.Errorf(
					"expected counter in metric %s %s", name, metric,
				)
			}
			n, err = writeSample(
				name, metric, "", "",
				metric.Counter.GetValue(),
				out,
			)
		case dto.MetricType_GAUGE:
			if metric.Gauge == nil {
				return written, fmt.Errorf(
					"expected gauge in metric %s %s", name, metric,
				)
			}
			n, err = writeSample(
				name, metric, "", "",
				metric.Gauge.GetValue(),
				out,
			)
		case dto.MetricType_UNTYPED:
			if metric.Untyped == nil {
				return written, fmt.Errorf(
					"expected untyped in metric %s %s", name, metric,
				)
			}
			n, err = writeSample(
				name, metric, "", "",
				metric.Untyped.GetValue(),
				out,
			)
		case dto.MetricType_SUMMARY:
			if metric.Summary == nil {
				return written, fmt.Errorf(
					"expected summary in metric %s %s", name, metric,
				)
			}
			// One sample line per quantile, labeled with 'quantile'.
			for _, q := range metric.Summary.Quantile {
				n, err = writeSample(
					name, metric,
					model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
					q.GetValue(),
					out,
				)
				written += n
				if err != nil {
					return written, err
				}
			}
			n, err = writeSample(
				name+"_sum", metric, "", "",
				metric.Summary.GetSampleSum(),
				out,
			)
			// NOTE(review): on error here, n is not added to written before
			// returning — inconsistent with the accounting order used in the
			// other branches of this function.
			if err != nil {
				return written, err
			}
			written += n
			n, err = writeSample(
				name+"_count", metric, "", "",
				float64(metric.Summary.GetSampleCount()),
				out,
			)
		case dto.MetricType_HISTOGRAM:
			if metric.Histogram == nil {
				return written, fmt.Errorf(
					"expected histogram in metric %s %s", name, metric,
				)
			}
			// One sample line per bucket, labeled with 'le' (upper bound).
			infSeen := false
			for _, q := range metric.Histogram.Bucket {
				n, err = writeSample(
					name+"_bucket", metric,
					model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
					float64(q.GetCumulativeCount()),
					out,
				)
				written += n
				if err != nil {
					return written, err
				}
				if math.IsInf(q.GetUpperBound(), +1) {
					infSeen = true
				}
			}
			// Synthesize the terminal +Inf bucket from the total sample count
			// if the input did not provide one.
			if !infSeen {
				n, err = writeSample(
					name+"_bucket", metric,
					model.BucketLabel, "+Inf",
					float64(metric.Histogram.GetSampleCount()),
					out,
				)
				if err != nil {
					return written, err
				}
				written += n
			}
			n, err = writeSample(
				name+"_sum", metric, "", "",
				metric.Histogram.GetSampleSum(),
				out,
			)
			if err != nil {
				return written, err
			}
			written += n
			n, err = writeSample(
				name+"_count", metric, "", "",
				float64(metric.Histogram.GetSampleCount()),
				out,
			)
		default:
			return written, fmt.Errorf(
				"unexpected type in metric %s %s", name, metric,
			)
		}
		// Account for the last writeSample call of the case above.
		written += n
		if err != nil {
			return written, err
		}
	}
	return written, nil
}
|
||||
|
||||
// writeSample writes a single sample in text format to out, given the metric
|
||||
// name, the metric proto message itself, optionally an additional label name
|
||||
// and value (use empty strings if not required), and the value. The function
|
||||
// returns the number of bytes written and any error encountered.
|
||||
func writeSample(
|
||||
name string,
|
||||
metric *dto.Metric,
|
||||
additionalLabelName, additionalLabelValue string,
|
||||
value float64,
|
||||
out io.Writer,
|
||||
) (int, error) {
|
||||
var written int
|
||||
n, err := fmt.Fprint(out, name)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = labelPairsToText(
|
||||
metric.Label,
|
||||
additionalLabelName, additionalLabelValue,
|
||||
out,
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
n, err = fmt.Fprintf(out, " %v", value)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
if metric.TimestampMs != nil {
|
||||
n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
n, err = out.Write([]byte{'\n'})
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
// labelPairsToText converts a slice of LabelPair proto messages plus the
|
||||
// explicitly given additional label pair into text formatted as required by the
|
||||
// text format and writes it to 'out'. An empty slice in combination with an
|
||||
// empty string 'additionalLabelName' results in nothing being
|
||||
// written. Otherwise, the label pairs are written, escaped as required by the
|
||||
// text format, and enclosed in '{...}'. The function returns the number of
|
||||
// bytes written and any error encountered.
|
||||
func labelPairsToText(
|
||||
in []*dto.LabelPair,
|
||||
additionalLabelName, additionalLabelValue string,
|
||||
out io.Writer,
|
||||
) (int, error) {
|
||||
if len(in) == 0 && additionalLabelName == "" {
|
||||
return 0, nil
|
||||
}
|
||||
var written int
|
||||
separator := '{'
|
||||
for _, lp := range in {
|
||||
n, err := fmt.Fprintf(
|
||||
out, `%c%s="%s"`,
|
||||
separator, lp.GetName(), escapeString(lp.GetValue(), true),
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
separator = ','
|
||||
}
|
||||
if additionalLabelName != "" {
|
||||
n, err := fmt.Fprintf(
|
||||
out, `%c%s="%s"`,
|
||||
separator, additionalLabelName,
|
||||
escapeString(additionalLabelValue, true),
|
||||
)
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
}
|
||||
n, err := out.Write([]byte{'}'})
|
||||
written += n
|
||||
if err != nil {
|
||||
return written, err
|
||||
}
|
||||
return written, nil
|
||||
}
|
||||
|
||||
// escapeString returns v with every '\' rendered as '\\' and every newline
// rendered as '\n'. When includeDoubleQuote is true, '"' is additionally
// rendered as '\"'. All other runes are copied through unchanged.
func escapeString(v string, includeDoubleQuote bool) string {
	var escaped bytes.Buffer
	escaped.Grow(len(v))
	for _, r := range v {
		switch r {
		case '\\':
			escaped.WriteString(`\\`)
		case '\n':
			escaped.WriteString(`\n`)
		case '"':
			if includeDoubleQuote {
				escaped.WriteString(`\"`)
			} else {
				escaped.WriteRune(r)
			}
		default:
			escaped.WriteRune(r)
		}
	}
	return escaped.String()
}
|
||||
443
vendor/github.com/prometheus/common/expfmt/text_create_test.go
generated
vendored
Normal file
443
vendor/github.com/prometheus/common/expfmt/text_create_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,443 @@
|
|||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// testCreate exercises MetricFamilyToText against a table of MetricFamily
// fixtures and their expected text-format renderings (including byte counts).
// It takes testing.TB so the same table drives both TestCreate and
// BenchmarkCreate.
func testCreate(t testing.TB) {
	var scenarios = []struct {
		in  *dto.MetricFamily
		out string
	}{
		// 0: Counter, NaN as value, timestamp given.
		{
			in: &dto.MetricFamily{
				Name: proto.String("name"),
				Help: proto.String("two-line\n doc str\\ing"),
				Type: dto.MetricType_COUNTER.Enum(),
				Metric: []*dto.Metric{
					&dto.Metric{
						Label: []*dto.LabelPair{
							&dto.LabelPair{
								Name:  proto.String("labelname"),
								Value: proto.String("val1"),
							},
							&dto.LabelPair{
								Name:  proto.String("basename"),
								Value: proto.String("basevalue"),
							},
						},
						Counter: &dto.Counter{
							Value: proto.Float64(math.NaN()),
						},
					},
					&dto.Metric{
						Label: []*dto.LabelPair{
							&dto.LabelPair{
								Name:  proto.String("labelname"),
								Value: proto.String("val2"),
							},
							&dto.LabelPair{
								Name:  proto.String("basename"),
								Value: proto.String("basevalue"),
							},
						},
						Counter: &dto.Counter{
							Value: proto.Float64(.23),
						},
						TimestampMs: proto.Int64(1234567890),
					},
				},
			},
			out: `# HELP name two-line\n doc str\\ing
# TYPE name counter
name{labelname="val1",basename="basevalue"} NaN
name{labelname="val2",basename="basevalue"} 0.23 1234567890
`,
		},
		// 1: Gauge, some escaping required, +Inf as value, multi-byte characters in label values.
		{
			in: &dto.MetricFamily{
				Name: proto.String("gauge_name"),
				Help: proto.String("gauge\ndoc\nstr\"ing"),
				Type: dto.MetricType_GAUGE.Enum(),
				Metric: []*dto.Metric{
					&dto.Metric{
						Label: []*dto.LabelPair{
							&dto.LabelPair{
								Name:  proto.String("name_1"),
								Value: proto.String("val with\nnew line"),
							},
							&dto.LabelPair{
								Name:  proto.String("name_2"),
								Value: proto.String("val with \\backslash and \"quotes\""),
							},
						},
						Gauge: &dto.Gauge{
							Value: proto.Float64(math.Inf(+1)),
						},
					},
					&dto.Metric{
						Label: []*dto.LabelPair{
							&dto.LabelPair{
								Name:  proto.String("name_1"),
								Value: proto.String("Björn"),
							},
							&dto.LabelPair{
								Name:  proto.String("name_2"),
								Value: proto.String("佖佥"),
							},
						},
						Gauge: &dto.Gauge{
							Value: proto.Float64(3.14E42),
						},
					},
				},
			},
			out: `# HELP gauge_name gauge\ndoc\nstr"ing
# TYPE gauge_name gauge
gauge_name{name_1="val with\nnew line",name_2="val with \\backslash and \"quotes\""} +Inf
gauge_name{name_1="Björn",name_2="佖佥"} 3.14e+42
`,
		},
		// 2: Untyped, no help, one sample with no labels and -Inf as value, another sample with one label.
		{
			in: &dto.MetricFamily{
				Name: proto.String("untyped_name"),
				Type: dto.MetricType_UNTYPED.Enum(),
				Metric: []*dto.Metric{
					&dto.Metric{
						Untyped: &dto.Untyped{
							Value: proto.Float64(math.Inf(-1)),
						},
					},
					&dto.Metric{
						Label: []*dto.LabelPair{
							&dto.LabelPair{
								Name:  proto.String("name_1"),
								Value: proto.String("value 1"),
							},
						},
						Untyped: &dto.Untyped{
							Value: proto.Float64(-1.23e-45),
						},
					},
				},
			},
			out: `# TYPE untyped_name untyped
untyped_name -Inf
untyped_name{name_1="value 1"} -1.23e-45
`,
		},
		// 3: Summary.
		{
			in: &dto.MetricFamily{
				Name: proto.String("summary_name"),
				Help: proto.String("summary docstring"),
				Type: dto.MetricType_SUMMARY.Enum(),
				Metric: []*dto.Metric{
					&dto.Metric{
						Summary: &dto.Summary{
							SampleCount: proto.Uint64(42),
							SampleSum:   proto.Float64(-3.4567),
							Quantile: []*dto.Quantile{
								&dto.Quantile{
									Quantile: proto.Float64(0.5),
									Value:    proto.Float64(-1.23),
								},
								&dto.Quantile{
									Quantile: proto.Float64(0.9),
									Value:    proto.Float64(.2342354),
								},
								&dto.Quantile{
									Quantile: proto.Float64(0.99),
									Value:    proto.Float64(0),
								},
							},
						},
					},
					&dto.Metric{
						Label: []*dto.LabelPair{
							&dto.LabelPair{
								Name:  proto.String("name_1"),
								Value: proto.String("value 1"),
							},
							&dto.LabelPair{
								Name:  proto.String("name_2"),
								Value: proto.String("value 2"),
							},
						},
						Summary: &dto.Summary{
							SampleCount: proto.Uint64(4711),
							SampleSum:   proto.Float64(2010.1971),
							Quantile: []*dto.Quantile{
								&dto.Quantile{
									Quantile: proto.Float64(0.5),
									Value:    proto.Float64(1),
								},
								&dto.Quantile{
									Quantile: proto.Float64(0.9),
									Value:    proto.Float64(2),
								},
								&dto.Quantile{
									Quantile: proto.Float64(0.99),
									Value:    proto.Float64(3),
								},
							},
						},
					},
				},
			},
			out: `# HELP summary_name summary docstring
# TYPE summary_name summary
summary_name{quantile="0.5"} -1.23
summary_name{quantile="0.9"} 0.2342354
summary_name{quantile="0.99"} 0
summary_name_sum -3.4567
summary_name_count 42
summary_name{name_1="value 1",name_2="value 2",quantile="0.5"} 1
summary_name{name_1="value 1",name_2="value 2",quantile="0.9"} 2
summary_name{name_1="value 1",name_2="value 2",quantile="0.99"} 3
summary_name_sum{name_1="value 1",name_2="value 2"} 2010.1971
summary_name_count{name_1="value 1",name_2="value 2"} 4711
`,
		},
		// 4: Histogram
		{
			in: &dto.MetricFamily{
				Name: proto.String("request_duration_microseconds"),
				Help: proto.String("The response latency."),
				Type: dto.MetricType_HISTOGRAM.Enum(),
				Metric: []*dto.Metric{
					&dto.Metric{
						Histogram: &dto.Histogram{
							SampleCount: proto.Uint64(2693),
							SampleSum:   proto.Float64(1756047.3),
							Bucket: []*dto.Bucket{
								&dto.Bucket{
									UpperBound:      proto.Float64(100),
									CumulativeCount: proto.Uint64(123),
								},
								&dto.Bucket{
									UpperBound:      proto.Float64(120),
									CumulativeCount: proto.Uint64(412),
								},
								&dto.Bucket{
									UpperBound:      proto.Float64(144),
									CumulativeCount: proto.Uint64(592),
								},
								&dto.Bucket{
									UpperBound:      proto.Float64(172.8),
									CumulativeCount: proto.Uint64(1524),
								},
								&dto.Bucket{
									UpperBound:      proto.Float64(math.Inf(+1)),
									CumulativeCount: proto.Uint64(2693),
								},
							},
						},
					},
				},
			},
			out: `# HELP request_duration_microseconds The response latency.
# TYPE request_duration_microseconds histogram
request_duration_microseconds_bucket{le="100"} 123
request_duration_microseconds_bucket{le="120"} 412
request_duration_microseconds_bucket{le="144"} 592
request_duration_microseconds_bucket{le="172.8"} 1524
request_duration_microseconds_bucket{le="+Inf"} 2693
request_duration_microseconds_sum 1.7560473e+06
request_duration_microseconds_count 2693
`,
		},
		// 5: Histogram with missing +Inf bucket.
		// Expects the encoder to synthesize the terminal +Inf bucket from the
		// total sample count, so the expected output matches scenario 4.
		{
			in: &dto.MetricFamily{
				Name: proto.String("request_duration_microseconds"),
				Help: proto.String("The response latency."),
				Type: dto.MetricType_HISTOGRAM.Enum(),
				Metric: []*dto.Metric{
					&dto.Metric{
						Histogram: &dto.Histogram{
							SampleCount: proto.Uint64(2693),
							SampleSum:   proto.Float64(1756047.3),
							Bucket: []*dto.Bucket{
								&dto.Bucket{
									UpperBound:      proto.Float64(100),
									CumulativeCount: proto.Uint64(123),
								},
								&dto.Bucket{
									UpperBound:      proto.Float64(120),
									CumulativeCount: proto.Uint64(412),
								},
								&dto.Bucket{
									UpperBound:      proto.Float64(144),
									CumulativeCount: proto.Uint64(592),
								},
								&dto.Bucket{
									UpperBound:      proto.Float64(172.8),
									CumulativeCount: proto.Uint64(1524),
								},
							},
						},
					},
				},
			},
			out: `# HELP request_duration_microseconds The response latency.
# TYPE request_duration_microseconds histogram
request_duration_microseconds_bucket{le="100"} 123
request_duration_microseconds_bucket{le="120"} 412
request_duration_microseconds_bucket{le="144"} 592
request_duration_microseconds_bucket{le="172.8"} 1524
request_duration_microseconds_bucket{le="+Inf"} 2693
request_duration_microseconds_sum 1.7560473e+06
request_duration_microseconds_count 2693
`,
		},
		// 6: No metric type, should result in default type Counter.
		{
			in: &dto.MetricFamily{
				Name: proto.String("name"),
				Help: proto.String("doc string"),
				Metric: []*dto.Metric{
					&dto.Metric{
						Counter: &dto.Counter{
							Value: proto.Float64(math.Inf(-1)),
						},
					},
				},
			},
			out: `# HELP name doc string
# TYPE name counter
name -Inf
`,
		},
	}

	for i, scenario := range scenarios {
		out := bytes.NewBuffer(make([]byte, 0, len(scenario.out)))
		n, err := MetricFamilyToText(out, scenario.in)
		if err != nil {
			t.Errorf("%d. error: %s", i, err)
			continue
		}
		// The reported byte count must match the rendered output exactly.
		if expected, got := len(scenario.out), n; expected != got {
			t.Errorf(
				"%d. expected %d bytes written, got %d",
				i, expected, got,
			)
		}
		if expected, got := scenario.out, out.String(); expected != got {
			t.Errorf(
				"%d. expected out=%q, got %q",
				i, expected, got,
			)
		}
	}

}
|
||||
|
||||
// TestCreate runs the table-driven creation tests once.
func TestCreate(t *testing.T) {
	testCreate(t)
}
|
||||
|
||||
// BenchmarkCreate measures the cost of rendering the full creation table.
func BenchmarkCreate(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testCreate(b)
	}
}
|
||||
|
||||
// testCreateError feeds MetricFamilyToText malformed MetricFamily messages
// and checks that each returned error message starts with the expected text.
// It takes testing.TB so the same table drives both TestCreateError and
// BenchmarkCreateError.
func testCreateError(t testing.TB) {
	var scenarios = []struct {
		in  *dto.MetricFamily
		err string
	}{
		// 0: No metric.
		{
			in: &dto.MetricFamily{
				Name:   proto.String("name"),
				Help:   proto.String("doc string"),
				Type:   dto.MetricType_COUNTER.Enum(),
				Metric: []*dto.Metric{},
			},
			err: "MetricFamily has no metrics",
		},
		// 1: No metric name.
		{
			in: &dto.MetricFamily{
				Help: proto.String("doc string"),
				Type: dto.MetricType_UNTYPED.Enum(),
				Metric: []*dto.Metric{
					&dto.Metric{
						Untyped: &dto.Untyped{
							Value: proto.Float64(math.Inf(-1)),
						},
					},
				},
			},
			err: "MetricFamily has no name",
		},
		// 2: Wrong type.
		{
			in: &dto.MetricFamily{
				Name: proto.String("name"),
				Help: proto.String("doc string"),
				Type: dto.MetricType_COUNTER.Enum(),
				Metric: []*dto.Metric{
					&dto.Metric{
						Untyped: &dto.Untyped{
							Value: proto.Float64(math.Inf(-1)),
						},
					},
				},
			},
			err: "expected counter in metric",
		},
	}

	for i, scenario := range scenarios {
		var out bytes.Buffer
		_, err := MetricFamilyToText(&out, scenario.in)
		if err == nil {
			t.Errorf("%d. expected error, got nil", i)
			continue
		}
		// Prefix match: the error text continues with scenario-specific data.
		if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 {
			t.Errorf(
				"%d. expected error starting with %q, got %q",
				i, expected, got,
			)
		}
	}

}
|
||||
|
||||
// TestCreateError runs the table-driven error-path tests once.
func TestCreateError(t *testing.T) {
	testCreateError(t)
}
|
||||
|
||||
// BenchmarkCreateError measures the cost of the error-path table.
func BenchmarkCreateError(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testCreateError(b)
	}
}
|
||||
746
vendor/github.com/prometheus/common/expfmt/text_parse.go
generated
vendored
Normal file
746
vendor/github.com/prometheus/common/expfmt/text_parse.go
generated
vendored
Normal file
|
|
@ -0,0 +1,746 @@
|
|||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
// A stateFn is a function that represents a state in a state machine. By
// executing it, the state is progressed to the next state. The stateFn returns
// another stateFn, which represents the new state. The end state is represented
// by nil.
type stateFn func() stateFn

// ParseError signals errors while parsing the simple and flat text-based
// exchange format.
type ParseError struct {
	Line int    // 1-based line number at which the error occurred.
	Msg  string // Description of what went wrong.
}

// Error implements the error interface.
func (e ParseError) Error() string {
	return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
}
||||
// TextParser is used to parse the simple and flat text-based exchange format. Its
// zero value is ready to use.
//
// A TextParser must not be used concurrently; all of the state below is
// mutated in place during a TextToMetricFamilies run.
type TextParser struct {
	metricFamiliesByName map[string]*dto.MetricFamily // Result map, keyed by metric name.
	buf                  *bufio.Reader                // Where the parsed input is read through.
	err                  error                        // Most recent error.
	lineCount            int                          // Tracks the line count for error messages.
	currentByte          byte                         // The most recent byte read.
	currentToken         bytes.Buffer                 // Re-used each time a token has to be gathered from multiple bytes.
	currentMF            *dto.MetricFamily            // Metric family of the line being parsed.
	currentMetric        *dto.Metric                  // Metric of the line being parsed.
	currentLabelPair     *dto.LabelPair               // Label pair of the line being parsed.

	// The remaining member variables are only used for summaries/histograms.
	currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
	// Summary specific.
	summaries       map[uint64]*dto.Metric // Key is created with LabelsToSignature.
	currentQuantile float64
	// Histogram specific.
	histograms    map[uint64]*dto.Metric // Key is created with LabelsToSignature.
	currentBucket float64
	// These tell us if the currently processed line ends on '_count' or
	// '_sum' respectively and belong to a summary/histogram, representing the sample
	// count and sum of that summary/histogram.
	currentIsSummaryCount, currentIsSummarySum     bool
	currentIsHistogramCount, currentIsHistogramSum bool
}
|
||||
|
||||
// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
|
||||
// format and creates MetricFamily proto messages. It returns the MetricFamily
|
||||
// proto messages in a map where the metric names are the keys, along with any
|
||||
// error encountered.
|
||||
//
|
||||
// If the input contains duplicate metrics (i.e. lines with the same metric name
|
||||
// and exactly the same label set), the resulting MetricFamily will contain
|
||||
// duplicate Metric proto messages. Similar is true for duplicate label
|
||||
// names. Checks for duplicates have to be performed separately, if required.
|
||||
// Also note that neither the metrics within each MetricFamily are sorted nor
|
||||
// the label pairs within each Metric. Sorting is not required for the most
|
||||
// frequent use of this method, which is sample ingestion in the Prometheus
|
||||
// server. However, for presentation purposes, you might want to sort the
|
||||
// metrics, and in some cases, you must sort the labels, e.g. for consumption by
|
||||
// the metric family injection hook of the Prometheus registry.
|
||||
//
|
||||
// Summaries and histograms are rather special beasts. You would probably not
|
||||
// use them in the simple text format anyway. This method can deal with
|
||||
// summaries and histograms if they are presented in exactly the way the
|
||||
// text.Create function creates them.
|
||||
//
|
||||
// This method must not be called concurrently. If you want to parse different
|
||||
// input concurrently, instantiate a separate Parser for each goroutine.
|
||||
func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
|
||||
p.reset(in)
|
||||
for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
|
||||
// Magic happens here...
|
||||
}
|
||||
// Get rid of empty metric families.
|
||||
for k, mf := range p.metricFamiliesByName {
|
||||
if len(mf.GetMetric()) == 0 {
|
||||
delete(p.metricFamiliesByName, k)
|
||||
}
|
||||
}
|
||||
return p.metricFamiliesByName, p.err
|
||||
}
|
||||
|
||||
func (p *TextParser) reset(in io.Reader) {
|
||||
p.metricFamiliesByName = map[string]*dto.MetricFamily{}
|
||||
if p.buf == nil {
|
||||
p.buf = bufio.NewReader(in)
|
||||
} else {
|
||||
p.buf.Reset(in)
|
||||
}
|
||||
p.err = nil
|
||||
p.lineCount = 0
|
||||
if p.summaries == nil || len(p.summaries) > 0 {
|
||||
p.summaries = map[uint64]*dto.Metric{}
|
||||
}
|
||||
if p.histograms == nil || len(p.histograms) > 0 {
|
||||
p.histograms = map[uint64]*dto.Metric{}
|
||||
}
|
||||
p.currentQuantile = math.NaN()
|
||||
p.currentBucket = math.NaN()
|
||||
}
|
||||
|
||||
// startOfLine represents the state where the next byte read from p.buf is the
|
||||
// start of a line (or whitespace leading up to it).
|
||||
func (p *TextParser) startOfLine() stateFn {
|
||||
p.lineCount++
|
||||
if p.skipBlankTab(); p.err != nil {
|
||||
// End of input reached. This is the only case where
|
||||
// that is not an error but a signal that we are done.
|
||||
p.err = nil
|
||||
return nil
|
||||
}
|
||||
switch p.currentByte {
|
||||
case '#':
|
||||
return p.startComment
|
||||
case '\n':
|
||||
return p.startOfLine // Empty line, start the next one.
|
||||
}
|
||||
return p.readingMetricName
|
||||
}
|
||||
|
||||
// startComment represents the state where the next byte read from p.buf is the
// start of a comment (or whitespace leading up to it).
//
// Lines beginning with '#' are either generic comments (skipped) or the
// special 'HELP'/'TYPE' directives, which are dispatched to readingHelp /
// readingType after the metric name has been read and registered.
func (p *TextParser) startComment() stateFn {
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte == '\n' {
		// '#' followed directly by a newline: an empty comment.
		return p.startOfLine
	}
	if p.readTokenUntilWhitespace(); p.err != nil {
		return nil // Unexpected end of input.
	}
	// If we have hit the end of line already, there is nothing left
	// to do. This is not considered a syntax error.
	if p.currentByte == '\n' {
		return p.startOfLine
	}
	keyword := p.currentToken.String()
	if keyword != "HELP" && keyword != "TYPE" {
		// Generic comment, ignore by fast forwarding to end of line.
		for p.currentByte != '\n' {
			if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
				return nil // Unexpected end of input.
			}
		}
		return p.startOfLine
	}
	// There is something. Next has to be a metric name.
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.readTokenAsMetricName(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte == '\n' {
		// At the end of the line already.
		// Again, this is not considered a syntax error.
		return p.startOfLine
	}
	// readTokenAsMetricName stops at the first byte that cannot be part of
	// a metric name; anything other than blank/tab here means the name was
	// malformed.
	if !isBlankOrTab(p.currentByte) {
		p.parseError("invalid metric name in comment")
		return nil
	}
	// Register (or look up) the MetricFamily named by p.currentToken so the
	// HELP/TYPE payload can be attached to it.
	p.setOrCreateCurrentMF()
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte == '\n' {
		// At the end of the line already.
		// Again, this is not considered a syntax error.
		return p.startOfLine
	}
	switch keyword {
	case "HELP":
		return p.readingHelp
	case "TYPE":
		return p.readingType
	}
	// Unreachable: keyword was validated to be HELP or TYPE above.
	panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
}
|
||||
|
||||
// readingMetricName represents the state where the last byte read (now in
|
||||
// p.currentByte) is the first byte of a metric name.
|
||||
func (p *TextParser) readingMetricName() stateFn {
|
||||
if p.readTokenAsMetricName(); p.err != nil {
|
||||
return nil
|
||||
}
|
||||
if p.currentToken.Len() == 0 {
|
||||
p.parseError("invalid metric name")
|
||||
return nil
|
||||
}
|
||||
p.setOrCreateCurrentMF()
|
||||
// Now is the time to fix the type if it hasn't happened yet.
|
||||
if p.currentMF.Type == nil {
|
||||
p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
|
||||
}
|
||||
p.currentMetric = &dto.Metric{}
|
||||
// Do not append the newly created currentMetric to
|
||||
// currentMF.Metric right now. First wait if this is a summary,
|
||||
// and the metric exists already, which we can only know after
|
||||
// having read all the labels.
|
||||
if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
return p.readingLabels
|
||||
}
|
||||
|
||||
// readingLabels represents the state where the last byte read (now in
|
||||
// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
|
||||
// first byte of the value (otherwise).
|
||||
func (p *TextParser) readingLabels() stateFn {
|
||||
// Summaries/histograms are special. We have to reset the
|
||||
// currentLabels map, currentQuantile and currentBucket before starting to
|
||||
// read labels.
|
||||
if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
|
||||
p.currentLabels = map[string]string{}
|
||||
p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
|
||||
p.currentQuantile = math.NaN()
|
||||
p.currentBucket = math.NaN()
|
||||
}
|
||||
if p.currentByte != '{' {
|
||||
return p.readingValue
|
||||
}
|
||||
return p.startLabelName
|
||||
}
|
||||
|
||||
// startLabelName represents the state where the next byte read from p.buf is
// the start of a label name (or whitespace leading up to it).
func (p *TextParser) startLabelName() stateFn {
	if p.skipBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte == '}' {
		// Empty label set (or trailing comma before '}'); skip to the value.
		if p.skipBlankTab(); p.err != nil {
			return nil // Unexpected end of input.
		}
		return p.readingValue
	}
	if p.readTokenAsLabelName(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentToken.Len() == 0 {
		p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
		return nil
	}
	p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
	// '__name__' is reserved for the metric name itself and must not appear
	// as an explicit label.
	if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
		p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
		return nil
	}
	// Special summary/histogram treatment. Don't add 'quantile' and 'le'
	// labels to 'real' labels.
	if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
		!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
	}
	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
		return nil // Unexpected end of input.
	}
	if p.currentByte != '=' {
		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
		return nil
	}
	return p.startLabelValue
}
|
||||
|
||||
// startLabelValue represents the state where the next byte read from p.buf is
|
||||
// the start of a (quoted) label value (or whitespace leading up to it).
|
||||
func (p *TextParser) startLabelValue() stateFn {
|
||||
if p.skipBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
if p.currentByte != '"' {
|
||||
p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
|
||||
return nil
|
||||
}
|
||||
if p.readTokenAsLabelValue(); p.err != nil {
|
||||
return nil
|
||||
}
|
||||
p.currentLabelPair.Value = proto.String(p.currentToken.String())
|
||||
// Special treatment of summaries:
|
||||
// - Quantile labels are special, will result in dto.Quantile later.
|
||||
// - Other labels have to be added to currentLabels for signature calculation.
|
||||
if p.currentMF.GetType() == dto.MetricType_SUMMARY {
|
||||
if p.currentLabelPair.GetName() == model.QuantileLabel {
|
||||
if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
|
||||
// Create a more helpful error message.
|
||||
p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
|
||||
}
|
||||
}
|
||||
// Similar special treatment of histograms.
|
||||
if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
|
||||
if p.currentLabelPair.GetName() == model.BucketLabel {
|
||||
if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {
|
||||
// Create a more helpful error message.
|
||||
p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
|
||||
}
|
||||
}
|
||||
if p.skipBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
switch p.currentByte {
|
||||
case ',':
|
||||
return p.startLabelName
|
||||
|
||||
case '}':
|
||||
if p.skipBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
return p.readingValue
|
||||
default:
|
||||
p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// readingValue represents the state where the last byte read (now in
// p.currentByte) is the first byte of the sample value (i.e. a float).
func (p *TextParser) readingValue() stateFn {
	// When we are here, we have read all the labels, so for the
	// special case of a summary/histogram, we can finally find out
	// if the metric already exists.
	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
		// Look up the metric by its label signature so the _count/_sum and
		// quantile lines of the same summary all land in one dto.Metric.
		signature := model.LabelsToSignature(p.currentLabels)
		if summary := p.summaries[signature]; summary != nil {
			p.currentMetric = summary
		} else {
			p.summaries[signature] = p.currentMetric
			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
		}
	} else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
		// Same deduplication as for summaries, keyed on the histogram maps.
		signature := model.LabelsToSignature(p.currentLabels)
		if histogram := p.histograms[signature]; histogram != nil {
			p.currentMetric = histogram
		} else {
			p.histograms[signature] = p.currentMetric
			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
		}
	} else {
		p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
	}
	if p.readTokenUntilWhitespace(); p.err != nil {
		return nil // Unexpected end of input.
	}
	value, err := strconv.ParseFloat(p.currentToken.String(), 64)
	if err != nil {
		// Create a more helpful error message.
		p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
		return nil
	}
	// Store the value in the slot that matches the metric family's type and,
	// for summaries/histograms, whatever the current line represents
	// (_count, _sum, or a quantile/bucket sample).
	switch p.currentMF.GetType() {
	case dto.MetricType_COUNTER:
		p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
	case dto.MetricType_GAUGE:
		p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
	case dto.MetricType_UNTYPED:
		p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
	case dto.MetricType_SUMMARY:
		// *sigh*
		if p.currentMetric.Summary == nil {
			p.currentMetric.Summary = &dto.Summary{}
		}
		switch {
		case p.currentIsSummaryCount:
			p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
		case p.currentIsSummarySum:
			p.currentMetric.Summary.SampleSum = proto.Float64(value)
		case !math.IsNaN(p.currentQuantile):
			// A non-NaN currentQuantile means this line carried a
			// 'quantile' label (set in startLabelValue).
			p.currentMetric.Summary.Quantile = append(
				p.currentMetric.Summary.Quantile,
				&dto.Quantile{
					Quantile: proto.Float64(p.currentQuantile),
					Value:    proto.Float64(value),
				},
			)
		}
	case dto.MetricType_HISTOGRAM:
		// *sigh*
		if p.currentMetric.Histogram == nil {
			p.currentMetric.Histogram = &dto.Histogram{}
		}
		switch {
		case p.currentIsHistogramCount:
			p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
		case p.currentIsHistogramSum:
			p.currentMetric.Histogram.SampleSum = proto.Float64(value)
		case !math.IsNaN(p.currentBucket):
			// A non-NaN currentBucket means this line carried an 'le'
			// label (set in startLabelValue).
			p.currentMetric.Histogram.Bucket = append(
				p.currentMetric.Histogram.Bucket,
				&dto.Bucket{
					UpperBound:      proto.Float64(p.currentBucket),
					CumulativeCount: proto.Uint64(uint64(value)),
				},
			)
		}
	default:
		p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
	}
	if p.currentByte == '\n' {
		return p.startOfLine
	}
	return p.startTimestamp
}
|
||||
|
||||
// startTimestamp represents the state where the next byte read from p.buf is
|
||||
// the start of the timestamp (or whitespace leading up to it).
|
||||
func (p *TextParser) startTimestamp() stateFn {
|
||||
if p.skipBlankTab(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
if p.readTokenUntilWhitespace(); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
|
||||
if err != nil {
|
||||
// Create a more helpful error message.
|
||||
p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
|
||||
return nil
|
||||
}
|
||||
p.currentMetric.TimestampMs = proto.Int64(timestamp)
|
||||
if p.readTokenUntilNewline(false); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
if p.currentToken.Len() > 0 {
|
||||
p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
|
||||
return nil
|
||||
}
|
||||
return p.startOfLine
|
||||
}
|
||||
|
||||
// readingHelp represents the state where the last byte read (now in
|
||||
// p.currentByte) is the first byte of the docstring after 'HELP'.
|
||||
func (p *TextParser) readingHelp() stateFn {
|
||||
if p.currentMF.Help != nil {
|
||||
p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
|
||||
return nil
|
||||
}
|
||||
// Rest of line is the docstring.
|
||||
if p.readTokenUntilNewline(true); p.err != nil {
|
||||
return nil // Unexpected end of input.
|
||||
}
|
||||
p.currentMF.Help = proto.String(p.currentToken.String())
|
||||
return p.startOfLine
|
||||
}
|
||||
|
||||
// readingType represents the state where the last byte read (now in
// p.currentByte) is the first byte of the type hint after 'TYPE'.
// (The comment previously said 'HELP', which was a copy-paste error.)
func (p *TextParser) readingType() stateFn {
	// Only one TYPE line per metric family is allowed, and it must come
	// before any samples (setOrCreateCurrentMF leaves Type nil otherwise).
	if p.currentMF.Type != nil {
		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
		return nil
	}
	// Rest of line is the type.
	if p.readTokenUntilNewline(false); p.err != nil {
		return nil // Unexpected end of input.
	}
	// Map the (case-insensitive) type string onto the MetricType enum.
	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
	if !ok {
		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
		return nil
	}
	p.currentMF.Type = dto.MetricType(metricType).Enum()
	return p.startOfLine
}
|
||||
|
||||
// parseError sets p.err to a ParseError at the current line with the given
|
||||
// message.
|
||||
func (p *TextParser) parseError(msg string) {
|
||||
p.err = ParseError{
|
||||
Line: p.lineCount,
|
||||
Msg: msg,
|
||||
}
|
||||
}
|
||||
|
||||
// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
|
||||
// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
|
||||
func (p *TextParser) skipBlankTab() {
|
||||
for {
|
||||
if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
|
||||
// anything if p.currentByte is neither ' ' nor '\t'.
|
||||
func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
|
||||
if isBlankOrTab(p.currentByte) {
|
||||
p.skipBlankTab()
|
||||
}
|
||||
}
|
||||
|
||||
// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
|
||||
// first byte considered is the byte already read (now in p.currentByte). The
|
||||
// first whitespace byte encountered is still copied into p.currentByte, but not
|
||||
// into p.currentToken.
|
||||
func (p *TextParser) readTokenUntilWhitespace() {
|
||||
p.currentToken.Reset()
|
||||
for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
|
||||
p.currentToken.WriteByte(p.currentByte)
|
||||
p.currentByte, p.err = p.buf.ReadByte()
|
||||
}
|
||||
}
|
||||
|
||||
// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
// byte considered is the byte already read (now in p.currentByte). The first
// newline byte encountered is still copied into p.currentByte, but not into
// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
// other escape sequences are invalid and cause an error.
//
// NOTE(review): when recognizeEscapeSequence is false, a '\\' byte still sets
// 'escaped' below but is never written to the token, so backslashes are
// silently dropped from the result — confirm whether that is intended for the
// TYPE/timestamp lines that use this mode.
func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
	p.currentToken.Reset()
	escaped := false
	for p.err == nil {
		if recognizeEscapeSequence && escaped {
			// Previous byte was '\'; only '\\' and '\n' are legal.
			switch p.currentByte {
			case '\\':
				p.currentToken.WriteByte(p.currentByte)
			case 'n':
				p.currentToken.WriteByte('\n')
			default:
				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
				return
			}
			escaped = false
		} else {
			switch p.currentByte {
			case '\n':
				return
			case '\\':
				escaped = true
			default:
				p.currentToken.WriteByte(p.currentByte)
			}
		}
		p.currentByte, p.err = p.buf.ReadByte()
	}
}
|
||||
|
||||
// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
|
||||
// The first byte considered is the byte already read (now in p.currentByte).
|
||||
// The first byte not part of a metric name is still copied into p.currentByte,
|
||||
// but not into p.currentToken.
|
||||
func (p *TextParser) readTokenAsMetricName() {
|
||||
p.currentToken.Reset()
|
||||
if !isValidMetricNameStart(p.currentByte) {
|
||||
return
|
||||
}
|
||||
for {
|
||||
p.currentToken.WriteByte(p.currentByte)
|
||||
p.currentByte, p.err = p.buf.ReadByte()
|
||||
if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
|
||||
// The first byte considered is the byte already read (now in p.currentByte).
|
||||
// The first byte not part of a label name is still copied into p.currentByte,
|
||||
// but not into p.currentToken.
|
||||
func (p *TextParser) readTokenAsLabelName() {
|
||||
p.currentToken.Reset()
|
||||
if !isValidLabelNameStart(p.currentByte) {
|
||||
return
|
||||
}
|
||||
for {
|
||||
p.currentToken.WriteByte(p.currentByte)
|
||||
p.currentByte, p.err = p.buf.ReadByte()
|
||||
if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
// In contrast to the other 'readTokenAs...' functions, which start with the
// last read byte in p.currentByte, this method ignores p.currentByte and starts
// with reading a new byte from p.buf. The first byte not part of a label value
// is still copied into p.currentByte, but not into p.currentToken.
//
// Within the quoted value, '\\"', '\\\\', and '\\n' are the only valid escape
// sequences; a literal (unescaped) newline is a parse error.
func (p *TextParser) readTokenAsLabelValue() {
	p.currentToken.Reset()
	escaped := false
	for {
		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
			return
		}
		if escaped {
			// Previous byte was '\'; decode the escape sequence.
			switch p.currentByte {
			case '"', '\\':
				p.currentToken.WriteByte(p.currentByte)
			case 'n':
				p.currentToken.WriteByte('\n')
			default:
				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
				return
			}
			escaped = false
			continue
		}
		switch p.currentByte {
		case '"':
			// Closing quote: the value is complete.
			return
		case '\n':
			p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
			return
		case '\\':
			escaped = true
		default:
			p.currentToken.WriteByte(p.currentByte)
		}
	}
}
|
||||
|
||||
// setOrCreateCurrentMF sets p.currentMF to the MetricFamily the name in
// p.currentToken belongs to, creating (and registering) a new family if none
// exists yet. A name ending in '_count'/'_sum' ('_bucket' for histograms) is
// attributed to an existing summary/histogram family of the base name, and the
// corresponding currentIs* flag is set so readingValue knows which field the
// sample belongs to.
func (p *TextParser) setOrCreateCurrentMF() {
	// Reset per-line flags first; they only get re-set below if this name
	// turns out to be a summary/histogram _count or _sum.
	p.currentIsSummaryCount = false
	p.currentIsSummarySum = false
	p.currentIsHistogramCount = false
	p.currentIsHistogramSum = false
	name := p.currentToken.String()
	if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
		return
	}
	// Try out if this is a _sum or _count for a summary/histogram.
	summaryName := summaryMetricName(name)
	if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
		if p.currentMF.GetType() == dto.MetricType_SUMMARY {
			if isCount(name) {
				p.currentIsSummaryCount = true
			}
			if isSum(name) {
				p.currentIsSummarySum = true
			}
			return
		}
	}
	histogramName := histogramMetricName(name)
	if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
		if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
			if isCount(name) {
				p.currentIsHistogramCount = true
			}
			if isSum(name) {
				p.currentIsHistogramSum = true
			}
			return
		}
	}
	// Not seen before and not part of an existing summary/histogram:
	// register a brand-new family under this name.
	p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
	p.metricFamiliesByName[name] = p.currentMF
}
|
||||
|
||||
// isValidLabelNameStart reports whether b may begin a label name:
// an ASCII letter or underscore.
func isValidLabelNameStart(b byte) bool {
	switch {
	case b == '_':
		return true
	case 'a' <= b && b <= 'z':
		return true
	case 'A' <= b && b <= 'Z':
		return true
	}
	return false
}
|
||||
|
||||
func isValidLabelNameContinuation(b byte) bool {
|
||||
return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
|
||||
}
|
||||
|
||||
func isValidMetricNameStart(b byte) bool {
|
||||
return isValidLabelNameStart(b) || b == ':'
|
||||
}
|
||||
|
||||
func isValidMetricNameContinuation(b byte) bool {
|
||||
return isValidLabelNameContinuation(b) || b == ':'
|
||||
}
|
||||
|
||||
// isBlankOrTab reports whether b is a space or a horizontal tab.
func isBlankOrTab(b byte) bool {
	switch b {
	case ' ', '\t':
		return true
	}
	return false
}
|
||||
|
||||
// isCount reports whether name ends in "_count" with a non-empty base name
// (so the bare string "_count" itself does not qualify).
func isCount(name string) bool {
	return len(name) > 6 && strings.HasSuffix(name, "_count")
}
|
||||
|
||||
// isSum reports whether name ends in "_sum" with a non-empty base name
// (so the bare string "_sum" itself does not qualify).
func isSum(name string) bool {
	return len(name) > 4 && strings.HasSuffix(name, "_sum")
}
|
||||
|
||||
// isBucket reports whether name ends in "_bucket" with a non-empty base name
// (so the bare string "_bucket" itself does not qualify).
func isBucket(name string) bool {
	return len(name) > 7 && strings.HasSuffix(name, "_bucket")
}
|
||||
|
||||
func summaryMetricName(name string) string {
|
||||
switch {
|
||||
case isCount(name):
|
||||
return name[:len(name)-6]
|
||||
case isSum(name):
|
||||
return name[:len(name)-4]
|
||||
default:
|
||||
return name
|
||||
}
|
||||
}
|
||||
|
||||
func histogramMetricName(name string) string {
|
||||
switch {
|
||||
case isCount(name):
|
||||
return name[:len(name)-6]
|
||||
case isSum(name):
|
||||
return name[:len(name)-4]
|
||||
case isBucket(name):
|
||||
return name[:len(name)-7]
|
||||
default:
|
||||
return name
|
||||
}
|
||||
}
|
||||
586
vendor/github.com/prometheus/common/expfmt/text_parse_test.go
generated
vendored
Normal file
586
vendor/github.com/prometheus/common/expfmt/text_parse_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,586 @@
|
|||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package expfmt
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
func testTextParse(t testing.TB) {
|
||||
var scenarios = []struct {
|
||||
in string
|
||||
out []*dto.MetricFamily
|
||||
}{
|
||||
// 0: Empty lines as input.
|
||||
{
|
||||
in: `
|
||||
|
||||
`,
|
||||
out: []*dto.MetricFamily{},
|
||||
},
|
||||
// 1: Minimal case.
|
||||
{
|
||||
in: `
|
||||
minimal_metric 1.234
|
||||
another_metric -3e3 103948
|
||||
# Even that:
|
||||
no_labels{} 3
|
||||
# HELP line for non-existing metric will be ignored.
|
||||
`,
|
||||
out: []*dto.MetricFamily{
|
||||
&dto.MetricFamily{
|
||||
Name: proto.String("minimal_metric"),
|
||||
Type: dto.MetricType_UNTYPED.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(1.234),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&dto.MetricFamily{
|
||||
Name: proto.String("another_metric"),
|
||||
Type: dto.MetricType_UNTYPED.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(-3e3),
|
||||
},
|
||||
TimestampMs: proto.Int64(103948),
|
||||
},
|
||||
},
|
||||
},
|
||||
&dto.MetricFamily{
|
||||
Name: proto.String("no_labels"),
|
||||
Type: dto.MetricType_UNTYPED.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(3),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// 2: Counters & gauges, docstrings, various whitespace, escape sequences.
|
||||
{
|
||||
in: `
|
||||
# A normal comment.
|
||||
#
|
||||
# TYPE name counter
|
||||
name{labelname="val1",basename="basevalue"} NaN
|
||||
name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890
|
||||
# HELP name two-line\n doc str\\ing
|
||||
|
||||
# HELP name2 doc str"ing 2
|
||||
# TYPE name2 gauge
|
||||
name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321
|
||||
name2{ labelname = "val1" , }-Inf
|
||||
`,
|
||||
out: []*dto.MetricFamily{
|
||||
&dto.MetricFamily{
|
||||
Name: proto.String("name"),
|
||||
Help: proto.String("two-line\n doc str\\ing"),
|
||||
Type: dto.MetricType_COUNTER.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("labelname"),
|
||||
Value: proto.String("val1"),
|
||||
},
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("basename"),
|
||||
Value: proto.String("basevalue"),
|
||||
},
|
||||
},
|
||||
Counter: &dto.Counter{
|
||||
Value: proto.Float64(math.NaN()),
|
||||
},
|
||||
},
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("labelname"),
|
||||
Value: proto.String("val2"),
|
||||
},
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("basename"),
|
||||
Value: proto.String("base\"v\\al\nue"),
|
||||
},
|
||||
},
|
||||
Counter: &dto.Counter{
|
||||
Value: proto.Float64(.23),
|
||||
},
|
||||
TimestampMs: proto.Int64(1234567890),
|
||||
},
|
||||
},
|
||||
},
|
||||
&dto.MetricFamily{
|
||||
Name: proto.String("name2"),
|
||||
Help: proto.String("doc str\"ing 2"),
|
||||
Type: dto.MetricType_GAUGE.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("labelname"),
|
||||
Value: proto.String("val2"),
|
||||
},
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("basename"),
|
||||
Value: proto.String("basevalue2"),
|
||||
},
|
||||
},
|
||||
Gauge: &dto.Gauge{
|
||||
Value: proto.Float64(math.Inf(+1)),
|
||||
},
|
||||
TimestampMs: proto.Int64(54321),
|
||||
},
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("labelname"),
|
||||
Value: proto.String("val1"),
|
||||
},
|
||||
},
|
||||
Gauge: &dto.Gauge{
|
||||
Value: proto.Float64(math.Inf(-1)),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// 3: The evil summary, mixed with other types and funny comments.
|
||||
{
|
||||
in: `
|
||||
# TYPE my_summary summary
|
||||
my_summary{n1="val1",quantile="0.5"} 110
|
||||
decoy -1 -2
|
||||
my_summary{n1="val1",quantile="0.9"} 140 1
|
||||
my_summary_count{n1="val1"} 42
|
||||
# Latest timestamp wins in case of a summary.
|
||||
my_summary_sum{n1="val1"} 4711 2
|
||||
fake_sum{n1="val1"} 2001
|
||||
# TYPE another_summary summary
|
||||
another_summary_count{n2="val2",n1="val1"} 20
|
||||
my_summary_count{n2="val2",n1="val1"} 5 5
|
||||
another_summary{n1="val1",n2="val2",quantile=".3"} -1.2
|
||||
my_summary_sum{n1="val2"} 08 15
|
||||
my_summary{n1="val3", quantile="0.2"} 4711
|
||||
my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN
|
||||
# some
|
||||
# funny comments
|
||||
# HELP
|
||||
# HELP
|
||||
# HELP my_summary
|
||||
# HELP my_summary
|
||||
`,
|
||||
out: []*dto.MetricFamily{
|
||||
&dto.MetricFamily{
|
||||
Name: proto.String("fake_sum"),
|
||||
Type: dto.MetricType_UNTYPED.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("n1"),
|
||||
Value: proto.String("val1"),
|
||||
},
|
||||
},
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(2001),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&dto.MetricFamily{
|
||||
Name: proto.String("decoy"),
|
||||
Type: dto.MetricType_UNTYPED.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Untyped: &dto.Untyped{
|
||||
Value: proto.Float64(-1),
|
||||
},
|
||||
TimestampMs: proto.Int64(-2),
|
||||
},
|
||||
},
|
||||
},
|
||||
&dto.MetricFamily{
|
||||
Name: proto.String("my_summary"),
|
||||
Type: dto.MetricType_SUMMARY.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("n1"),
|
||||
Value: proto.String("val1"),
|
||||
},
|
||||
},
|
||||
Summary: &dto.Summary{
|
||||
SampleCount: proto.Uint64(42),
|
||||
SampleSum: proto.Float64(4711),
|
||||
Quantile: []*dto.Quantile{
|
||||
&dto.Quantile{
|
||||
Quantile: proto.Float64(0.5),
|
||||
Value: proto.Float64(110),
|
||||
},
|
||||
&dto.Quantile{
|
||||
Quantile: proto.Float64(0.9),
|
||||
Value: proto.Float64(140),
|
||||
},
|
||||
},
|
||||
},
|
||||
TimestampMs: proto.Int64(2),
|
||||
},
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("n2"),
|
||||
Value: proto.String("val2"),
|
||||
},
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("n1"),
|
||||
Value: proto.String("val1"),
|
||||
},
|
||||
},
|
||||
Summary: &dto.Summary{
|
||||
SampleCount: proto.Uint64(5),
|
||||
Quantile: []*dto.Quantile{
|
||||
&dto.Quantile{
|
||||
Quantile: proto.Float64(-12.34),
|
||||
Value: proto.Float64(math.NaN()),
|
||||
},
|
||||
},
|
||||
},
|
||||
TimestampMs: proto.Int64(5),
|
||||
},
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("n1"),
|
||||
Value: proto.String("val2"),
|
||||
},
|
||||
},
|
||||
Summary: &dto.Summary{
|
||||
SampleSum: proto.Float64(8),
|
||||
},
|
||||
TimestampMs: proto.Int64(15),
|
||||
},
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("n1"),
|
||||
Value: proto.String("val3"),
|
||||
},
|
||||
},
|
||||
Summary: &dto.Summary{
|
||||
Quantile: []*dto.Quantile{
|
||||
&dto.Quantile{
|
||||
Quantile: proto.Float64(0.2),
|
||||
Value: proto.Float64(4711),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
&dto.MetricFamily{
|
||||
Name: proto.String("another_summary"),
|
||||
Type: dto.MetricType_SUMMARY.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Label: []*dto.LabelPair{
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("n2"),
|
||||
Value: proto.String("val2"),
|
||||
},
|
||||
&dto.LabelPair{
|
||||
Name: proto.String("n1"),
|
||||
Value: proto.String("val1"),
|
||||
},
|
||||
},
|
||||
Summary: &dto.Summary{
|
||||
SampleCount: proto.Uint64(20),
|
||||
Quantile: []*dto.Quantile{
|
||||
&dto.Quantile{
|
||||
Quantile: proto.Float64(0.3),
|
||||
Value: proto.Float64(-1.2),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// 4: The histogram.
|
||||
{
|
||||
in: `
|
||||
# HELP request_duration_microseconds The response latency.
|
||||
# TYPE request_duration_microseconds histogram
|
||||
request_duration_microseconds_bucket{le="100"} 123
|
||||
request_duration_microseconds_bucket{le="120"} 412
|
||||
request_duration_microseconds_bucket{le="144"} 592
|
||||
request_duration_microseconds_bucket{le="172.8"} 1524
|
||||
request_duration_microseconds_bucket{le="+Inf"} 2693
|
||||
request_duration_microseconds_sum 1.7560473e+06
|
||||
request_duration_microseconds_count 2693
|
||||
`,
|
||||
out: []*dto.MetricFamily{
|
||||
{
|
||||
Name: proto.String("request_duration_microseconds"),
|
||||
Help: proto.String("The response latency."),
|
||||
Type: dto.MetricType_HISTOGRAM.Enum(),
|
||||
Metric: []*dto.Metric{
|
||||
&dto.Metric{
|
||||
Histogram: &dto.Histogram{
|
||||
SampleCount: proto.Uint64(2693),
|
||||
SampleSum: proto.Float64(1756047.3),
|
||||
Bucket: []*dto.Bucket{
|
||||
&dto.Bucket{
|
||||
UpperBound: proto.Float64(100),
|
||||
CumulativeCount: proto.Uint64(123),
|
||||
},
|
||||
&dto.Bucket{
|
||||
UpperBound: proto.Float64(120),
|
||||
CumulativeCount: proto.Uint64(412),
|
||||
},
|
||||
&dto.Bucket{
|
||||
UpperBound: proto.Float64(144),
|
||||
CumulativeCount: proto.Uint64(592),
|
||||
},
|
||||
&dto.Bucket{
|
||||
UpperBound: proto.Float64(172.8),
|
||||
CumulativeCount: proto.Uint64(1524),
|
||||
},
|
||||
&dto.Bucket{
|
||||
UpperBound: proto.Float64(math.Inf(+1)),
|
||||
CumulativeCount: proto.Uint64(2693),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, scenario := range scenarios {
|
||||
out, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))
|
||||
if err != nil {
|
||||
t.Errorf("%d. error: %s", i, err)
|
||||
continue
|
||||
}
|
||||
if expected, got := len(scenario.out), len(out); expected != got {
|
||||
t.Errorf(
|
||||
"%d. expected %d MetricFamilies, got %d",
|
||||
i, expected, got,
|
||||
)
|
||||
}
|
||||
for _, expected := range scenario.out {
|
||||
got, ok := out[expected.GetName()]
|
||||
if !ok {
|
||||
t.Errorf(
|
||||
"%d. expected MetricFamily %q, found none",
|
||||
i, expected.GetName(),
|
||||
)
|
||||
continue
|
||||
}
|
||||
if expected.String() != got.String() {
|
||||
t.Errorf(
|
||||
"%d. expected MetricFamily %s, got %s",
|
||||
i, expected, got,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTextParse(t *testing.T) {
|
||||
testTextParse(t)
|
||||
}
|
||||
|
||||
func BenchmarkTextParse(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testTextParse(b)
|
||||
}
|
||||
}
|
||||
|
||||
func testTextParseError(t testing.TB) {
|
||||
var scenarios = []struct {
|
||||
in string
|
||||
err string
|
||||
}{
|
||||
// 0: No new-line at end of input.
|
||||
{
|
||||
in: `bla 3.14`,
|
||||
err: "EOF",
|
||||
},
|
||||
// 1: Invalid escape sequence in label value.
|
||||
{
|
||||
in: `metric{label="\t"} 3.14`,
|
||||
err: "text format parsing error in line 1: invalid escape sequence",
|
||||
},
|
||||
// 2: Newline in label value.
|
||||
{
|
||||
in: `
|
||||
metric{label="new
|
||||
line"} 3.14
|
||||
`,
|
||||
err: `text format parsing error in line 2: label value "new" contains unescaped new-line`,
|
||||
},
|
||||
// 3:
|
||||
{
|
||||
in: `metric{@="bla"} 3.14`,
|
||||
err: "text format parsing error in line 1: invalid label name for metric",
|
||||
},
|
||||
// 4:
|
||||
{
|
||||
in: `metric{__name__="bla"} 3.14`,
|
||||
err: `text format parsing error in line 1: label name "__name__" is reserved`,
|
||||
},
|
||||
// 5:
|
||||
{
|
||||
in: `metric{label+="bla"} 3.14`,
|
||||
err: "text format parsing error in line 1: expected '=' after label name",
|
||||
},
|
||||
// 6:
|
||||
{
|
||||
in: `metric{label=bla} 3.14`,
|
||||
err: "text format parsing error in line 1: expected '\"' at start of label value",
|
||||
},
|
||||
// 7:
|
||||
{
|
||||
in: `
|
||||
# TYPE metric summary
|
||||
metric{quantile="bla"} 3.14
|
||||
`,
|
||||
err: "text format parsing error in line 3: expected float as value for 'quantile' label",
|
||||
},
|
||||
// 8:
|
||||
{
|
||||
in: `metric{label="bla"+} 3.14`,
|
||||
err: "text format parsing error in line 1: unexpected end of label value",
|
||||
},
|
||||
// 9:
|
||||
{
|
||||
in: `metric{label="bla"} 3.14 2.72
|
||||
`,
|
||||
err: "text format parsing error in line 1: expected integer as timestamp",
|
||||
},
|
||||
// 10:
|
||||
{
|
||||
in: `metric{label="bla"} 3.14 2 3
|
||||
`,
|
||||
err: "text format parsing error in line 1: spurious string after timestamp",
|
||||
},
|
||||
// 11:
|
||||
{
|
||||
in: `metric{label="bla"} blubb
|
||||
`,
|
||||
err: "text format parsing error in line 1: expected float as value",
|
||||
},
|
||||
// 12:
|
||||
{
|
||||
in: `
|
||||
# HELP metric one
|
||||
# HELP metric two
|
||||
`,
|
||||
err: "text format parsing error in line 3: second HELP line for metric name",
|
||||
},
|
||||
// 13:
|
||||
{
|
||||
in: `
|
||||
# TYPE metric counter
|
||||
# TYPE metric untyped
|
||||
`,
|
||||
err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
|
||||
},
|
||||
// 14:
|
||||
{
|
||||
in: `
|
||||
metric 4.12
|
||||
# TYPE metric counter
|
||||
`,
|
||||
err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`,
|
||||
},
|
||||
// 14:
|
||||
{
|
||||
in: `
|
||||
# TYPE metric bla
|
||||
`,
|
||||
err: "text format parsing error in line 2: unknown metric type",
|
||||
},
|
||||
// 15:
|
||||
{
|
||||
in: `
|
||||
# TYPE met-ric
|
||||
`,
|
||||
err: "text format parsing error in line 2: invalid metric name in comment",
|
||||
},
|
||||
// 16:
|
||||
{
|
||||
in: `@invalidmetric{label="bla"} 3.14 2`,
|
||||
err: "text format parsing error in line 1: invalid metric name",
|
||||
},
|
||||
// 17:
|
||||
{
|
||||
in: `{label="bla"} 3.14 2`,
|
||||
err: "text format parsing error in line 1: invalid metric name",
|
||||
},
|
||||
// 18:
|
||||
{
|
||||
in: `
|
||||
# TYPE metric histogram
|
||||
metric_bucket{le="bla"} 3.14
|
||||
`,
|
||||
err: "text format parsing error in line 3: expected float as value for 'le' label",
|
||||
},
|
||||
}
|
||||
|
||||
for i, scenario := range scenarios {
|
||||
_, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))
|
||||
if err == nil {
|
||||
t.Errorf("%d. expected error, got nil", i)
|
||||
continue
|
||||
}
|
||||
if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 {
|
||||
t.Errorf(
|
||||
"%d. expected error starting with %q, got %q",
|
||||
i, expected, got,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestTextParseError(t *testing.T) {
|
||||
testTextParseError(t)
|
||||
}
|
||||
|
||||
func BenchmarkParseError(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
testTextParseError(b)
|
||||
}
|
||||
}
|
||||
253
vendor/github.com/prometheus/common/log/log.go
generated
vendored
Normal file
253
vendor/github.com/prometheus/common/log/log.go
generated
vendored
Normal file
|
|
@ -0,0 +1,253 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
type levelFlag struct{}
|
||||
|
||||
// String implements flag.Value.
|
||||
func (f levelFlag) String() string {
|
||||
return origLogger.Level.String()
|
||||
}
|
||||
|
||||
// Set implements flag.Value.
|
||||
func (f levelFlag) Set(level string) error {
|
||||
l, err := logrus.ParseLevel(level)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
origLogger.Level = l
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
// In order for this flag to take effect, the user of the package must call
|
||||
// flag.Parse() before logging anything.
|
||||
flag.Var(levelFlag{}, "log.level", "Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal].")
|
||||
}
|
||||
|
||||
// Logger is the leveled, structured logging interface provided by this
// package. It mirrors logrus' leveled API; With attaches a key/value pair
// to the context of the returned Logger.
type Logger interface {
	Debug(...interface{})
	Debugln(...interface{})
	Debugf(string, ...interface{})

	Info(...interface{})
	Infoln(...interface{})
	Infof(string, ...interface{})

	Warn(...interface{})
	Warnln(...interface{})
	Warnf(string, ...interface{})

	Error(...interface{})
	Errorln(...interface{})
	Errorf(string, ...interface{})

	Fatal(...interface{})
	Fatalln(...interface{})
	Fatalf(string, ...interface{})

	With(key string, value interface{}) Logger
}
|
||||
|
||||
// logger implements Logger by delegating to a logrus entry, adding a
// "source" (file:line) field to every message via sourced().
type logger struct {
	entry *logrus.Entry
}

// With returns a Logger whose context carries the additional key/value pair.
func (l logger) With(key string, value interface{}) Logger {
	return logger{l.entry.WithField(key, value)}
}

// Debug logs a message at level Debug.
func (l logger) Debug(args ...interface{}) {
	l.sourced().Debug(args...)
}

// Debugln logs a message at level Debug.
func (l logger) Debugln(args ...interface{}) {
	l.sourced().Debugln(args...)
}

// Debugf logs a formatted message at level Debug.
func (l logger) Debugf(format string, args ...interface{}) {
	l.sourced().Debugf(format, args...)
}

// Info logs a message at level Info.
func (l logger) Info(args ...interface{}) {
	l.sourced().Info(args...)
}

// Infoln logs a message at level Info.
func (l logger) Infoln(args ...interface{}) {
	l.sourced().Infoln(args...)
}

// Infof logs a formatted message at level Info.
func (l logger) Infof(format string, args ...interface{}) {
	l.sourced().Infof(format, args...)
}

// Warn logs a message at level Warn.
func (l logger) Warn(args ...interface{}) {
	l.sourced().Warn(args...)
}

// Warnln logs a message at level Warn.
func (l logger) Warnln(args ...interface{}) {
	l.sourced().Warnln(args...)
}

// Warnf logs a formatted message at level Warn.
func (l logger) Warnf(format string, args ...interface{}) {
	l.sourced().Warnf(format, args...)
}

// Error logs a message at level Error.
func (l logger) Error(args ...interface{}) {
	l.sourced().Error(args...)
}

// Errorln logs a message at level Error.
func (l logger) Errorln(args ...interface{}) {
	l.sourced().Errorln(args...)
}

// Errorf logs a formatted message at level Error.
func (l logger) Errorf(format string, args ...interface{}) {
	l.sourced().Errorf(format, args...)
}

// Fatal logs a message at level Fatal.
func (l logger) Fatal(args ...interface{}) {
	l.sourced().Fatal(args...)
}

// Fatalln logs a message at level Fatal.
func (l logger) Fatalln(args ...interface{}) {
	l.sourced().Fatalln(args...)
}

// Fatalf logs a formatted message at level Fatal.
func (l logger) Fatalf(format string, args ...interface{}) {
	l.sourced().Fatalf(format, args...)
}

// sourced adds a source field to the logger that contains
// the file name and line where the logging happened.
// NOTE: the fixed call depth in runtime.Caller(2) assumes sourced is called
// directly from one of the leveled methods above (or the package-level
// helpers); inserting an intermediate call layer would misreport the source.
func (l logger) sourced() *logrus.Entry {
	_, file, line, ok := runtime.Caller(2)
	if !ok {
		// Caller information unavailable; use a placeholder.
		file = "<???>"
		line = 1
	} else {
		// Keep only the base file name, dropping the directory path.
		slash := strings.LastIndex(file, "/")
		file = file[slash+1:]
	}
	return l.entry.WithField("source", fmt.Sprintf("%s:%d", file, line))
}
|
||||
|
||||
// origLogger is the shared underlying logrus logger; its level is
// controlled by the -log.level flag registered in init.
var origLogger = logrus.New()

// baseLogger wraps origLogger and backs all package-level helpers.
var baseLogger = logger{entry: logrus.NewEntry(origLogger)}

// Base returns the default Logger used by the package-level functions.
func Base() Logger {
	return baseLogger
}

// With returns a Logger with the given key/value pair attached as context.
func With(key string, value interface{}) Logger {
	return baseLogger.With(key, value)
}

// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
	baseLogger.sourced().Debug(args...)
}

// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
	baseLogger.sourced().Debugln(args...)
}

// Debugf logs a formatted message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
	baseLogger.sourced().Debugf(format, args...)
}

// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
	baseLogger.sourced().Info(args...)
}

// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
	baseLogger.sourced().Infoln(args...)
}

// Infof logs a formatted message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
	baseLogger.sourced().Infof(format, args...)
}

// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
	baseLogger.sourced().Warn(args...)
}

// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
	baseLogger.sourced().Warnln(args...)
}

// Warnf logs a formatted message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
	baseLogger.sourced().Warnf(format, args...)
}

// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
	baseLogger.sourced().Error(args...)
}

// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
	baseLogger.sourced().Errorln(args...)
}

// Errorf logs a formatted message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
	baseLogger.sourced().Errorf(format, args...)
}

// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
	baseLogger.sourced().Fatal(args...)
}

// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
	baseLogger.sourced().Fatalln(args...)
}

// Fatalf logs a formatted message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
	baseLogger.sourced().Fatalf(format, args...)
}
|
||||
39
vendor/github.com/prometheus/common/log/log_test.go
generated
vendored
Normal file
39
vendor/github.com/prometheus/common/log/log_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,39 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package log
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
)
|
||||
|
||||
func TestFileLineLogging(t *testing.T) {
	var buf bytes.Buffer
	origLogger.Out = &buf
	origLogger.Formatter = &logrus.TextFormatter{
		DisableColors: true,
	}

	// The default logging level should be "info".
	Debug("This debug-level line should not show up in the output.")
	Infof("This %s-level line should show up in the output.", "info")

	// NOTE(review): source="log_test.go:33" pins the exact file line of the
	// Infof call above — adding or removing lines earlier in this file
	// (including above this function) breaks this expectation.
	re := `^time=".*" level=info msg="This info-level line should show up in the output." source="log_test.go:33" \n$`
	if !regexp.MustCompile(re).Match(buf.Bytes()) {
		t.Fatalf("%q did not match expected regex %q", buf.String(), re)
	}
}
|
||||
109
vendor/github.com/prometheus/common/model/alert.go
generated
vendored
Normal file
109
vendor/github.com/prometheus/common/model/alert.go
generated
vendored
Normal file
|
|
@ -0,0 +1,109 @@
|
|||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
type AlertStatus string
|
||||
|
||||
const (
|
||||
AlertFiring AlertStatus = "firing"
|
||||
AlertResolved AlertStatus = "resolved"
|
||||
)
|
||||
|
||||
// Alert is a generic representation of an alert in the Prometheus eco-system.
|
||||
type Alert struct {
|
||||
// Label value pairs for purpose of aggregation, matching, and disposition
|
||||
// dispatching. This must minimally include an "alertname" label.
|
||||
Labels LabelSet `json:"labels"`
|
||||
|
||||
// Extra key/value information which does not define alert identity.
|
||||
Annotations LabelSet `json:"annotations"`
|
||||
|
||||
// The known time range for this alert. Both ends are optional.
|
||||
StartsAt time.Time `json:"startsAt,omitempty"`
|
||||
EndsAt time.Time `json:"endsAt,omitempty"`
|
||||
}
|
||||
|
||||
// Name returns the name of the alert. It is equivalent to the "alertname" label.
|
||||
func (a *Alert) Name() string {
|
||||
return string(a.Labels[AlertNameLabel])
|
||||
}
|
||||
|
||||
// Fingerprint returns a unique hash for the alert. It is equivalent to
|
||||
// the fingerprint of the alert's label set.
|
||||
func (a *Alert) Fingerprint() Fingerprint {
|
||||
return a.Labels.Fingerprint()
|
||||
}
|
||||
|
||||
func (a *Alert) String() string {
|
||||
s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
|
||||
if a.Resolved() {
|
||||
return s + "[resolved]"
|
||||
}
|
||||
return s + "[active]"
|
||||
}
|
||||
|
||||
// Resolved returns true iff the activity interval ended in the past.
|
||||
func (a *Alert) Resolved() bool {
|
||||
if a.EndsAt.IsZero() {
|
||||
return false
|
||||
}
|
||||
return !a.EndsAt.After(time.Now())
|
||||
}
|
||||
|
||||
// Status returns the status of the alert.
|
||||
func (a *Alert) Status() AlertStatus {
|
||||
if a.Resolved() {
|
||||
return AlertResolved
|
||||
}
|
||||
return AlertFiring
|
||||
}
|
||||
|
||||
// Alert is a list of alerts that can be sorted in chronological order.
|
||||
type Alerts []*Alert
|
||||
|
||||
func (as Alerts) Len() int { return len(as) }
|
||||
func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
|
||||
|
||||
func (as Alerts) Less(i, j int) bool {
|
||||
if as[i].StartsAt.Before(as[j].StartsAt) {
|
||||
return true
|
||||
}
|
||||
if as[i].EndsAt.Before(as[j].EndsAt) {
|
||||
return true
|
||||
}
|
||||
return as[i].Fingerprint() < as[j].Fingerprint()
|
||||
}
|
||||
|
||||
// HasFiring returns true iff one of the alerts is not resolved.
|
||||
func (as Alerts) HasFiring() bool {
|
||||
for _, a := range as {
|
||||
if !a.Resolved() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Status returns StatusFiring iff at least one of the alerts is firing.
|
||||
func (as Alerts) Status() AlertStatus {
|
||||
if as.HasFiring() {
|
||||
return AlertFiring
|
||||
}
|
||||
return AlertResolved
|
||||
}
|
||||
105
vendor/github.com/prometheus/common/model/fingerprinting.go
generated
vendored
Normal file
105
vendor/github.com/prometheus/common/model/fingerprinting.go
generated
vendored
Normal file
|
|
@ -0,0 +1,105 @@
|
|||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Fingerprint provides a hash-capable representation of a Metric.
|
||||
// For our purposes, FNV-1A 64-bit is used.
|
||||
type Fingerprint uint64
|
||||
|
||||
// FingerprintFromString transforms a string representation into a Fingerprint.
|
||||
func FingerprintFromString(s string) (Fingerprint, error) {
|
||||
num, err := strconv.ParseUint(s, 16, 64)
|
||||
return Fingerprint(num), err
|
||||
}
|
||||
|
||||
// ParseFingerprint parses the input string into a fingerprint.
|
||||
func ParseFingerprint(s string) (Fingerprint, error) {
|
||||
num, err := strconv.ParseUint(s, 16, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return Fingerprint(num), nil
|
||||
}
|
||||
|
||||
func (f Fingerprint) String() string {
|
||||
return fmt.Sprintf("%016x", uint64(f))
|
||||
}
|
||||
|
||||
// Fingerprints represents a collection of Fingerprint subject to a given
|
||||
// natural sorting scheme. It implements sort.Interface.
|
||||
type Fingerprints []Fingerprint
|
||||
|
||||
// Len implements sort.Interface.
|
||||
func (f Fingerprints) Len() int {
|
||||
return len(f)
|
||||
}
|
||||
|
||||
// Less implements sort.Interface.
|
||||
func (f Fingerprints) Less(i, j int) bool {
|
||||
return f[i] < f[j]
|
||||
}
|
||||
|
||||
// Swap implements sort.Interface.
|
||||
func (f Fingerprints) Swap(i, j int) {
|
||||
f[i], f[j] = f[j], f[i]
|
||||
}
|
||||
|
||||
// FingerprintSet is a set of Fingerprints.
|
||||
type FingerprintSet map[Fingerprint]struct{}
|
||||
|
||||
// Equal returns true if both sets contain the same elements (and not more).
|
||||
func (s FingerprintSet) Equal(o FingerprintSet) bool {
|
||||
if len(s) != len(o) {
|
||||
return false
|
||||
}
|
||||
|
||||
for k := range s {
|
||||
if _, ok := o[k]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Intersection returns the elements contained in both sets.
|
||||
func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
|
||||
myLength, otherLength := len(s), len(o)
|
||||
if myLength == 0 || otherLength == 0 {
|
||||
return FingerprintSet{}
|
||||
}
|
||||
|
||||
subSet := s
|
||||
superSet := o
|
||||
|
||||
if otherLength < myLength {
|
||||
subSet = o
|
||||
superSet = s
|
||||
}
|
||||
|
||||
out := FingerprintSet{}
|
||||
|
||||
for k := range subSet {
|
||||
if _, ok := superSet[k]; ok {
|
||||
out[k] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
188
vendor/github.com/prometheus/common/model/labels.go
generated
vendored
Normal file
188
vendor/github.com/prometheus/common/model/labels.go
generated
vendored
Normal file
|
|
@ -0,0 +1,188 @@
|
|||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// AlertNameLabel is the name of the label containing an alert's name.
	AlertNameLabel = "alertname"

	// ExportedLabelPrefix is the prefix to prepend to the label names present in
	// exported metrics if a label of the same name is added by the server.
	ExportedLabelPrefix = "exported_"

	// MetricNameLabel is the label name indicating the metric name of a
	// timeseries.
	MetricNameLabel = "__name__"

	// SchemeLabel is the name of the label that holds the scheme on which to
	// scrape a target.
	SchemeLabel = "__scheme__"

	// AddressLabel is the name of the label that holds the address of
	// a scrape target.
	AddressLabel = "__address__"

	// MetricsPathLabel is the name of the label that holds the path on which to
	// scrape a target.
	MetricsPathLabel = "__metrics_path__"

	// ReservedLabelPrefix is a prefix which is not legal in user-supplied
	// label names.
	ReservedLabelPrefix = "__"

	// MetaLabelPrefix is a prefix for labels that provide meta information.
	// Labels with this prefix are used for intermediate label processing and
	// will not be attached to time series.
	MetaLabelPrefix = "__meta_"

	// TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
	// Labels with this prefix are used for intermediate label processing and
	// will not be attached to time series. This is reserved for use in
	// Prometheus configuration files by users.
	TmpLabelPrefix = "__tmp_"

	// ParamLabelPrefix is a prefix for labels that provide URL parameters
	// used to scrape a target.
	ParamLabelPrefix = "__param_"

	// JobLabel is the label name indicating the job from which a timeseries
	// was scraped.
	JobLabel = "job"

	// InstanceLabel is the label name used for the instance label.
	InstanceLabel = "instance"

	// BucketLabel is used for the label that defines the upper bound of a
	// bucket of a histogram ("le" -> "less or equal").
	BucketLabel = "le"

	// QuantileLabel is used for the label that defines the quantile in a
	// summary.
	QuantileLabel = "quantile"
)

// LabelNameRE is a regular expression matching valid label names: a letter or
// underscore followed by any number of letters, digits, or underscores.
var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
|
||||
|
||||
// A LabelName is a key for a LabelSet or Metric. It has a value associated
|
||||
// therewith.
|
||||
type LabelName string
|
||||
|
||||
// UnmarshalYAML implements the yaml.Unmarshaler interface.
|
||||
func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||
var s string
|
||||
if err := unmarshal(&s); err != nil {
|
||||
return err
|
||||
}
|
||||
if !LabelNameRE.MatchString(s) {
|
||||
return fmt.Errorf("%q is not a valid label name", s)
|
||||
}
|
||||
*ln = LabelName(s)
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||
func (ln *LabelName) UnmarshalJSON(b []byte) error {
|
||||
var s string
|
||||
if err := json.Unmarshal(b, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
if !LabelNameRE.MatchString(s) {
|
||||
return fmt.Errorf("%q is not a valid label name", s)
|
||||
}
|
||||
*ln = LabelName(s)
|
||||
return nil
|
||||
}
|
||||
|
||||
// LabelNames is a sortable LabelName slice. It implements sort.Interface.
type LabelNames []LabelName

func (l LabelNames) Len() int {
	return len(l)
}

// Less orders label names lexicographically (plain byte-wise string compare).
func (l LabelNames) Less(i, j int) bool {
	return l[i] < l[j]
}

func (l LabelNames) Swap(i, j int) {
	l[i], l[j] = l[j], l[i]
}
|
||||
|
||||
func (l LabelNames) String() string {
|
||||
labelStrings := make([]string, 0, len(l))
|
||||
for _, label := range l {
|
||||
labelStrings = append(labelStrings, string(label))
|
||||
}
|
||||
return strings.Join(labelStrings, ", ")
|
||||
}
|
||||
|
||||
// A LabelValue is an associated value for a LabelName.
type LabelValue string

// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
type LabelValues []LabelValue

func (l LabelValues) Len() int {
	return len(l)
}
|
||||
|
||||
func (l LabelValues) Less(i, j int) bool {
|
||||
return sort.StringsAreSorted([]string{string(l[i]), string(l[j])})
|
||||
}
|
||||
|
||||
func (l LabelValues) Swap(i, j int) {
	l[i], l[j] = l[j], l[i]
}

// LabelPair pairs a name with a value.
type LabelPair struct {
	Name  LabelName
	Value LabelValue
}

// LabelPairs is a sortable slice of LabelPair pointers. It implements
// sort.Interface.
type LabelPairs []*LabelPair

func (l LabelPairs) Len() int {
	return len(l)
}

// Less orders pairs by name first, then by value. Equal pairs are not "less".
func (l LabelPairs) Less(i, j int) bool {
	switch {
	case l[i].Name > l[j].Name:
		return false
	case l[i].Name < l[j].Name:
		return true
	case l[i].Value > l[j].Value:
		return false
	case l[i].Value < l[j].Value:
		return true
	default:
		return false
	}
}

func (l LabelPairs) Swap(i, j int) {
	l[i], l[j] = l[j], l[i]
}
|
||||
91
vendor/github.com/prometheus/common/model/labels_test.go
generated
vendored
Normal file
91
vendor/github.com/prometheus/common/model/labels_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,91 @@
|
|||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// testLabelNames verifies that sorting LabelNames orders names byte-wise
// (so upper-case letters sort before lower-case ones).
func testLabelNames(t testing.TB) {
	var scenarios = []struct {
		in  LabelNames
		out LabelNames
	}{
		{
			in:  LabelNames{"ZZZ", "zzz"},
			out: LabelNames{"ZZZ", "zzz"},
		},
		{
			in:  LabelNames{"aaa", "AAA"},
			out: LabelNames{"AAA", "aaa"},
		},
	}

	for i, scenario := range scenarios {
		sort.Sort(scenario.in)

		for j, expected := range scenario.out {
			if expected != scenario.in[j] {
				t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
			}
		}
	}
}

func TestLabelNames(t *testing.T) {
	testLabelNames(t)
}

func BenchmarkLabelNames(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testLabelNames(b)
	}
}
|
||||
|
||||
// testLabelValues verifies that sorting LabelValues orders values byte-wise,
// mirroring testLabelNames.
func testLabelValues(t testing.TB) {
	var scenarios = []struct {
		in  LabelValues
		out LabelValues
	}{
		{
			in:  LabelValues{"ZZZ", "zzz"},
			out: LabelValues{"ZZZ", "zzz"},
		},
		{
			in:  LabelValues{"aaa", "AAA"},
			out: LabelValues{"AAA", "aaa"},
		},
	}

	for i, scenario := range scenarios {
		sort.Sort(scenario.in)

		for j, expected := range scenario.out {
			if expected != scenario.in[j] {
				t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j])
			}
		}
	}
}

func TestLabelValues(t *testing.T) {
	testLabelValues(t)
}

func BenchmarkLabelValues(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testLabelValues(b)
	}
}
|
||||
153
vendor/github.com/prometheus/common/model/labelset.go
generated
vendored
Normal file
153
vendor/github.com/prometheus/common/model/labelset.go
generated
vendored
Normal file
|
|
@ -0,0 +1,153 @@
|
|||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
// may be fully-qualified down to the point where it may resolve to a single
// Metric in the data store or not. All operations that occur within the realm
// of a LabelSet can emit a vector of Metric entities to which the LabelSet may
// match.
type LabelSet map[LabelName]LabelValue
|
||||
|
||||
func (ls LabelSet) Equal(o LabelSet) bool {
|
||||
if len(ls) != len(o) {
|
||||
return false
|
||||
}
|
||||
for ln, lv := range ls {
|
||||
olv, ok := o[ln]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if olv != lv {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Before compares the label sets, using the following criteria:
//
// If m has fewer labels than o, it is before o. If it has more, it is not.
//
// If the number of labels is the same, the superset of all label names is
// sorted alphanumerically. The first differing label pair found in that order
// determines the outcome: If the label does not exist at all in m, then m is
// before o, and vice versa. Otherwise the label value is compared
// alphanumerically.
//
// If m and o are equal, the method returns false.
func (ls LabelSet) Before(o LabelSet) bool {
	if len(ls) < len(o) {
		return true
	}
	if len(ls) > len(o) {
		return false
	}

	// Collect the (possibly duplicated) union of both name sets.
	lns := make(LabelNames, 0, len(ls)+len(o))
	for ln := range ls {
		lns = append(lns, ln)
	}
	for ln := range o {
		lns = append(lns, ln)
	}
	// It's probably not worth it to de-dup lns.
	sort.Sort(lns)
	for _, ln := range lns {
		mlv, ok := ls[ln]
		if !ok {
			// ln exists only in o, so ls sorts first.
			return true
		}
		olv, ok := o[ln]
		if !ok {
			// ln exists only in ls, so o sorts first.
			return false
		}
		if mlv < olv {
			return true
		}
		if mlv > olv {
			return false
		}
	}
	return false
}
|
||||
|
||||
func (ls LabelSet) Clone() LabelSet {
|
||||
lsn := make(LabelSet, len(ls))
|
||||
for ln, lv := range ls {
|
||||
lsn[ln] = lv
|
||||
}
|
||||
return lsn
|
||||
}
|
||||
|
||||
// Merge is a helper function to non-destructively merge two label sets.
|
||||
func (l LabelSet) Merge(other LabelSet) LabelSet {
|
||||
result := make(LabelSet, len(l))
|
||||
|
||||
for k, v := range l {
|
||||
result[k] = v
|
||||
}
|
||||
|
||||
for k, v := range other {
|
||||
result[k] = v
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func (l LabelSet) String() string {
|
||||
lstrs := make([]string, 0, len(l))
|
||||
for l, v := range l {
|
||||
lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
|
||||
}
|
||||
|
||||
sort.Strings(lstrs)
|
||||
return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
|
||||
}
|
||||
|
||||
// Fingerprint returns the LabelSet's fingerprint.
func (ls LabelSet) Fingerprint() Fingerprint {
	return labelSetToFingerprint(ls)
}

// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
// algorithm, which is, however, more susceptible to hash collisions.
func (ls LabelSet) FastFingerprint() Fingerprint {
	return labelSetToFastFingerprint(ls)
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||
func (l *LabelSet) UnmarshalJSON(b []byte) error {
|
||||
var m map[LabelName]LabelValue
|
||||
if err := json.Unmarshal(b, &m); err != nil {
|
||||
return err
|
||||
}
|
||||
// encoding/json only unmarshals maps of the form map[string]T. It treats
|
||||
// LabelName as a string and does not call its UnmarshalJSON method.
|
||||
// Thus, we have to replicate the behavior here.
|
||||
for ln := range m {
|
||||
if !LabelNameRE.MatchString(string(ln)) {
|
||||
return fmt.Errorf("%q is not a valid label name", ln)
|
||||
}
|
||||
}
|
||||
*l = LabelSet(m)
|
||||
return nil
|
||||
}
|
||||
81
vendor/github.com/prometheus/common/model/metric.go
generated
vendored
Normal file
81
vendor/github.com/prometheus/common/model/metric.go
generated
vendored
Normal file
|
|
@ -0,0 +1,81 @@
|
|||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// separator is a NUL byte used to delimit strings.
// NOTE(review): not referenced anywhere in this file — confirm other callers
// exist before removing.
var separator = []byte{0}
|
||||
|
||||
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
// a singleton and refers to one and only one stream of samples.
type Metric LabelSet
|
||||
|
||||
// Equal compares the metrics by delegating to LabelSet.Equal.
func (m Metric) Equal(o Metric) bool {
	return LabelSet(m).Equal(LabelSet(o))
}

// Before compares the metrics' underlying label sets.
func (m Metric) Before(o Metric) bool {
	return LabelSet(m).Before(LabelSet(o))
}
|
||||
|
||||
// Clone returns a copy of the Metric.
|
||||
func (m Metric) Clone() Metric {
|
||||
clone := Metric{}
|
||||
for k, v := range m {
|
||||
clone[k] = v
|
||||
}
|
||||
return clone
|
||||
}
|
||||
|
||||
// String implements fmt.Stringer. A metric with a __name__ label and no other
// labels renders as just the name; otherwise the output is
// name{label="value", ...} with labels in sorted order (the name part is empty
// if __name__ is absent).
func (m Metric) String() string {
	metricName, hasName := m[MetricNameLabel]
	numLabels := len(m) - 1
	if !hasName {
		numLabels = len(m)
	}
	labelStrings := make([]string, 0, numLabels)
	for label, value := range m {
		if label != MetricNameLabel {
			labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
		}
	}

	switch numLabels {
	case 0:
		if hasName {
			return string(metricName)
		}
		return "{}"
	default:
		// Sort for deterministic output regardless of map iteration order.
		sort.Strings(labelStrings)
		return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
	}
}
|
||||
|
||||
// Fingerprint returns a Metric's Fingerprint.
func (m Metric) Fingerprint() Fingerprint {
	return LabelSet(m).Fingerprint()
}

// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
// algorithm, which is, however, more susceptible to hash collisions.
func (m Metric) FastFingerprint() Fingerprint {
	return LabelSet(m).FastFingerprint()
}
|
||||
83
vendor/github.com/prometheus/common/model/metric_test.go
generated
vendored
Normal file
83
vendor/github.com/prometheus/common/model/metric_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,83 @@
|
|||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import "testing"
|
||||
|
||||
// testMetric pins the FNV-1a based Fingerprint and the XOR-based
// FastFingerprint values for a handful of label sets, guarding against
// accidental changes to the hashing scheme.
func testMetric(t testing.TB) {
	var scenarios = []struct {
		input           LabelSet
		fingerprint     Fingerprint
		fastFingerprint Fingerprint
	}{
		{
			input:           LabelSet{},
			fingerprint:     14695981039346656037,
			fastFingerprint: 14695981039346656037,
		},
		{
			input: LabelSet{
				"first_name":   "electro",
				"occupation":   "robot",
				"manufacturer": "westinghouse",
			},
			fingerprint:     5911716720268894962,
			fastFingerprint: 11310079640881077873,
		},
		{
			input: LabelSet{
				"x": "y",
			},
			fingerprint:     8241431561484471700,
			fastFingerprint: 13948396922932177635,
		},
		{
			input: LabelSet{
				"a": "bb",
				"b": "c",
			},
			fingerprint:     3016285359649981711,
			fastFingerprint: 3198632812309449502,
		},
		{
			input: LabelSet{
				"a":  "b",
				"bb": "c",
			},
			fingerprint:     7122421792099404749,
			fastFingerprint: 5774953389407657638,
		},
	}

	for i, scenario := range scenarios {
		input := Metric(scenario.input)

		if scenario.fingerprint != input.Fingerprint() {
			t.Errorf("%d. expected %d, got %d", i, scenario.fingerprint, input.Fingerprint())
		}
		if scenario.fastFingerprint != input.FastFingerprint() {
			t.Errorf("%d. expected %d, got %d", i, scenario.fastFingerprint, input.FastFingerprint())
		}
	}
}

func TestMetric(t *testing.T) {
	testMetric(t)
}

func BenchmarkMetric(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testMetric(b)
	}
}
|
||||
16
vendor/github.com/prometheus/common/model/model.go
generated
vendored
Normal file
16
vendor/github.com/prometheus/common/model/model.go
generated
vendored
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package model contains common data structures that are shared across
|
||||
// Prometheus components and libraries.
|
||||
package model
|
||||
190
vendor/github.com/prometheus/common/model/signature.go
generated
vendored
Normal file
190
vendor/github.com/prometheus/common/model/signature.go
generated
vendored
Normal file
|
|
@ -0,0 +1,190 @@
|
|||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"hash"
|
||||
"hash/fnv"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
// used to separate label names, label values, and other strings from each other
// when calculating their combined hash value (aka signature aka fingerprint).
const SeparatorByte byte = 255

var (
	// cache the signature of an empty label set (FNV-1a over zero input).
	emptyLabelSignature = fnv.New64a().Sum64()

	// hashAndBufPool recycles hashAndBuf pairs across signature calculations
	// to avoid per-call allocations. No New function is set; getHashAndBuf
	// handles the empty-pool case itself.
	hashAndBufPool sync.Pool
)

// hashAndBuf bundles a reusable FNV-1a hash with a scratch buffer.
type hashAndBuf struct {
	h hash.Hash64
	b bytes.Buffer
}
|
||||
|
||||
func getHashAndBuf() *hashAndBuf {
|
||||
hb := hashAndBufPool.Get()
|
||||
if hb == nil {
|
||||
return &hashAndBuf{h: fnv.New64a()}
|
||||
}
|
||||
return hb.(*hashAndBuf)
|
||||
}
|
||||
|
||||
func putHashAndBuf(hb *hashAndBuf) {
|
||||
hb.h.Reset()
|
||||
hb.b.Reset()
|
||||
hashAndBufPool.Put(hb)
|
||||
}
|
||||
|
||||
// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
|
||||
// given label set. (Collisions are possible but unlikely if the number of label
|
||||
// sets the function is applied to is small.)
|
||||
func LabelsToSignature(labels map[string]string) uint64 {
|
||||
if len(labels) == 0 {
|
||||
return emptyLabelSignature
|
||||
}
|
||||
|
||||
labelNames := make([]string, 0, len(labels))
|
||||
for labelName := range labels {
|
||||
labelNames = append(labelNames, labelName)
|
||||
}
|
||||
sort.Strings(labelNames)
|
||||
|
||||
hb := getHashAndBuf()
|
||||
defer putHashAndBuf(hb)
|
||||
|
||||
for _, labelName := range labelNames {
|
||||
hb.b.WriteString(labelName)
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.b.WriteString(labels[labelName])
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.h.Write(hb.b.Bytes())
|
||||
hb.b.Reset()
|
||||
}
|
||||
return hb.h.Sum64()
|
||||
}
|
||||
|
||||
// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as
|
||||
// parameter (rather than a label map) and returns a Fingerprint.
|
||||
func labelSetToFingerprint(ls LabelSet) Fingerprint {
|
||||
if len(ls) == 0 {
|
||||
return Fingerprint(emptyLabelSignature)
|
||||
}
|
||||
|
||||
labelNames := make(LabelNames, 0, len(ls))
|
||||
for labelName := range ls {
|
||||
labelNames = append(labelNames, labelName)
|
||||
}
|
||||
sort.Sort(labelNames)
|
||||
|
||||
hb := getHashAndBuf()
|
||||
defer putHashAndBuf(hb)
|
||||
|
||||
for _, labelName := range labelNames {
|
||||
hb.b.WriteString(string(labelName))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.b.WriteString(string(ls[labelName]))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.h.Write(hb.b.Bytes())
|
||||
hb.b.Reset()
|
||||
}
|
||||
return Fingerprint(hb.h.Sum64())
|
||||
}
|
||||
|
||||
// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a
// faster and less allocation-heavy hash function, which is more susceptible to
// create hash collisions. Therefore, collision detection should be applied.
func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
	if len(ls) == 0 {
		return Fingerprint(emptyLabelSignature)
	}

	var result uint64
	hb := getHashAndBuf()
	defer putHashAndBuf(hb)

	// Hash each pair independently and fold the per-pair hashes together with
	// XOR. XOR is commutative, so no sorting of label names is needed — the
	// source of both the speed and the weaker collision resistance.
	for labelName, labelValue := range ls {
		hb.b.WriteString(string(labelName))
		hb.b.WriteByte(SeparatorByte)
		hb.b.WriteString(string(labelValue))
		hb.h.Write(hb.b.Bytes())
		result ^= hb.h.Sum64()
		hb.h.Reset()
		hb.b.Reset()
	}
	return Fingerprint(result)
}
|
||||
|
||||
// SignatureForLabels works like LabelsToSignature but takes a Metric as
|
||||
// parameter (rather than a label map) and only includes the labels with the
|
||||
// specified LabelNames into the signature calculation. The labels passed in
|
||||
// will be sorted by this function.
|
||||
func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
|
||||
if len(m) == 0 || len(labels) == 0 {
|
||||
return emptyLabelSignature
|
||||
}
|
||||
|
||||
sort.Sort(LabelNames(labels))
|
||||
|
||||
hb := getHashAndBuf()
|
||||
defer putHashAndBuf(hb)
|
||||
|
||||
for _, label := range labels {
|
||||
hb.b.WriteString(string(label))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.b.WriteString(string(m[label]))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.h.Write(hb.b.Bytes())
|
||||
hb.b.Reset()
|
||||
}
|
||||
return hb.h.Sum64()
|
||||
}
|
||||
|
||||
// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
|
||||
// parameter (rather than a label map) and excludes the labels with any of the
|
||||
// specified LabelNames from the signature calculation.
|
||||
func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
|
||||
if len(m) == 0 {
|
||||
return emptyLabelSignature
|
||||
}
|
||||
|
||||
labelNames := make(LabelNames, 0, len(m))
|
||||
for labelName := range m {
|
||||
if _, exclude := labels[labelName]; !exclude {
|
||||
labelNames = append(labelNames, labelName)
|
||||
}
|
||||
}
|
||||
if len(labelNames) == 0 {
|
||||
return emptyLabelSignature
|
||||
}
|
||||
sort.Sort(labelNames)
|
||||
|
||||
hb := getHashAndBuf()
|
||||
defer putHashAndBuf(hb)
|
||||
|
||||
for _, labelName := range labelNames {
|
||||
hb.b.WriteString(string(labelName))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.b.WriteString(string(m[labelName]))
|
||||
hb.b.WriteByte(SeparatorByte)
|
||||
hb.h.Write(hb.b.Bytes())
|
||||
hb.b.Reset()
|
||||
}
|
||||
return hb.h.Sum64()
|
||||
}
|
||||
304
vendor/github.com/prometheus/common/model/signature_test.go
generated
vendored
Normal file
304
vendor/github.com/prometheus/common/model/signature_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,304 @@
|
|||
// Copyright 2014 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestLabelsToSignature pins the signature of the map-based hashing entry
// point for the empty map and one representative map.
func TestLabelsToSignature(t *testing.T) {
	var scenarios = []struct {
		in  map[string]string
		out uint64
	}{
		{
			in:  map[string]string{},
			out: 14695981039346656037,
		},
		{
			in:  map[string]string{"name": "garland, briggs", "fear": "love is not enough"},
			out: 5799056148416392346,
		},
	}

	for i, scenario := range scenarios {
		actual := LabelsToSignature(scenario.in)

		if actual != scenario.out {
			t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
		}
	}
}

// TestMetricToFingerprint checks that labelSetToFingerprint produces the same
// values as LabelsToSignature for equivalent inputs.
func TestMetricToFingerprint(t *testing.T) {
	var scenarios = []struct {
		in  LabelSet
		out Fingerprint
	}{
		{
			in:  LabelSet{},
			out: 14695981039346656037,
		},
		{
			in:  LabelSet{"name": "garland, briggs", "fear": "love is not enough"},
			out: 5799056148416392346,
		},
	}

	for i, scenario := range scenarios {
		actual := labelSetToFingerprint(scenario.in)

		if actual != scenario.out {
			t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
		}
	}
}

// TestMetricToFastFingerprint pins the XOR-based fast fingerprint values (note
// they differ from the sorted FNV-1a fingerprints for non-empty sets).
func TestMetricToFastFingerprint(t *testing.T) {
	var scenarios = []struct {
		in  LabelSet
		out Fingerprint
	}{
		{
			in:  LabelSet{},
			out: 14695981039346656037,
		},
		{
			in:  LabelSet{"name": "garland, briggs", "fear": "love is not enough"},
			out: 12952432476264840823,
		},
	}

	for i, scenario := range scenarios {
		actual := labelSetToFastFingerprint(scenario.in)

		if actual != scenario.out {
			t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
		}
	}
}
|
||||
|
||||
// TestSignatureForLabels checks that SignatureForLabels hashes only the
// requested label names and yields the empty-set signature for nil or empty
// selections.
func TestSignatureForLabels(t *testing.T) {
	var scenarios = []struct {
		in     Metric
		labels LabelNames
		out    uint64
	}{
		{
			in:     Metric{},
			labels: nil,
			out:    14695981039346656037,
		},
		{
			in:     Metric{"name": "garland, briggs", "fear": "love is not enough"},
			labels: LabelNames{"fear", "name"},
			out:    5799056148416392346,
		},
		{
			in:     Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"},
			labels: LabelNames{"fear", "name"},
			out:    5799056148416392346,
		},
		{
			in:     Metric{"name": "garland, briggs", "fear": "love is not enough"},
			labels: LabelNames{},
			out:    14695981039346656037,
		},
		{
			in:     Metric{"name": "garland, briggs", "fear": "love is not enough"},
			labels: nil,
			out:    14695981039346656037,
		},
	}

	for i, scenario := range scenarios {
		actual := SignatureForLabels(scenario.in, scenario.labels...)

		if actual != scenario.out {
			t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
		}
	}
}
|
||||
|
||||
func TestSignatureWithoutLabels(t *testing.T) {
|
||||
var scenarios = []struct {
|
||||
in Metric
|
||||
labels map[LabelName]struct{}
|
||||
out uint64
|
||||
}{
|
||||
{
|
||||
in: Metric{},
|
||||
labels: nil,
|
||||
out: 14695981039346656037,
|
||||
},
|
||||
{
|
||||
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
|
||||
labels: map[LabelName]struct{}{"fear": struct{}{}, "name": struct{}{}},
|
||||
out: 14695981039346656037,
|
||||
},
|
||||
{
|
||||
in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"},
|
||||
labels: map[LabelName]struct{}{"foo": struct{}{}},
|
||||
out: 5799056148416392346,
|
||||
},
|
||||
{
|
||||
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
|
||||
labels: map[LabelName]struct{}{},
|
||||
out: 5799056148416392346,
|
||||
},
|
||||
{
|
||||
in: Metric{"name": "garland, briggs", "fear": "love is not enough"},
|
||||
labels: nil,
|
||||
out: 5799056148416392346,
|
||||
},
|
||||
}
|
||||
|
||||
for i, scenario := range scenarios {
|
||||
actual := SignatureWithoutLabels(scenario.in, scenario.labels)
|
||||
|
||||
if actual != scenario.out {
|
||||
t.Errorf("%d. expected %d, got %d", i, scenario.out, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
if a := LabelsToSignature(l); a != e {
|
||||
b.Fatalf("expected signature of %d for %s, got %d", e, l, a)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkLabelToSignatureScalar(b *testing.B) {
|
||||
benchmarkLabelToSignature(b, nil, 14695981039346656037)
|
||||
}
|
||||
|
||||
func BenchmarkLabelToSignatureSingle(b *testing.B) {
|
||||
benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value"}, 5146282821936882169)
|
||||
}
|
||||
|
||||
func BenchmarkLabelToSignatureDouble(b *testing.B) {
|
||||
benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717)
|
||||
}
|
||||
|
||||
func BenchmarkLabelToSignatureTriple(b *testing.B) {
|
||||
benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121)
|
||||
}
|
||||
|
||||
func benchmarkMetricToFingerprint(b *testing.B, ls LabelSet, e Fingerprint) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
if a := labelSetToFingerprint(ls); a != e {
|
||||
b.Fatalf("expected signature of %d for %s, got %d", e, ls, a)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkMetricToFingerprintScalar(b *testing.B) {
|
||||
benchmarkMetricToFingerprint(b, nil, 14695981039346656037)
|
||||
}
|
||||
|
||||
func BenchmarkMetricToFingerprintSingle(b *testing.B) {
|
||||
benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5146282821936882169)
|
||||
}
|
||||
|
||||
func BenchmarkMetricToFingerprintDouble(b *testing.B) {
|
||||
benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717)
|
||||
}
|
||||
|
||||
func BenchmarkMetricToFingerprintTriple(b *testing.B) {
|
||||
benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121)
|
||||
}
|
||||
|
||||
func benchmarkMetricToFastFingerprint(b *testing.B, ls LabelSet, e Fingerprint) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
if a := labelSetToFastFingerprint(ls); a != e {
|
||||
b.Fatalf("expected signature of %d for %s, got %d", e, ls, a)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkMetricToFastFingerprintScalar benchmarks labelSetToFastFingerprint with a nil label set.
func BenchmarkMetricToFastFingerprintScalar(b *testing.B) {
	benchmarkMetricToFastFingerprint(b, nil, 14695981039346656037)
}

// BenchmarkMetricToFastFingerprintSingle benchmarks labelSetToFastFingerprint with one label.
func BenchmarkMetricToFastFingerprintSingle(b *testing.B) {
	benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5147259542624943964)
}

// BenchmarkMetricToFastFingerprintDouble benchmarks labelSetToFastFingerprint with two labels.
func BenchmarkMetricToFastFingerprintDouble(b *testing.B) {
	benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528)
}

// BenchmarkMetricToFastFingerprintTriple benchmarks labelSetToFastFingerprint with three labels.
func BenchmarkMetricToFastFingerprintTriple(b *testing.B) {
	benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676)
}
|
||||
|
||||
// BenchmarkEmptyLabelSignature verifies that LabelsToSignature performs no
// heap allocations for nil or empty label maps. Unlike the other benchmarks
// it does not time b.N iterations; it compares runtime allocation counters
// taken before and after the calls.
// NOTE(review): this is sensitive to allocations by the runtime between the
// two ReadMemStats calls — confirm it is stable in CI.
func BenchmarkEmptyLabelSignature(b *testing.B) {
	input := []map[string]string{nil, {}}

	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)

	// Cumulative bytes allocated so far; compared after the calls below.
	alloc := ms.Alloc

	for _, labels := range input {
		LabelsToSignature(labels)
	}

	runtime.ReadMemStats(&ms)

	if got := ms.Alloc; alloc != got {
		b.Fatal("expected LabelsToSignature with empty labels not to perform allocations")
	}
}
|
||||
|
||||
func benchmarkMetricToFastFingerprintConc(b *testing.B, ls LabelSet, e Fingerprint, concLevel int) {
|
||||
var start, end sync.WaitGroup
|
||||
start.Add(1)
|
||||
end.Add(concLevel)
|
||||
|
||||
for i := 0; i < concLevel; i++ {
|
||||
go func() {
|
||||
start.Wait()
|
||||
for j := b.N / concLevel; j >= 0; j-- {
|
||||
if a := labelSetToFastFingerprint(ls); a != e {
|
||||
b.Fatalf("expected signature of %d for %s, got %d", e, ls, a)
|
||||
}
|
||||
}
|
||||
end.Done()
|
||||
}()
|
||||
}
|
||||
b.ResetTimer()
|
||||
start.Done()
|
||||
end.Wait()
|
||||
}
|
||||
|
||||
// BenchmarkMetricToFastFingerprintTripleConc1 benchmarks three labels with one worker goroutine.
func BenchmarkMetricToFastFingerprintTripleConc1(b *testing.B) {
	benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 1)
}

// BenchmarkMetricToFastFingerprintTripleConc2 benchmarks three labels with two worker goroutines.
func BenchmarkMetricToFastFingerprintTripleConc2(b *testing.B) {
	benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 2)
}

// BenchmarkMetricToFastFingerprintTripleConc4 benchmarks three labels with four worker goroutines.
func BenchmarkMetricToFastFingerprintTripleConc4(b *testing.B) {
	benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 4)
}

// BenchmarkMetricToFastFingerprintTripleConc8 benchmarks three labels with eight worker goroutines.
func BenchmarkMetricToFastFingerprintTripleConc8(b *testing.B) {
	benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 8)
}
|
||||
60
vendor/github.com/prometheus/common/model/silence.go
generated
vendored
Normal file
60
vendor/github.com/prometheus/common/model/silence.go
generated
vendored
Normal file
|
|
@ -0,0 +1,60 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Matcher describes how the value of a given label is matched: verbatim by
// default, or as a regular expression when IsRegex is set.
type Matcher struct {
	Name    LabelName `json:"name"`
	Value   string    `json:"value"`
	IsRegex bool      `json:"isRegex"`
}
|
||||
|
||||
func (m *Matcher) UnmarshalJSON(b []byte) error {
|
||||
type plain Matcher
|
||||
if err := json.Unmarshal(b, (*plain)(m)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(m.Name) == 0 {
|
||||
return fmt.Errorf("label name in matcher must not be empty")
|
||||
}
|
||||
if m.IsRegex {
|
||||
if _, err := regexp.Compile(m.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Silence defines the representation of a silence definition
// in the Prometheus eco-system.
type Silence struct {
	ID uint64 `json:"id,omitempty"`

	Matchers []*Matcher `json:"matchers"`

	StartsAt time.Time `json:"startsAt"`
	EndsAt   time.Time `json:"endsAt"`

	CreatedAt time.Time `json:"createdAt,omitempty"`
	CreatedBy string    `json:"createdBy"`
	Comment   string    `json:"comment,omitempty"`
}
|
||||
230
vendor/github.com/prometheus/common/model/time.go
generated
vendored
Normal file
230
vendor/github.com/prometheus/common/model/time.go
generated
vendored
Normal file
|
|
@ -0,0 +1,230 @@
|
|||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// minimumTick is the minimum supported time resolution. It must not
	// exceed time.Second, otherwise second below would be zero.
	minimumTick = time.Millisecond
	// second is the number of minimum ticks in one second.
	second = int64(time.Second / minimumTick)
	// nanosPerTick is the number of nanoseconds per minimum tick.
	nanosPerTick = int64(minimumTick / time.Nanosecond)

	// Earliest is the earliest Time representable. Handy for
	// initializing a high watermark.
	Earliest = Time(math.MinInt64)
	// Latest is the latest Time representable. Handy for initializing
	// a low watermark.
	Latest = Time(math.MaxInt64)
)

// Time is the number of milliseconds since the epoch
// (1970-01-01 00:00 UTC) excluding leap seconds.
type Time int64

// Interval describes an interval between two timestamps.
type Interval struct {
	Start, End Time
}
|
||||
|
||||
// Now returns the current time as a Time.
func Now() Time {
	return TimeFromUnixNano(time.Now().UnixNano())
}

// TimeFromUnix returns the Time equivalent to the Unix Time t
// provided in seconds.
func TimeFromUnix(t int64) Time {
	return Time(t * second)
}

// TimeFromUnixNano returns the Time equivalent to the Unix Time
// t provided in nanoseconds. Precision below one tick (millisecond)
// is truncated.
func TimeFromUnixNano(t int64) Time {
	return Time(t / nanosPerTick)
}

// Equal reports whether two Times represent the same instant.
func (t Time) Equal(o Time) bool {
	return t == o
}

// Before reports whether the Time t is before o.
func (t Time) Before(o Time) bool {
	return t < o
}

// After reports whether the Time t is after o.
func (t Time) After(o Time) bool {
	return t > o
}

// Add returns the Time t + d.
func (t Time) Add(d time.Duration) Time {
	return t + Time(d/minimumTick)
}

// Sub returns the Duration t - o.
func (t Time) Sub(o Time) time.Duration {
	return time.Duration(t-o) * minimumTick
}

// Time returns the time.Time representation of t.
func (t Time) Time() time.Time {
	return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
}

// Unix returns t as a Unix time, the number of seconds elapsed
// since January 1, 1970 UTC.
func (t Time) Unix() int64 {
	return int64(t) / second
}

// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
// since January 1, 1970 UTC.
func (t Time) UnixNano() int64 {
	return int64(t) * nanosPerTick
}
|
||||
|
||||
// dotPrecision is the number of digits after the dot when formatting a Time
// as fractional seconds (3 for millisecond ticks).
var dotPrecision = int(math.Log10(float64(second)))

// String returns t formatted as fractional seconds since the epoch.
func (t Time) String() string {
	return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
}

// MarshalJSON implements the json.Marshaler interface. The value is emitted
// as a bare (unquoted) JSON number.
func (t Time) MarshalJSON() ([]byte, error) {
	return []byte(t.String()), nil
}
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||
func (t *Time) UnmarshalJSON(b []byte) error {
|
||||
p := strings.Split(string(b), ".")
|
||||
switch len(p) {
|
||||
case 1:
|
||||
v, err := strconv.ParseInt(string(p[0]), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = Time(v * second)
|
||||
|
||||
case 2:
|
||||
v, err := strconv.ParseInt(string(p[0]), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v *= second
|
||||
|
||||
prec := dotPrecision - len(p[1])
|
||||
if prec < 0 {
|
||||
p[1] = p[1][:dotPrecision]
|
||||
} else if prec > 0 {
|
||||
p[1] = p[1] + strings.Repeat("0", prec)
|
||||
}
|
||||
|
||||
va, err := strconv.ParseInt(p[1], 10, 32)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*t = Time(v + va)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("invalid time %q", string(b))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Duration wraps time.Duration. It is used to parse the custom duration format
// from YAML.
// This type should not propagate beyond the scope of input/output processing.
type Duration time.Duration

// ParseDuration parses a string of the form "<number><unit>" into a Duration,
// assuming that a day always has 24h, a week 7 days, and a year 365 days.
// (The original switch rejected the "y" and "w" units even though durationRE
// accepts them; both are supported now.)
func ParseDuration(durationStr string) (Duration, error) {
	matches := durationRE.FindStringSubmatch(durationStr)
	if len(matches) != 3 {
		return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
	}
	// The regexp guarantees digits only, so Atoi can only fail on overflow.
	durSeconds, err := strconv.Atoi(matches[1])
	if err != nil {
		return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
	}
	dur := time.Duration(durSeconds) * time.Second
	unit := matches[2]
	switch unit {
	case "y":
		dur *= 60 * 60 * 24 * 365
	case "w":
		dur *= 60 * 60 * 24 * 7
	case "d":
		dur *= 60 * 60 * 24
	case "h":
		dur *= 60 * 60
	case "m":
		dur *= 60
	case "s":
		dur *= 1
	default:
		return 0, fmt.Errorf("invalid time unit in duration string: %q", unit)
	}
	return Duration(dur), nil
}

// durationRE matches a non-negative integer followed by a unit.
var durationRE = regexp.MustCompile("^([0-9]+)([ywdhms]+)$")
|
||||
|
||||
func (d Duration) String() string {
|
||||
seconds := int64(time.Duration(d) / time.Second)
|
||||
factors := map[string]int64{
|
||||
"d": 60 * 60 * 24,
|
||||
"h": 60 * 60,
|
||||
"m": 60,
|
||||
"s": 1,
|
||||
}
|
||||
unit := "s"
|
||||
switch int64(0) {
|
||||
case seconds % factors["d"]:
|
||||
unit = "d"
|
||||
case seconds % factors["h"]:
|
||||
unit = "h"
|
||||
case seconds % factors["m"]:
|
||||
unit = "m"
|
||||
}
|
||||
return fmt.Sprintf("%v%v", seconds/factors[unit], unit)
|
||||
}
|
||||
|
||||
// MarshalYAML implements the yaml.Marshaler interface.
// The duration is emitted in the custom "<number><unit>" string format.
func (d Duration) MarshalYAML() (interface{}, error) {
	return d.String(), nil
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
// The duration is parsed from the custom "<number><unit>" string format.
func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var s string
	if err := unmarshal(&s); err != nil {
		return err
	}
	dur, err := ParseDuration(s)
	if err != nil {
		return err
	}
	*d = dur
	return nil
}
|
||||
86
vendor/github.com/prometheus/common/model/time_test.go
generated
vendored
Normal file
86
vendor/github.com/prometheus/common/model/time_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TestComparators checks Time's Equal, Before, and After methods against
// equal instants and instants less than one second (but at least one tick)
// apart.
func TestComparators(t *testing.T) {
	t1a := TimeFromUnix(0)
	t1b := TimeFromUnix(0)
	t2 := TimeFromUnix(2*second - 1)

	if !t1a.Equal(t1b) {
		t.Fatalf("Expected %s to be equal to %s", t1a, t1b)
	}
	if t1a.Equal(t2) {
		t.Fatalf("Expected %s to not be equal to %s", t1a, t2)
	}

	if !t1a.Before(t2) {
		t.Fatalf("Expected %s to be before %s", t1a, t2)
	}
	if t1a.Before(t1b) {
		t.Fatalf("Expected %s to not be before %s", t1a, t1b)
	}

	if !t2.After(t1a) {
		t.Fatalf("Expected %s to be after %s", t2, t1a)
	}
	if t1b.After(t1a) {
		t.Fatalf("Expected %s to not be after %s", t1b, t1a)
	}
}
|
||||
|
||||
func TestTimeConversions(t *testing.T) {
|
||||
unixSecs := int64(1136239445)
|
||||
unixNsecs := int64(123456789)
|
||||
unixNano := unixSecs*1e9 + unixNsecs
|
||||
|
||||
t1 := time.Unix(unixSecs, unixNsecs-unixNsecs%nanosPerTick)
|
||||
t2 := time.Unix(unixSecs, unixNsecs)
|
||||
|
||||
ts := TimeFromUnixNano(unixNano)
|
||||
if !ts.Time().Equal(t1) {
|
||||
t.Fatalf("Expected %s, got %s", t1, ts.Time())
|
||||
}
|
||||
|
||||
// Test available precision.
|
||||
ts = TimeFromUnixNano(t2.UnixNano())
|
||||
if !ts.Time().Equal(t1) {
|
||||
t.Fatalf("Expected %s, got %s", t1, ts.Time())
|
||||
}
|
||||
|
||||
if ts.UnixNano() != unixNano-unixNano%nanosPerTick {
|
||||
t.Fatalf("Expected %d, got %d", unixNano, ts.UnixNano())
|
||||
}
|
||||
}
|
||||
|
||||
// TestDuration checks that Time.Add and Time.Sub agree with the equivalent
// time.Time arithmetic.
func TestDuration(t *testing.T) {
	duration := time.Second + time.Minute + time.Hour
	goTime := time.Unix(1136239445, 0)

	ts := TimeFromUnix(goTime.Unix())
	if !goTime.Add(duration).Equal(ts.Add(duration).Time()) {
		t.Fatalf("Expected %s to be equal to %s", goTime.Add(duration), ts.Add(duration))
	}

	earlier := ts.Add(-duration)
	delta := ts.Sub(earlier)
	if delta != duration {
		t.Fatalf("Expected %s to be equal to %s", delta, duration)
	}
}
|
||||
395
vendor/github.com/prometheus/common/model/value.go
generated
vendored
Normal file
395
vendor/github.com/prometheus/common/model/value.go
generated
vendored
Normal file
|
|
@ -0,0 +1,395 @@
|
|||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A SampleValue is a representation of a value for a given sample at a given
// time.
type SampleValue float64

// MarshalJSON implements json.Marshaler. The value is encoded as a quoted
// string (see String).
func (v SampleValue) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.String())
}

// UnmarshalJSON implements json.Unmarshaler. It requires a quoted string and
// parses the content as a float64.
func (v *SampleValue) UnmarshalJSON(b []byte) error {
	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
		return fmt.Errorf("sample value must be a quoted string")
	}
	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
	if err != nil {
		return err
	}
	*v = SampleValue(f)
	return nil
}

// Equal reports whether v and o are the same value.
func (v SampleValue) Equal(o SampleValue) bool {
	return v == o
}

// String formats the value without an exponent (`%f`-style, shortest
// representation).
func (v SampleValue) String() string {
	return strconv.FormatFloat(float64(v), 'f', -1, 64)
}
|
||||
|
||||
// SamplePair pairs a SampleValue with a Timestamp.
type SamplePair struct {
	Timestamp Time
	Value     SampleValue
}

// MarshalJSON implements json.Marshaler. The pair is encoded as a
// two-element array: [timestamp, "value"].
func (s SamplePair) MarshalJSON() ([]byte, error) {
	t, err := json.Marshal(s.Timestamp)
	if err != nil {
		return nil, err
	}
	v, err := json.Marshal(s.Value)
	if err != nil {
		return nil, err
	}
	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
}

// UnmarshalJSON implements json.Unmarshaler, decoding the
// [timestamp, "value"] array form produced by MarshalJSON.
func (s *SamplePair) UnmarshalJSON(b []byte) error {
	v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
	return json.Unmarshal(b, &v)
}
|
||||
|
||||
// Equal returns true if this SamplePair and o have equal Values and equal
|
||||
// Timestamps.
|
||||
func (s *SamplePair) Equal(o *SamplePair) bool {
|
||||
return s == o || (s.Value == o.Value && s.Timestamp.Equal(o.Timestamp))
|
||||
}
|
||||
|
||||
func (s SamplePair) String() string {
|
||||
return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
|
||||
}
|
||||
|
||||
// Sample is a sample pair associated with a metric.
type Sample struct {
	Metric    Metric      `json:"metric"`
	Value     SampleValue `json:"value"`
	Timestamp Time        `json:"timestamp"`
}
|
||||
|
||||
// Equal compares first the metrics, then the timestamp, then the value.
|
||||
func (s *Sample) Equal(o *Sample) bool {
|
||||
if s == o {
|
||||
return true
|
||||
}
|
||||
|
||||
if !s.Metric.Equal(o.Metric) {
|
||||
return false
|
||||
}
|
||||
if !s.Timestamp.Equal(o.Timestamp) {
|
||||
return false
|
||||
}
|
||||
if s.Value != o.Value {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// String renders the sample as `<metric> => <value> @[<timestamp>]`.
func (s Sample) String() string {
	return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
		Timestamp: s.Timestamp,
		Value:     s.Value,
	})
}

// MarshalJSON implements json.Marshaler. The value and timestamp are folded
// into a SamplePair so they serialize as a single "value" array.
func (s Sample) MarshalJSON() ([]byte, error) {
	v := struct {
		Metric Metric     `json:"metric"`
		Value  SamplePair `json:"value"`
	}{
		Metric: s.Metric,
		Value: SamplePair{
			Timestamp: s.Timestamp,
			Value:     s.Value,
		},
	}

	return json.Marshal(&v)
}

// UnmarshalJSON implements json.Unmarshaler, the inverse of MarshalJSON.
func (s *Sample) UnmarshalJSON(b []byte) error {
	// Seeding v with the receiver's current values preserves them when the
	// corresponding JSON fields are absent.
	v := struct {
		Metric Metric     `json:"metric"`
		Value  SamplePair `json:"value"`
	}{
		Metric: s.Metric,
		Value: SamplePair{
			Timestamp: s.Timestamp,
			Value:     s.Value,
		},
	}

	if err := json.Unmarshal(b, &v); err != nil {
		return err
	}

	s.Metric = v.Metric
	s.Timestamp = v.Value.Timestamp
	s.Value = v.Value.Value

	return nil
}
|
||||
|
||||
// Samples is a sortable Sample slice. It implements sort.Interface.
type Samples []*Sample

// Len returns the number of samples.
func (s Samples) Len() int {
	return len(s)
}

// Less compares first the metrics, then the timestamp.
func (s Samples) Less(i, j int) bool {
	switch {
	case s[i].Metric.Before(s[j].Metric):
		return true
	case s[j].Metric.Before(s[i].Metric):
		return false
	case s[i].Timestamp.Before(s[j].Timestamp):
		return true
	default:
		return false
	}
}

// Swap exchanges the samples at i and j.
func (s Samples) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
|
||||
|
||||
// Equal compares two sets of samples and returns true if they are equal.
|
||||
func (s Samples) Equal(o Samples) bool {
|
||||
if len(s) != len(o) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i, sample := range s {
|
||||
if !sample.Equal(o[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// SampleStream is a stream of Values belonging to an attached COWMetric.
type SampleStream struct {
	Metric Metric       `json:"metric"`
	Values []SamplePair `json:"values"`
}

// String renders the metric followed by one value pair per line.
func (ss SampleStream) String() string {
	vals := make([]string, len(ss.Values))
	for i, v := range ss.Values {
		vals[i] = v.String()
	}
	return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
}

// Value is a generic interface for values resulting from a query evaluation.
type Value interface {
	Type() ValueType
	String() string
}
|
||||
|
||||
// Type implementations tie each result kind to its ValueType tag.
func (Matrix) Type() ValueType  { return ValMatrix }
func (Vector) Type() ValueType  { return ValVector }
func (*Scalar) Type() ValueType { return ValScalar }
func (*String) Type() ValueType { return ValString }

// ValueType enumerates the kinds of query evaluation results.
type ValueType int

const (
	ValNone ValueType = iota
	ValScalar
	ValVector
	ValMatrix
	ValString
)
|
||||
|
||||
// MarshalJSON implements json.Marshaler, encoding the type as its String
// form.
func (et ValueType) MarshalJSON() ([]byte, error) {
	return json.Marshal(et.String())
}

// UnmarshalJSON implements json.Unmarshaler, accepting exactly the strings
// produced by String.
func (et *ValueType) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	switch s {
	case "<ValNone>":
		*et = ValNone
	case "scalar":
		*et = ValScalar
	case "vector":
		*et = ValVector
	case "matrix":
		*et = ValMatrix
	case "string":
		*et = ValString
	default:
		return fmt.Errorf("unknown value type %q", s)
	}
	return nil
}
|
||||
|
||||
// String returns the canonical name of the value type. It panics on a
// ValueType outside the declared constants (a programming error).
func (e ValueType) String() string {
	switch e {
	case ValNone:
		return "<ValNone>"
	case ValScalar:
		return "scalar"
	case ValVector:
		return "vector"
	case ValMatrix:
		return "matrix"
	case ValString:
		return "string"
	}
	panic("ValueType.String: unhandled value type")
}
|
||||
|
||||
// Scalar is a scalar value evaluated at the set timestamp.
type Scalar struct {
	Value     SampleValue `json:"value"`
	Timestamp Time        `json:"timestamp"`
}

// String renders the scalar as `scalar: <value> @[<timestamp>]`.
func (s Scalar) String() string {
	return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler.
|
||||
func (s Scalar) MarshalJSON() ([]byte, error) {
|
||||
v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
|
||||
return json.Marshal([...]interface{}{s.Timestamp, string(v)})
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler, decoding the
// [timestamp, "value"] array form produced by MarshalJSON.
func (s *Scalar) UnmarshalJSON(b []byte) error {
	var f string
	v := [...]interface{}{&s.Timestamp, &f}

	if err := json.Unmarshal(b, &v); err != nil {
		return err
	}

	value, err := strconv.ParseFloat(f, 64)
	if err != nil {
		return fmt.Errorf("error parsing sample value: %s", err)
	}
	s.Value = SampleValue(value)
	return nil
}
|
||||
|
||||
// String is a string value evaluated at the set timestamp.
type String struct {
	Value     string `json:"value"`
	Timestamp Time   `json:"timestamp"`
}

// String returns the raw string value.
func (s *String) String() string {
	return s.Value
}

// MarshalJSON implements json.Marshaler, encoding the value as a
// [timestamp, value] array.
func (s String) MarshalJSON() ([]byte, error) {
	return json.Marshal([]interface{}{s.Timestamp, s.Value})
}

// UnmarshalJSON implements json.Unmarshaler, the inverse of MarshalJSON.
func (s *String) UnmarshalJSON(b []byte) error {
	v := [...]interface{}{&s.Timestamp, &s.Value}
	return json.Unmarshal(b, &v)
}
|
||||
|
||||
// Vector is basically only an alias for Samples, but the
// contract is that in a Vector, all Samples have the same timestamp.
type Vector []*Sample

// String renders one sample per line.
func (vec Vector) String() string {
	entries := make([]string, len(vec))
	for i, s := range vec {
		entries[i] = s.String()
	}
	return strings.Join(entries, "\n")
}

// Len and Swap are part of sort.Interface.
func (vec Vector) Len() int      { return len(vec) }
func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }

// Less compares first the metrics, then the timestamp.
func (vec Vector) Less(i, j int) bool {
	switch {
	case vec[i].Metric.Before(vec[j].Metric):
		return true
	case vec[j].Metric.Before(vec[i].Metric):
		return false
	case vec[i].Timestamp.Before(vec[j].Timestamp):
		return true
	default:
		return false
	}
}
|
||||
|
||||
// Equal compares two sets of samples and returns true if they are equal.
|
||||
func (vec Vector) Equal(o Vector) bool {
|
||||
if len(vec) != len(o) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i, sample := range vec {
|
||||
if !sample.Equal(o[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Matrix is a list of time series.
type Matrix []*SampleStream

// Len, Less, and Swap implement sort.Interface, ordering series by metric.
func (m Matrix) Len() int           { return len(m) }
func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
func (m Matrix) Swap(i, j int)      { m[i], m[j] = m[j], m[i] }

// String renders the series sorted by metric, one stream per block. The
// receiver is copied before sorting so the caller's order is untouched.
func (mat Matrix) String() string {
	matCp := make(Matrix, len(mat))
	copy(matCp, mat)
	sort.Sort(matCp)

	strs := make([]string, len(matCp))

	for i, ss := range matCp {
		strs[i] = ss.String()
	}

	return strings.Join(strs, "\n")
}
|
||||
362
vendor/github.com/prometheus/common/model/value_test.go
generated
vendored
Normal file
362
vendor/github.com/prometheus/common/model/value_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,362 @@
|
|||
// Copyright 2013 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestSamplePairJSON round-trips SamplePair through JSON: a pair is encoded
// as the two-element array [timestamp, "value"].
func TestSamplePairJSON(t *testing.T) {
	input := []struct {
		plain string
		value SamplePair
	}{
		{
			plain: `[1234.567,"123.1"]`,
			value: SamplePair{
				Value:     123.1,
				Timestamp: 1234567,
			},
		},
	}

	for _, test := range input {
		b, err := json.Marshal(test.value)
		if err != nil {
			t.Error(err)
			continue
		}

		if string(b) != test.plain {
			t.Errorf("encoding error: expected %q, got %q", test.plain, b)
			continue
		}

		var sp SamplePair
		err = json.Unmarshal(b, &sp)
		if err != nil {
			t.Error(err)
			continue
		}

		if sp != test.value {
			t.Errorf("decoding error: expected %v, got %v", test.value, sp)
		}
	}
}

// TestSampleJSON round-trips Sample through JSON: the value and timestamp
// appear as a nested SamplePair array under "value".
func TestSampleJSON(t *testing.T) {
	input := []struct {
		plain string
		value Sample
	}{
		{
			plain: `{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}`,
			value: Sample{
				Metric: Metric{
					MetricNameLabel: "test_metric",
				},
				Value:     123.1,
				Timestamp: 1234567,
			},
		},
	}

	for _, test := range input {
		b, err := json.Marshal(test.value)
		if err != nil {
			t.Error(err)
			continue
		}

		if string(b) != test.plain {
			t.Errorf("encoding error: expected %q, got %q", test.plain, b)
			continue
		}

		var sv Sample
		err = json.Unmarshal(b, &sv)
		if err != nil {
			t.Error(err)
			continue
		}

		if !reflect.DeepEqual(sv, test.value) {
			t.Errorf("decoding error: expected %v, got %v", test.value, sv)
		}
	}
}
|
||||
|
||||
func TestVectorJSON(t *testing.T) {
|
||||
input := []struct {
|
||||
plain string
|
||||
value Vector
|
||||
}{
|
||||
{
|
||||
plain: `[]`,
|
||||
value: Vector{},
|
||||
},
|
||||
{
|
||||
plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}]`,
|
||||
value: Vector{&Sample{
|
||||
Metric: Metric{
|
||||
MetricNameLabel: "test_metric",
|
||||
},
|
||||
Value: 123.1,
|
||||
Timestamp: 1234567,
|
||||
}},
|
||||
},
|
||||
{
|
||||
plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]},{"metric":{"foo":"bar"},"value":[1.234,"+Inf"]}]`,
|
||||
value: Vector{
|
||||
&Sample{
|
||||
Metric: Metric{
|
||||
MetricNameLabel: "test_metric",
|
||||
},
|
||||
Value: 123.1,
|
||||
Timestamp: 1234567,
|
||||
},
|
||||
&Sample{
|
||||
Metric: Metric{
|
||||
"foo": "bar",
|
||||
},
|
||||
Value: SampleValue(math.Inf(1)),
|
||||
Timestamp: 1234,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range input {
|
||||
b, err := json.Marshal(test.value)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
if string(b) != test.plain {
|
||||
t.Errorf("encoding error: expected %q, got %q", test.plain, b)
|
||||
continue
|
||||
}
|
||||
|
||||
var vec Vector
|
||||
err = json.Unmarshal(b, &vec)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(vec, test.value) {
|
||||
t.Errorf("decoding error: expected %v, got %v", test.value, vec)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestScalarJSON verifies that Scalar values survive a JSON
// marshal/unmarshal round trip, including the special values +Inf
// and -Inf, which must encode as quoted strings.
func TestScalarJSON(t *testing.T) {
	// Each case pairs the canonical JSON encoding with its Go value.
	input := []struct {
		plain string
		value Scalar
	}{
		{
			plain: `[123.456,"456"]`,
			value: Scalar{
				Timestamp: 123456,
				Value:     456,
			},
		},
		{
			plain: `[123123.456,"+Inf"]`,
			value: Scalar{
				Timestamp: 123123456,
				Value:     SampleValue(math.Inf(1)),
			},
		},
		{
			plain: `[123123.456,"-Inf"]`,
			value: Scalar{
				Timestamp: 123123456,
				Value:     SampleValue(math.Inf(-1)),
			},
		},
	}

	for _, test := range input {
		// Encode and compare against the expected JSON text.
		b, err := json.Marshal(test.value)
		if err != nil {
			t.Error(err)
			continue
		}

		if string(b) != test.plain {
			t.Errorf("encoding error: expected %q, got %q", test.plain, b)
			continue
		}

		// Decode the produced JSON and verify it round-trips.
		// Scalar is comparable, so == suffices here.
		var sv Scalar
		err = json.Unmarshal(b, &sv)
		if err != nil {
			t.Error(err)
			continue
		}

		if sv != test.value {
			t.Errorf("decoding error: expected %v, got %v", test.value, sv)
		}
	}
}
|
||||
|
||||
// TestStringJSON verifies that String values survive a JSON
// marshal/unmarshal round trip, including non-ASCII (multi-byte
// UTF-8) string values.
func TestStringJSON(t *testing.T) {
	// Each case pairs the canonical JSON encoding with its Go value.
	input := []struct {
		plain string
		value String
	}{
		{
			plain: `[123.456,"test"]`,
			value: String{
				Timestamp: 123456,
				Value:     "test",
			},
		},
		{
			// Non-ASCII value exercising UTF-8 handling.
			plain: `[123123.456,"台北"]`,
			value: String{
				Timestamp: 123123456,
				Value:     "台北",
			},
		},
	}

	for _, test := range input {
		// Encode and compare against the expected JSON text.
		b, err := json.Marshal(test.value)
		if err != nil {
			t.Error(err)
			continue
		}

		if string(b) != test.plain {
			t.Errorf("encoding error: expected %q, got %q", test.plain, b)
			continue
		}

		// Decode the produced JSON and verify it round-trips.
		// String is comparable, so == suffices here.
		var sv String
		err = json.Unmarshal(b, &sv)
		if err != nil {
			t.Error(err)
			continue
		}

		if sv != test.value {
			t.Errorf("decoding error: expected %v, got %v", test.value, sv)
		}
	}
}
|
||||
|
||||
// TestVectorSort verifies that sorting a Vector orders its samples
// primarily by metric fingerprint and secondarily by timestamp.
func TestVectorSort(t *testing.T) {
	// Samples are deliberately out of order by metric name (A, C, B);
	// within each metric, timestamps are already ascending.
	input := Vector{
		&Sample{
			Metric: Metric{
				MetricNameLabel: "A",
			},
			Timestamp: 1,
		},
		&Sample{
			Metric: Metric{
				MetricNameLabel: "A",
			},
			Timestamp: 2,
		},
		&Sample{
			Metric: Metric{
				MetricNameLabel: "C",
			},
			Timestamp: 1,
		},
		&Sample{
			Metric: Metric{
				MetricNameLabel: "C",
			},
			Timestamp: 2,
		},
		&Sample{
			Metric: Metric{
				MetricNameLabel: "B",
			},
			Timestamp: 1,
		},
		&Sample{
			Metric: Metric{
				MetricNameLabel: "B",
			},
			Timestamp: 2,
		},
	}

	// The same samples in expected sorted order. NOTE(review): the
	// expected order (A, B, C) assumes the fingerprints of these
	// metrics happen to sort in the same order as their names.
	expected := Vector{
		&Sample{
			Metric: Metric{
				MetricNameLabel: "A",
			},
			Timestamp: 1,
		},
		&Sample{
			Metric: Metric{
				MetricNameLabel: "A",
			},
			Timestamp: 2,
		},
		&Sample{
			Metric: Metric{
				MetricNameLabel: "B",
			},
			Timestamp: 1,
		},
		&Sample{
			Metric: Metric{
				MetricNameLabel: "B",
			},
			Timestamp: 2,
		},
		&Sample{
			Metric: Metric{
				MetricNameLabel: "C",
			},
			Timestamp: 1,
		},
		&Sample{
			Metric: Metric{
				MetricNameLabel: "C",
			},
			Timestamp: 2,
		},
	}

	sort.Sort(input)

	// Compare element-wise by fingerprint and timestamp rather than
	// by deep equality, to pinpoint the first mismatch.
	for i, actual := range input {
		actualFp := actual.Metric.Fingerprint()
		expectedFp := expected[i].Metric.Fingerprint()

		if actualFp != expectedFp {
			t.Fatalf("%d. Incorrect fingerprint. Got %s; want %s", i, actualFp.String(), expectedFp.String())
		}

		if actual.Timestamp != expected[i].Timestamp {
			t.Fatalf("%d. Incorrect timestamp. Got %s; want %s", i, actual.Timestamp, expected[i].Timestamp)
		}
	}
}
|
||||
114
vendor/github.com/prometheus/common/route/route.go
generated
vendored
Normal file
114
vendor/github.com/prometheus/common/route/route.go
generated
vendored
Normal file
|
|
@ -0,0 +1,114 @@
|
|||
package route
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"github.com/julienschmidt/httprouter"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
	// mtx guards ctxts.
	mtx = sync.RWMutex{}
	// ctxts maps each in-flight request to its context. Entries are
	// inserted and removed by handle() around each handler call.
	ctxts = map[*http.Request]context.Context{}
)
|
||||
|
||||
// Context returns the context for the request.
|
||||
func Context(r *http.Request) context.Context {
|
||||
mtx.RLock()
|
||||
defer mtx.RUnlock()
|
||||
return ctxts[r]
|
||||
}
|
||||
|
||||
// param is a distinct context key type for URL parameters, preventing
// collisions with context keys defined by other packages.
type param string
|
||||
|
||||
// Param returns param p for the context.
|
||||
func Param(ctx context.Context, p string) string {
|
||||
return ctx.Value(param(p)).(string)
|
||||
}
|
||||
|
||||
// WithParam returns a new context with param p set to v. The value can
// later be retrieved with Param.
func WithParam(ctx context.Context, p, v string) context.Context {
	return context.WithValue(ctx, param(p), v)
}
|
||||
|
||||
// handle turns a Handle into httprouter.Handle
|
||||
func handle(h http.HandlerFunc) httprouter.Handle {
|
||||
return func(w http.ResponseWriter, r *http.Request, params httprouter.Params) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
for _, p := range params {
|
||||
ctx = context.WithValue(ctx, param(p.Key), p.Value)
|
||||
}
|
||||
|
||||
mtx.Lock()
|
||||
ctxts[r] = ctx
|
||||
mtx.Unlock()
|
||||
|
||||
h(w, r)
|
||||
|
||||
mtx.Lock()
|
||||
delete(ctxts, r)
|
||||
mtx.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Router wraps httprouter.Router and adds support for prefixed sub-routers.
type Router struct {
	// rtr is the underlying router; sub-routers created via WithPrefix
	// share this same instance.
	rtr *httprouter.Router
	// prefix is prepended to every path registered on this router.
	prefix string
}
|
||||
|
||||
// New returns a new Router with an empty path prefix.
func New() *Router {
	return &Router{rtr: httprouter.New()}
}
|
||||
|
||||
// WithPrefix returns a router that prefixes all registered routes with
// prefix. The returned sub-router shares the underlying
// httprouter.Router with the receiver; only the prefix differs.
func (r *Router) WithPrefix(prefix string) *Router {
	return &Router{rtr: r.rtr, prefix: r.prefix + prefix}
}
|
||||
|
||||
// Get registers a new GET route for path (prepended with the router's
// prefix) with handler h.
func (r *Router) Get(path string, h http.HandlerFunc) {
	r.rtr.GET(r.prefix+path, handle(h))
}
|
||||
|
||||
// Del registers a new DELETE route for path (prepended with the
// router's prefix) with handler h.
func (r *Router) Del(path string, h http.HandlerFunc) {
	r.rtr.DELETE(r.prefix+path, handle(h))
}
|
||||
|
||||
// Put registers a new PUT route for path (prepended with the router's
// prefix) with handler h.
func (r *Router) Put(path string, h http.HandlerFunc) {
	r.rtr.PUT(r.prefix+path, handle(h))
}
|
||||
|
||||
// Post registers a new POST route for path (prepended with the
// router's prefix) with handler h.
func (r *Router) Post(path string, h http.HandlerFunc) {
	r.rtr.POST(r.prefix+path, handle(h))
}
|
||||
|
||||
// Redirect takes an absolute path and sends an internal HTTP redirect for it,
// prefixed by the router's path prefix. Note that this method does not include
// functionality for handling relative paths or full URL redirects.
func (r *Router) Redirect(w http.ResponseWriter, req *http.Request, path string, code int) {
	http.Redirect(w, req, r.prefix+path, code)
}
|
||||
|
||||
// ServeHTTP implements http.Handler by delegating to the underlying
// httprouter.Router.
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	r.rtr.ServeHTTP(w, req)
}
|
||||
|
||||
// FileServe returns a new http.HandlerFunc that serves files from dir.
// Using routes must provide the *filepath parameter.
func FileServe(dir string) http.HandlerFunc {
	fs := http.FileServer(http.Dir(dir))

	return func(w http.ResponseWriter, r *http.Request) {
		// Rewrite the URL path to the captured *filepath parameter so
		// the file server resolves it relative to dir. Panics (via
		// Param) if the route did not provide *filepath.
		r.URL.Path = Param(Context(r), "filepath")
		fs.ServeHTTP(w, r)
	}
}
|
||||
27
vendor/github.com/prometheus/common/route/route_test.go
generated
vendored
Normal file
27
vendor/github.com/prometheus/common/route/route_test.go
generated
vendored
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
package route
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRedirect(t *testing.T) {
|
||||
router := New().WithPrefix("/test/prefix")
|
||||
w := httptest.NewRecorder()
|
||||
r, err := http.NewRequest("GET", "http://localhost:9090/foo", nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Error building test request: %s", err)
|
||||
}
|
||||
|
||||
router.Redirect(w, r, "/some/endpoint", http.StatusFound)
|
||||
if w.Code != http.StatusFound {
|
||||
t.Fatalf("Unexpected redirect status code: got %d, want %d", w.Code, http.StatusFound)
|
||||
}
|
||||
|
||||
want := "/test/prefix/some/endpoint"
|
||||
got := w.Header()["Location"][0]
|
||||
if want != got {
|
||||
t.Fatalf("Unexpected redirect location: got %s, want %s", got, want)
|
||||
}
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue