mirror of
https://github.com/kubernetes-sigs/prometheus-adapter.git
synced 2026-04-06 09:47:54 +00:00
Compare commits
375 commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
01919d0ef1 | ||
|
|
21ea0ab279 | ||
|
|
c2ae4cdaf1 | ||
|
|
26d05b7ae9 | ||
|
|
17cef511b1 | ||
|
|
9988fd3e91 | ||
|
|
06e1d3913e | ||
|
|
39ef9fa0e7 | ||
|
|
01b29a6578 | ||
|
|
1d31a46aa1 | ||
|
|
d3784c5725 | ||
|
|
aba25ac4aa | ||
|
|
fdde189945 | ||
|
|
63bd3e8d44 | ||
|
|
1692f124d3 | ||
|
|
11d7d2bb05 | ||
|
|
b224085e86 | ||
|
|
5d9b01a57a | ||
|
|
4d5c98d364 | ||
|
|
b48bff400e | ||
|
|
9156bf3fbc | ||
|
|
27cf936f32 | ||
|
|
ed795c1ae2 | ||
|
|
a64d132d91 | ||
|
|
a01b094a63 | ||
|
|
f588141f08 | ||
|
|
98e716c7d3 | ||
|
|
ba77337ae4 | ||
|
|
a5bcb39046 | ||
|
|
2a4a4316dd | ||
|
|
f82ee9d1dc | ||
|
|
a53ee9eed1 | ||
|
|
7ba3c13bb6 | ||
|
|
891c52fe00 | ||
|
|
e772844ed8 | ||
|
|
6a1ba321da | ||
|
|
0032610ace | ||
|
|
fda3dad49b | ||
|
|
4cc5de93cb | ||
|
|
a4100f047a | ||
|
|
cb883fb789 | ||
|
|
198c469805 | ||
|
|
7cf3ac5d90 | ||
|
|
966ef227fe | ||
|
|
74ba84b76e | ||
|
|
36fbcc78f1 | ||
|
|
0a6e74a5b3 | ||
|
|
147dacee4a | ||
|
|
f733e2f74d | ||
|
|
b50333c035 | ||
|
|
f69aae4c78 | ||
|
|
8579be6c7b | ||
|
|
e69388346f | ||
|
|
86efb37019 | ||
|
|
c8caa11da1 | ||
|
|
b3a3d97596 | ||
|
|
27eb607509 | ||
|
|
d58cdcee93 | ||
|
|
aab718746b | ||
|
|
8528f29516 | ||
|
|
9a1ffb7b17 | ||
|
|
6a7f2b5ce1 | ||
|
|
f607905cf6 | ||
|
|
7bdb7f14b9 | ||
|
|
1145dbfe93 | ||
|
|
307795482f | ||
|
|
d341e8f67b | ||
|
|
b03cc3e7c8 | ||
|
|
b233597358 | ||
|
|
5d24df6353 | ||
|
|
411763b355 | ||
|
|
062c42eccc | ||
|
|
e18cc18201 | ||
|
|
70604d2f54 | ||
|
|
03cd31007e | ||
|
|
3d590269aa | ||
|
|
09cc27e609 | ||
|
|
a5faf9f920 | ||
|
|
dc0c0058d0 | ||
|
|
8958457968 | ||
|
|
0ea1c1b8d3 | ||
|
|
85e2d2052d | ||
|
|
d5c45b27b0 | ||
|
|
fdfecc8d7f | ||
|
|
e740fee947 | ||
|
|
268b2a8ec2 | ||
|
|
372dfc9d3a | ||
|
|
3afe2c74bc | ||
|
|
dd75b55557 | ||
|
|
4767a63a67 | ||
|
|
204d5996a4 | ||
|
|
d4d0a69514 | ||
|
|
e5ad3d8903 | ||
|
|
465e4153f9 | ||
|
|
f23e67113a | ||
|
|
303ac6fd45 | ||
|
|
7b4ba08b5d | ||
|
|
56b57a0b0e | ||
|
|
dd85956fbf | ||
|
|
65abf73917 | ||
|
|
47ca16ef50 | ||
|
|
cca107d97c | ||
|
|
9321bf0162 | ||
|
|
d2ae4c1569 | ||
|
|
c6e518beac | ||
|
|
508b82b712 | ||
|
|
a8742cff28 | ||
|
|
9008b12a01 | ||
|
|
df3080de31 | ||
|
|
00920756a4 | ||
|
|
e85e426ee0 | ||
|
|
bf33cafefc | ||
|
|
0aaf002fbc | ||
|
|
2cc6362964 | ||
|
|
8441ee2f74 | ||
|
|
c9e69613d3 | ||
|
|
b877e9d1bb | ||
|
|
bd568beea0 | ||
|
|
57a6fda6b1 | ||
|
|
6720d67d3a | ||
|
|
4f58885c9a | ||
|
|
3206c65b47 | ||
|
|
bb4722e38b | ||
|
|
dd107a714b | ||
|
|
d76d3eaa49 | ||
|
|
12309c9d1d | ||
|
|
3288fb9d41 | ||
|
|
56df87890c | ||
|
|
ae458c4464 | ||
|
|
1ef79d0a86 | ||
|
|
7040f70905 | ||
|
|
0a9c781e5c | ||
|
|
11ee7ee7e1 | ||
|
|
0f60f49639 | ||
|
|
8b85c68c9e | ||
|
|
4264c97f7b | ||
|
|
4eb6c313a1 | ||
|
|
cc5d3b8ed2 | ||
|
|
0a2c697e0b | ||
|
|
c6f774e28a | ||
|
|
20a5b7a80d | ||
|
|
ac814833e1 | ||
|
|
eef6b8fef1 | ||
|
|
6cea5b88ca | ||
|
|
97236f92ed | ||
|
|
d84340cc85 | ||
|
|
3fde77674e | ||
|
|
95995bcf4b | ||
|
|
5cf9dc3427 | ||
|
|
71ab6c4d90 | ||
|
|
aed49ff54f | ||
|
|
134774884c | ||
|
|
0b3ac78d19 | ||
|
|
93450fc29f | ||
|
|
c8ee46b6b4 | ||
|
|
4a22d18a5d | ||
|
|
9fd8918914 | ||
|
|
731e852494 | ||
|
|
2dbb46f158 | ||
|
|
4256683587 | ||
|
|
0ceb09085c | ||
|
|
670b3def30 | ||
|
|
7cd63baccf | ||
|
|
89425b72cc | ||
|
|
5e59822274 | ||
|
|
467f24d45c | ||
|
|
231446751c | ||
|
|
a057c04b09 | ||
|
|
ae1765153a | ||
|
|
e4d11e44e3 | ||
|
|
06e41b486c | ||
|
|
046b970edb | ||
|
|
70418fdbf8 | ||
|
|
91b9b7afc2 | ||
|
|
152cf3bbaa | ||
|
|
407217728b | ||
|
|
82a71ebb6f | ||
|
|
aae4ef6b51 | ||
|
|
89b6c7e31c | ||
|
|
a7ff3cb9c2 | ||
|
|
ef7bb58ff2 | ||
|
|
03e8eb8ddb | ||
|
|
cf45915a4a | ||
|
|
815fa20931 | ||
|
|
9dfbca09ca | ||
|
|
215cb0c292 | ||
|
|
76e61d47f6 | ||
|
|
09334d3a6d | ||
|
|
c67e8f5956 | ||
|
|
dd7a263002 | ||
|
|
9148122308 | ||
|
|
39b782bce9 | ||
|
|
f3aafa7c8f | ||
|
|
7bc0f0473d | ||
|
|
c893b1140c | ||
|
|
6c1d85ccf9 | ||
|
|
c0ae5d6dd4 | ||
|
|
fa5f8cd742 | ||
|
|
510c3724ce | ||
|
|
4b40c1796d | ||
|
|
f4d5bc9045 | ||
|
|
b67ac3e747 | ||
|
|
9db8d2f731 | ||
|
|
c30c69de09 | ||
|
|
7151cd83b9 | ||
|
|
c41a99a529 | ||
|
|
0e105eeeb1 | ||
|
|
54cd969594 | ||
|
|
df347a1427 | ||
|
|
dce6abfba9 | ||
|
|
2c1be65011 | ||
|
|
9231ff996d | ||
|
|
976c38aee4 | ||
|
|
737c8232e1 | ||
|
|
ef33937e43 | ||
|
|
3ae38c7417 | ||
|
|
3bd8b54ad5 | ||
|
|
dcf0ece4ea | ||
|
|
7e11fe30ee | ||
|
|
019a27f200 | ||
|
|
e087f72404 | ||
|
|
30f6e2fd07 | ||
|
|
808bd76c5a | ||
|
|
cd55a67b89 | ||
|
|
1cc7bed020 | ||
|
|
dd841a6e5e | ||
|
|
079f67825f | ||
|
|
12d1fb4a72 | ||
|
|
78eec11706 | ||
|
|
61a30408f6 | ||
|
|
b0423f39ac | ||
|
|
f604e07020 | ||
|
|
48e9f418fa | ||
|
|
f33fc94229 | ||
|
|
aa77eca551 | ||
|
|
96cdc4d143 | ||
|
|
147b5c8858 | ||
|
|
9f0440be0f | ||
|
|
269295a414 | ||
|
|
76020f6618 | ||
|
|
be35274475 | ||
|
|
6d8a82f423 | ||
|
|
e0ddb886a7 | ||
|
|
e16510e3e6 | ||
|
|
4d0d0f3a38 | ||
|
|
b09c680295 | ||
|
|
95df8b43dd | ||
|
|
b88f59a02f | ||
|
|
87c429b5c6 | ||
|
|
82450eb6ec | ||
|
|
69569bf7ab | ||
|
|
523aa52367 | ||
|
|
752ce84723 | ||
|
|
6b412c4a36 | ||
|
|
a858d53495 | ||
|
|
bdc8b487ba | ||
|
|
b9e5a71d72 | ||
|
|
7a1bdecc98 | ||
|
|
1d44cbbbb8 | ||
|
|
47a5ed8047 | ||
|
|
b480e45a67 | ||
|
|
43043ced4a | ||
|
|
4c673534f2 | ||
|
|
7d16598ef4 | ||
|
|
2678f90c5e | ||
|
|
9c7743822c | ||
|
|
c45a40bec0 | ||
|
|
f5a0de3b44 | ||
|
|
5a6322b4ce | ||
|
|
936bda2ef0 | ||
|
|
26e4d23789 | ||
|
|
4b5788e847 | ||
|
|
d6b587d52b | ||
|
|
6e05ab938e | ||
|
|
4ca64b85f0 | ||
|
|
c6ac5cbc87 | ||
|
|
be9797dc49 | ||
|
|
cd12d4a020 | ||
|
|
7225e3d6a7 | ||
|
|
128f9a29f5 | ||
|
|
d091fff18b | ||
|
|
5afd30edcf | ||
|
|
ed4b59b359 | ||
|
|
4788770bf6 | ||
|
|
8d8bfc7c33 | ||
|
|
03bc47e9fb | ||
|
|
28a807aa9f | ||
|
|
dce4c3f75f | ||
|
|
7e520a889e | ||
|
|
571fb936bc | ||
|
|
3e6d59813b | ||
|
|
3365677208 | ||
|
|
f2fc8dea85 | ||
|
|
552284f174 | ||
|
|
a47edfe5a9 | ||
|
|
8c4001831d | ||
|
|
57f60449f3 | ||
|
|
dc65c75140 | ||
|
|
e689c0bcfb | ||
|
|
9df21aa545 | ||
|
|
e84becd7ac | ||
|
|
486324753e | ||
|
|
4b695d4e06 | ||
|
|
2374cef641 | ||
|
|
99d52a4ce8 | ||
|
|
83036844f9 | ||
|
|
0d5c3ebd04 | ||
|
|
f0ec22a3f5 | ||
|
|
43c00d9c1c | ||
|
|
9e072b2b57 | ||
|
|
ff0a50100d | ||
|
|
72abf135d6 | ||
|
|
604208ef4f | ||
|
|
f18b6fd370 | ||
|
|
f69586b71c | ||
|
|
fe25941b91 | ||
|
|
27a617bb9e | ||
|
|
113f90ec5e | ||
|
|
680e404250 | ||
|
|
fa27078586 | ||
|
|
d447eb1ec2 | ||
|
|
5de0247b4a | ||
|
|
e103a8eed2 | ||
|
|
ff409a0994 | ||
|
|
918954bd5e | ||
|
|
27c3bc1b88 | ||
|
|
3f7f249cb8 | ||
|
|
0bb762b367 | ||
|
|
980bc01d67 | ||
|
|
8ef8c8a291 | ||
|
|
ed9eb31b3a | ||
|
|
b379be6818 | ||
|
|
6030912cc0 | ||
|
|
c2e176bb23 | ||
|
|
7480349096 | ||
|
|
ab6ada9081 | ||
|
|
a02ca0fbad | ||
|
|
dce283def1 | ||
|
|
7624952870 | ||
|
|
e9ef0bb4d0 | ||
|
|
1e5a868378 | ||
|
|
326bf3c276 | ||
|
|
99104cba2a | ||
|
|
b3dfbe1b29 | ||
|
|
d3bbe8247a | ||
|
|
9fb46c3c55 | ||
|
|
4a16ae6d9a | ||
|
|
bef034e699 | ||
|
|
88c0ad0b6a | ||
|
|
3f1b120eda | ||
|
|
5d837a29dd | ||
|
|
2e82759ca9 | ||
|
|
3bd75f5c3a | ||
|
|
405a55521f | ||
|
|
5a461c3fcd | ||
|
|
d12e5f0684 | ||
|
|
7360c51c0e | ||
|
|
919c8bcbe9 | ||
|
|
94b0063c52 | ||
|
|
7a421bb91e | ||
|
|
083300bf32 | ||
|
|
5f52b29d47 | ||
|
|
6c8f44623e | ||
|
|
49287fecc9 | ||
|
|
cc08a1fb41 | ||
|
|
c5801455ec | ||
|
|
74c0c53e4f | ||
|
|
7dd9e94aea | ||
|
|
d02384477a | ||
|
|
94379a1780 | ||
|
|
8d12f4f5fd | ||
|
|
b381aef37b | ||
|
|
93974115ad | ||
|
|
6b2c04dd61 | ||
|
|
c916572aca |
2625 changed files with 23064 additions and 933423 deletions
52
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
52
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
|
|
@ -0,0 +1,52 @@
|
||||||
|
---
|
||||||
|
name: Bug report
|
||||||
|
about: Report a bug encountered while running prometheus-adapter
|
||||||
|
title: ''
|
||||||
|
labels: kind/bug
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
<!-- Please use this template while reporting a bug and provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner. Thanks!
|
||||||
|
|
||||||
|
If the matter is security related, please disclose it privately see https://github.com/kubernetes/kube-state-metrics/blob/master/SECURITY.md
|
||||||
|
-->
|
||||||
|
|
||||||
|
**What happened?**:
|
||||||
|
|
||||||
|
**What did you expect to happen?**:
|
||||||
|
|
||||||
|
**Please provide the prometheus-adapter config**:
|
||||||
|
<details open>
|
||||||
|
<summary>prometheus-adapter config</summary>
|
||||||
|
|
||||||
|
<!--- INSERT config HERE --->
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
**Please provide the HPA resource used for autoscaling**:
|
||||||
|
<details open>
|
||||||
|
<summary>HPA yaml</summary>
|
||||||
|
|
||||||
|
<!--- INSERT yaml HERE --->
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
**Please provide the HPA status**:
|
||||||
|
|
||||||
|
**Please provide the prometheus-adapter logs with -v=6 around the time the issue happened**:
|
||||||
|
<details open>
|
||||||
|
<summary>prometheus-adapter logs</summary>
|
||||||
|
|
||||||
|
<!--- INSERT logs HERE --->
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
**Anything else we need to know?**:
|
||||||
|
|
||||||
|
**Environment**:
|
||||||
|
- prometheus-adapter version:
|
||||||
|
- prometheus version:
|
||||||
|
- Kubernetes version (use `kubectl version`):
|
||||||
|
- Cloud provider or hardware configuration:
|
||||||
|
- Other info:
|
||||||
5
.gitignore
vendored
5
.gitignore
vendored
|
|
@ -1,4 +1,5 @@
|
||||||
*.swp
|
*.swp
|
||||||
*~
|
*~
|
||||||
_output
|
/vendor
|
||||||
deploy/adapter
|
/adapter
|
||||||
|
.e2e
|
||||||
|
|
|
||||||
39
.golangci.yml
Normal file
39
.golangci.yml
Normal file
|
|
@ -0,0 +1,39 @@
|
||||||
|
run:
|
||||||
|
deadline: 5m
|
||||||
|
|
||||||
|
linters:
|
||||||
|
disable-all: true
|
||||||
|
enable:
|
||||||
|
- bodyclose
|
||||||
|
- dogsled
|
||||||
|
- dupl
|
||||||
|
- errcheck
|
||||||
|
- exportloopref
|
||||||
|
- gocritic
|
||||||
|
- gocyclo
|
||||||
|
- gofmt
|
||||||
|
- goimports
|
||||||
|
- gosec
|
||||||
|
- goprintffuncname
|
||||||
|
- gosimple
|
||||||
|
- govet
|
||||||
|
- ineffassign
|
||||||
|
- misspell
|
||||||
|
- nakedret
|
||||||
|
- nolintlint
|
||||||
|
- revive
|
||||||
|
- staticcheck
|
||||||
|
- stylecheck
|
||||||
|
- typecheck
|
||||||
|
- unconvert
|
||||||
|
- unused
|
||||||
|
- whitespace
|
||||||
|
|
||||||
|
linters-settings:
|
||||||
|
goimports:
|
||||||
|
local-prefixes: sigs.k8s.io/prometheus-adapter
|
||||||
|
revive:
|
||||||
|
rules:
|
||||||
|
- name: exported
|
||||||
|
arguments:
|
||||||
|
- disableStutteringCheck
|
||||||
|
|
@ -1,9 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
set -x
|
|
||||||
docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD"
|
|
||||||
|
|
||||||
if [[ -n $TRAVIS_TAG ]]; then
|
|
||||||
make push VERSION=${TRAVIS_TAG}
|
|
||||||
else
|
|
||||||
make push-amd64
|
|
||||||
fi
|
|
||||||
23
.travis.yml
23
.travis.yml
|
|
@ -1,23 +0,0 @@
|
||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- '1.10'
|
|
||||||
|
|
||||||
# blech, Travis downloads with capitals in DirectXMan12, which confuses go
|
|
||||||
go_import_path: github.com/directxman12/k8s-prometheus-adapter
|
|
||||||
|
|
||||||
script: make verify
|
|
||||||
|
|
||||||
sudo: required
|
|
||||||
services:
|
|
||||||
- docker
|
|
||||||
|
|
||||||
deploy:
|
|
||||||
- provider: script
|
|
||||||
script: bash .travis-deploy.sh
|
|
||||||
on:
|
|
||||||
branch: master
|
|
||||||
- provider: script
|
|
||||||
script: bash .travis-deploy.sh
|
|
||||||
on:
|
|
||||||
tags: true
|
|
||||||
31
CONTRIBUTING.md
Normal file
31
CONTRIBUTING.md
Normal file
|
|
@ -0,0 +1,31 @@
|
||||||
|
# Contributing Guidelines
|
||||||
|
|
||||||
|
Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://git.k8s.io/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
|
||||||
|
|
||||||
|
_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
|
||||||
|
We have full documentation on how to get started contributing here:
|
||||||
|
|
||||||
|
<!---
|
||||||
|
If your repo has certain guidelines for contribution, put them here ahead of the general k8s resources
|
||||||
|
-->
|
||||||
|
|
||||||
|
- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
|
||||||
|
- [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](https://git.k8s.io/community/contributors/guide#contributing)
|
||||||
|
- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers
|
||||||
|
|
||||||
|
## Mentorship
|
||||||
|
|
||||||
|
- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
|
||||||
|
|
||||||
|
<!---
|
||||||
|
Custom Information - if you're copying this template for the first time you can add custom content here, for example:
|
||||||
|
|
||||||
|
## Contact Information
|
||||||
|
|
||||||
|
- [Slack channel](https://kubernetes.slack.com/messages/kubernetes-users) - Replace `kubernetes-users` with your slack channel string, this will send users directly to your channel.
|
||||||
|
- [Mailing list](URL)
|
||||||
|
|
||||||
|
-->
|
||||||
22
Dockerfile
Normal file
22
Dockerfile
Normal file
|
|
@ -0,0 +1,22 @@
|
||||||
|
ARG ARCH
|
||||||
|
ARG GO_VERSION
|
||||||
|
|
||||||
|
FROM golang:${GO_VERSION} as build
|
||||||
|
|
||||||
|
WORKDIR /go/src/sigs.k8s.io/prometheus-adapter
|
||||||
|
COPY go.mod .
|
||||||
|
COPY go.sum .
|
||||||
|
RUN go mod download
|
||||||
|
|
||||||
|
COPY pkg pkg
|
||||||
|
COPY cmd cmd
|
||||||
|
COPY Makefile Makefile
|
||||||
|
|
||||||
|
ARG ARCH
|
||||||
|
RUN make prometheus-adapter
|
||||||
|
|
||||||
|
FROM gcr.io/distroless/static:latest-$ARCH
|
||||||
|
|
||||||
|
COPY --from=build /go/src/sigs.k8s.io/prometheus-adapter/adapter /
|
||||||
|
USER 65534
|
||||||
|
ENTRYPOINT ["/adapter"]
|
||||||
807
Gopkg.lock
generated
807
Gopkg.lock
generated
|
|
@ -1,807 +0,0 @@
|
||||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
|
||||||
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "default"
|
|
||||||
name = "bitbucket.org/ww/goautoneg"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "75cd24fc2f2c2a2088577d12123ddee5f54e0675"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/NYTimes/gziphandler"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "2600fb119af974220d3916a5916d6e31176aac1b"
|
|
||||||
version = "v1.0.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/PuerkitoBio/purell"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4"
|
|
||||||
version = "v1.1.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/PuerkitoBio/urlesc"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "de5bf2ad457846296e2031421a34e2568e304e35"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/beorn7/perks"
|
|
||||||
packages = ["quantile"]
|
|
||||||
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/coreos/etcd"
|
|
||||||
packages = [
|
|
||||||
"auth/authpb",
|
|
||||||
"client",
|
|
||||||
"clientv3",
|
|
||||||
"etcdserver/api/v3rpc/rpctypes",
|
|
||||||
"etcdserver/etcdserverpb",
|
|
||||||
"mvcc/mvccpb",
|
|
||||||
"pkg/pathutil",
|
|
||||||
"pkg/srv",
|
|
||||||
"pkg/tlsutil",
|
|
||||||
"pkg/transport",
|
|
||||||
"pkg/types",
|
|
||||||
"version"
|
|
||||||
]
|
|
||||||
revision = "33245c6b5b49130ca99280408fadfab01aac0e48"
|
|
||||||
version = "v3.3.8"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/coreos/go-semver"
|
|
||||||
packages = ["semver"]
|
|
||||||
revision = "8ab6407b697782a06568d4b7f1db25550ec2e4c6"
|
|
||||||
version = "v0.2.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/coreos/go-systemd"
|
|
||||||
packages = ["daemon"]
|
|
||||||
revision = "39ca1b05acc7ad1220e09f133283b8859a8b71ab"
|
|
||||||
version = "v17"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/davecgh/go-spew"
|
|
||||||
packages = ["spew"]
|
|
||||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
|
||||||
version = "v1.1.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/elazarl/go-bindata-assetfs"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "30f82fa23fd844bd5bb1e5f216db87fd77b5eb43"
|
|
||||||
version = "v1.0.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/emicklei/go-restful"
|
|
||||||
packages = [
|
|
||||||
".",
|
|
||||||
"log"
|
|
||||||
]
|
|
||||||
revision = "3658237ded108b4134956c1b3050349d93e7b895"
|
|
||||||
version = "v2.7.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/emicklei/go-restful-swagger12"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "dcef7f55730566d41eae5db10e7d6981829720f6"
|
|
||||||
version = "1.0.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/evanphx/json-patch"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "afac545df32f2287a079e2dfb7ba2745a643747e"
|
|
||||||
version = "v3.0.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/ghodss/yaml"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
|
|
||||||
version = "v1.0.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/go-openapi/jsonpointer"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "3a0015ad55fa9873f41605d3e8f28cd279c32ab2"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/go-openapi/jsonreference"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "3fb327e6747da3043567ee86abd02bb6376b6be2"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/go-openapi/spec"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "bcff419492eeeb01f76e77d2ebc714dc97b607f5"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/go-openapi/swag"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "811b1089cde9dad18d4d0c2d09fbdbf28dbd27a5"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/gogo/protobuf"
|
|
||||||
packages = [
|
|
||||||
"gogoproto",
|
|
||||||
"proto",
|
|
||||||
"protoc-gen-gogo/descriptor",
|
|
||||||
"sortkeys"
|
|
||||||
]
|
|
||||||
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
|
|
||||||
version = "v1.0.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/golang/glog"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/golang/protobuf"
|
|
||||||
packages = [
|
|
||||||
"proto",
|
|
||||||
"ptypes",
|
|
||||||
"ptypes/any",
|
|
||||||
"ptypes/duration",
|
|
||||||
"ptypes/timestamp"
|
|
||||||
]
|
|
||||||
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
|
|
||||||
version = "v1.1.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/google/btree"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/google/gofuzz"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/googleapis/gnostic"
|
|
||||||
packages = [
|
|
||||||
"OpenAPIv2",
|
|
||||||
"compiler",
|
|
||||||
"extensions"
|
|
||||||
]
|
|
||||||
revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
|
|
||||||
version = "v0.2.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/gregjones/httpcache"
|
|
||||||
packages = [
|
|
||||||
".",
|
|
||||||
"diskcache"
|
|
||||||
]
|
|
||||||
revision = "9cad4c3443a7200dd6400aef47183728de563a38"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/hashicorp/golang-lru"
|
|
||||||
packages = [
|
|
||||||
".",
|
|
||||||
"simplelru"
|
|
||||||
]
|
|
||||||
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/imdario/mergo"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "9316a62528ac99aaecb4e47eadd6dc8aa6533d58"
|
|
||||||
version = "v0.3.5"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/inconshreveable/mousetrap"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
|
|
||||||
version = "v1.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/json-iterator/go"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "ca39e5af3ece67bbcda3d0f4f56a8e24d9f2dad4"
|
|
||||||
version = "1.1.3"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/kubernetes-incubator/custom-metrics-apiserver"
|
|
||||||
packages = [
|
|
||||||
"pkg/apiserver",
|
|
||||||
"pkg/apiserver/installer",
|
|
||||||
"pkg/cmd/server",
|
|
||||||
"pkg/dynamicmapper",
|
|
||||||
"pkg/provider",
|
|
||||||
"pkg/registry/custom_metrics",
|
|
||||||
"pkg/registry/external_metrics"
|
|
||||||
]
|
|
||||||
revision = "d8f23423aa1d0ff2bc9656da863d721725b3c68a"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/mailru/easyjson"
|
|
||||||
packages = [
|
|
||||||
"buffer",
|
|
||||||
"jlexer",
|
|
||||||
"jwriter"
|
|
||||||
]
|
|
||||||
revision = "3fdea8d05856a0c8df22ed4bc71b3219245e4485"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
|
||||||
packages = ["pbutil"]
|
|
||||||
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
|
|
||||||
version = "v1.0.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/modern-go/concurrent"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
|
|
||||||
version = "1.0.3"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/modern-go/reflect2"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f"
|
|
||||||
version = "1.0.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/pborman/uuid"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53"
|
|
||||||
version = "v1.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/petar/GoLLRB"
|
|
||||||
packages = ["llrb"]
|
|
||||||
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/peterbourgon/diskv"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
|
|
||||||
version = "v2.0.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/pmezard/go-difflib"
|
|
||||||
packages = ["difflib"]
|
|
||||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
|
||||||
version = "v1.0.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/prometheus/client_golang"
|
|
||||||
packages = ["prometheus"]
|
|
||||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
|
||||||
version = "v0.8.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/prometheus/client_model"
|
|
||||||
packages = ["go"]
|
|
||||||
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/prometheus/common"
|
|
||||||
packages = [
|
|
||||||
"expfmt",
|
|
||||||
"internal/bitbucket.org/ww/goautoneg",
|
|
||||||
"model"
|
|
||||||
]
|
|
||||||
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/prometheus/procfs"
|
|
||||||
packages = [
|
|
||||||
".",
|
|
||||||
"internal/util",
|
|
||||||
"nfs",
|
|
||||||
"xfs"
|
|
||||||
]
|
|
||||||
revision = "7d6f385de8bea29190f15ba9931442a0eaef9af7"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/spf13/cobra"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
|
|
||||||
version = "v0.0.3"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/spf13/pflag"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
|
|
||||||
version = "v1.0.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/stretchr/testify"
|
|
||||||
packages = [
|
|
||||||
"assert",
|
|
||||||
"require"
|
|
||||||
]
|
|
||||||
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
|
|
||||||
version = "v1.2.2"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "github.com/ugorji/go"
|
|
||||||
packages = ["codec"]
|
|
||||||
revision = "b4c50a2b199d93b13dc15e78929cfb23bfdf21ab"
|
|
||||||
version = "v1.1.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "golang.org/x/crypto"
|
|
||||||
packages = ["ssh/terminal"]
|
|
||||||
revision = "a49355c7e3f8fe157a85be2f77e6e269a0f89602"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "golang.org/x/net"
|
|
||||||
packages = [
|
|
||||||
"context",
|
|
||||||
"http/httpguts",
|
|
||||||
"http2",
|
|
||||||
"http2/hpack",
|
|
||||||
"idna",
|
|
||||||
"internal/timeseries",
|
|
||||||
"trace",
|
|
||||||
"websocket"
|
|
||||||
]
|
|
||||||
revision = "afe8f62b1d6bbd81f31868121a50b06d8188e1f9"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "golang.org/x/sys"
|
|
||||||
packages = [
|
|
||||||
"unix",
|
|
||||||
"windows"
|
|
||||||
]
|
|
||||||
revision = "63fc586f45fe72d95d5240a5d5eb95e6503907d3"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "golang.org/x/text"
|
|
||||||
packages = [
|
|
||||||
"collate",
|
|
||||||
"collate/build",
|
|
||||||
"internal/colltab",
|
|
||||||
"internal/gen",
|
|
||||||
"internal/tag",
|
|
||||||
"internal/triegen",
|
|
||||||
"internal/ucd",
|
|
||||||
"language",
|
|
||||||
"secure/bidirule",
|
|
||||||
"transform",
|
|
||||||
"unicode/bidi",
|
|
||||||
"unicode/cldr",
|
|
||||||
"unicode/norm",
|
|
||||||
"unicode/rangetable",
|
|
||||||
"width"
|
|
||||||
]
|
|
||||||
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
|
||||||
version = "v0.3.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "golang.org/x/time"
|
|
||||||
packages = ["rate"]
|
|
||||||
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "google.golang.org/genproto"
|
|
||||||
packages = ["googleapis/rpc/status"]
|
|
||||||
revision = "80063a038e333bbe006c878e4c5ce4c74d055498"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "google.golang.org/grpc"
|
|
||||||
packages = [
|
|
||||||
".",
|
|
||||||
"balancer",
|
|
||||||
"balancer/base",
|
|
||||||
"balancer/roundrobin",
|
|
||||||
"codes",
|
|
||||||
"connectivity",
|
|
||||||
"credentials",
|
|
||||||
"encoding",
|
|
||||||
"encoding/proto",
|
|
||||||
"grpclog",
|
|
||||||
"health/grpc_health_v1",
|
|
||||||
"internal",
|
|
||||||
"internal/backoff",
|
|
||||||
"internal/channelz",
|
|
||||||
"internal/grpcrand",
|
|
||||||
"keepalive",
|
|
||||||
"metadata",
|
|
||||||
"naming",
|
|
||||||
"peer",
|
|
||||||
"resolver",
|
|
||||||
"resolver/dns",
|
|
||||||
"resolver/passthrough",
|
|
||||||
"stats",
|
|
||||||
"status",
|
|
||||||
"tap",
|
|
||||||
"transport"
|
|
||||||
]
|
|
||||||
revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8"
|
|
||||||
version = "v1.13.0"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "gopkg.in/inf.v0"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
|
|
||||||
version = "v0.9.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "gopkg.in/natefinch/lumberjack.v2"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "a96e63847dc3c67d17befa69c303767e2f84e54f"
|
|
||||||
version = "v2.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "gopkg.in/yaml.v2"
|
|
||||||
packages = ["."]
|
|
||||||
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
|
|
||||||
version = "v2.2.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "k8s.io/api"
|
|
||||||
packages = [
|
|
||||||
"admission/v1beta1",
|
|
||||||
"admissionregistration/v1alpha1",
|
|
||||||
"admissionregistration/v1beta1",
|
|
||||||
"apps/v1",
|
|
||||||
"apps/v1beta1",
|
|
||||||
"apps/v1beta2",
|
|
||||||
"authentication/v1",
|
|
||||||
"authentication/v1beta1",
|
|
||||||
"authorization/v1",
|
|
||||||
"authorization/v1beta1",
|
|
||||||
"autoscaling/v1",
|
|
||||||
"autoscaling/v2beta1",
|
|
||||||
"batch/v1",
|
|
||||||
"batch/v1beta1",
|
|
||||||
"batch/v2alpha1",
|
|
||||||
"certificates/v1beta1",
|
|
||||||
"core/v1",
|
|
||||||
"events/v1beta1",
|
|
||||||
"extensions/v1beta1",
|
|
||||||
"networking/v1",
|
|
||||||
"policy/v1beta1",
|
|
||||||
"rbac/v1",
|
|
||||||
"rbac/v1alpha1",
|
|
||||||
"rbac/v1beta1",
|
|
||||||
"scheduling/v1alpha1",
|
|
||||||
"scheduling/v1beta1",
|
|
||||||
"settings/v1alpha1",
|
|
||||||
"storage/v1",
|
|
||||||
"storage/v1alpha1",
|
|
||||||
"storage/v1beta1"
|
|
||||||
]
|
|
||||||
revision = "91b2d7a92a8930454bf5020e0595b8ea0f2a5047"
|
|
||||||
version = "kubernetes-1.11.0-rc.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "k8s.io/apimachinery"
|
|
||||||
packages = [
|
|
||||||
"pkg/api/equality",
|
|
||||||
"pkg/api/errors",
|
|
||||||
"pkg/api/meta",
|
|
||||||
"pkg/api/resource",
|
|
||||||
"pkg/api/validation",
|
|
||||||
"pkg/api/validation/path",
|
|
||||||
"pkg/apis/meta/internalversion",
|
|
||||||
"pkg/apis/meta/v1",
|
|
||||||
"pkg/apis/meta/v1/unstructured",
|
|
||||||
"pkg/apis/meta/v1/validation",
|
|
||||||
"pkg/apis/meta/v1beta1",
|
|
||||||
"pkg/conversion",
|
|
||||||
"pkg/conversion/queryparams",
|
|
||||||
"pkg/fields",
|
|
||||||
"pkg/labels",
|
|
||||||
"pkg/runtime",
|
|
||||||
"pkg/runtime/schema",
|
|
||||||
"pkg/runtime/serializer",
|
|
||||||
"pkg/runtime/serializer/json",
|
|
||||||
"pkg/runtime/serializer/protobuf",
|
|
||||||
"pkg/runtime/serializer/recognizer",
|
|
||||||
"pkg/runtime/serializer/streaming",
|
|
||||||
"pkg/runtime/serializer/versioning",
|
|
||||||
"pkg/selection",
|
|
||||||
"pkg/types",
|
|
||||||
"pkg/util/cache",
|
|
||||||
"pkg/util/clock",
|
|
||||||
"pkg/util/diff",
|
|
||||||
"pkg/util/errors",
|
|
||||||
"pkg/util/framer",
|
|
||||||
"pkg/util/intstr",
|
|
||||||
"pkg/util/json",
|
|
||||||
"pkg/util/mergepatch",
|
|
||||||
"pkg/util/net",
|
|
||||||
"pkg/util/rand",
|
|
||||||
"pkg/util/runtime",
|
|
||||||
"pkg/util/sets",
|
|
||||||
"pkg/util/strategicpatch",
|
|
||||||
"pkg/util/uuid",
|
|
||||||
"pkg/util/validation",
|
|
||||||
"pkg/util/validation/field",
|
|
||||||
"pkg/util/wait",
|
|
||||||
"pkg/util/waitgroup",
|
|
||||||
"pkg/util/yaml",
|
|
||||||
"pkg/version",
|
|
||||||
"pkg/watch",
|
|
||||||
"third_party/forked/golang/json",
|
|
||||||
"third_party/forked/golang/reflect"
|
|
||||||
]
|
|
||||||
revision = "fda675fbe85280c4550452dae2a5ebf74e4a59b7"
|
|
||||||
version = "kubernetes-1.11.0-rc.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "k8s.io/apiserver"
|
|
||||||
packages = [
|
|
||||||
"pkg/admission",
|
|
||||||
"pkg/admission/configuration",
|
|
||||||
"pkg/admission/initializer",
|
|
||||||
"pkg/admission/metrics",
|
|
||||||
"pkg/admission/plugin/initialization",
|
|
||||||
"pkg/admission/plugin/namespace/lifecycle",
|
|
||||||
"pkg/admission/plugin/webhook/config",
|
|
||||||
"pkg/admission/plugin/webhook/config/apis/webhookadmission",
|
|
||||||
"pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1",
|
|
||||||
"pkg/admission/plugin/webhook/errors",
|
|
||||||
"pkg/admission/plugin/webhook/generic",
|
|
||||||
"pkg/admission/plugin/webhook/mutating",
|
|
||||||
"pkg/admission/plugin/webhook/namespace",
|
|
||||||
"pkg/admission/plugin/webhook/request",
|
|
||||||
"pkg/admission/plugin/webhook/rules",
|
|
||||||
"pkg/admission/plugin/webhook/validating",
|
|
||||||
"pkg/apis/apiserver",
|
|
||||||
"pkg/apis/apiserver/install",
|
|
||||||
"pkg/apis/apiserver/v1alpha1",
|
|
||||||
"pkg/apis/audit",
|
|
||||||
"pkg/apis/audit/install",
|
|
||||||
"pkg/apis/audit/v1alpha1",
|
|
||||||
"pkg/apis/audit/v1beta1",
|
|
||||||
"pkg/apis/audit/validation",
|
|
||||||
"pkg/audit",
|
|
||||||
"pkg/audit/policy",
|
|
||||||
"pkg/authentication/authenticator",
|
|
||||||
"pkg/authentication/authenticatorfactory",
|
|
||||||
"pkg/authentication/group",
|
|
||||||
"pkg/authentication/request/anonymous",
|
|
||||||
"pkg/authentication/request/bearertoken",
|
|
||||||
"pkg/authentication/request/headerrequest",
|
|
||||||
"pkg/authentication/request/union",
|
|
||||||
"pkg/authentication/request/websocket",
|
|
||||||
"pkg/authentication/request/x509",
|
|
||||||
"pkg/authentication/serviceaccount",
|
|
||||||
"pkg/authentication/token/tokenfile",
|
|
||||||
"pkg/authentication/user",
|
|
||||||
"pkg/authorization/authorizer",
|
|
||||||
"pkg/authorization/authorizerfactory",
|
|
||||||
"pkg/authorization/union",
|
|
||||||
"pkg/endpoints",
|
|
||||||
"pkg/endpoints/discovery",
|
|
||||||
"pkg/endpoints/filters",
|
|
||||||
"pkg/endpoints/handlers",
|
|
||||||
"pkg/endpoints/handlers/negotiation",
|
|
||||||
"pkg/endpoints/handlers/responsewriters",
|
|
||||||
"pkg/endpoints/metrics",
|
|
||||||
"pkg/endpoints/openapi",
|
|
||||||
"pkg/endpoints/request",
|
|
||||||
"pkg/features",
|
|
||||||
"pkg/registry/generic",
|
|
||||||
"pkg/registry/generic/registry",
|
|
||||||
"pkg/registry/rest",
|
|
||||||
"pkg/server",
|
|
||||||
"pkg/server/filters",
|
|
||||||
"pkg/server/healthz",
|
|
||||||
"pkg/server/httplog",
|
|
||||||
"pkg/server/mux",
|
|
||||||
"pkg/server/options",
|
|
||||||
"pkg/server/resourceconfig",
|
|
||||||
"pkg/server/routes",
|
|
||||||
"pkg/server/routes/data/swagger",
|
|
||||||
"pkg/server/storage",
|
|
||||||
"pkg/storage",
|
|
||||||
"pkg/storage/errors",
|
|
||||||
"pkg/storage/etcd",
|
|
||||||
"pkg/storage/etcd/metrics",
|
|
||||||
"pkg/storage/etcd/util",
|
|
||||||
"pkg/storage/etcd3",
|
|
||||||
"pkg/storage/etcd3/preflight",
|
|
||||||
"pkg/storage/names",
|
|
||||||
"pkg/storage/storagebackend",
|
|
||||||
"pkg/storage/storagebackend/factory",
|
|
||||||
"pkg/storage/value",
|
|
||||||
"pkg/util/feature",
|
|
||||||
"pkg/util/flag",
|
|
||||||
"pkg/util/flushwriter",
|
|
||||||
"pkg/util/logs",
|
|
||||||
"pkg/util/openapi",
|
|
||||||
"pkg/util/trace",
|
|
||||||
"pkg/util/webhook",
|
|
||||||
"pkg/util/wsstream",
|
|
||||||
"plugin/pkg/audit/buffered",
|
|
||||||
"plugin/pkg/audit/log",
|
|
||||||
"plugin/pkg/audit/truncate",
|
|
||||||
"plugin/pkg/audit/webhook",
|
|
||||||
"plugin/pkg/authenticator/token/webhook",
|
|
||||||
"plugin/pkg/authorizer/webhook"
|
|
||||||
]
|
|
||||||
revision = "44b612291bb7545430c499a3882c610c727f37b0"
|
|
||||||
version = "kubernetes-1.11.0-rc.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "k8s.io/client-go"
|
|
||||||
packages = [
|
|
||||||
"discovery",
|
|
||||||
"dynamic",
|
|
||||||
"dynamic/fake",
|
|
||||||
"informers",
|
|
||||||
"informers/admissionregistration",
|
|
||||||
"informers/admissionregistration/v1alpha1",
|
|
||||||
"informers/admissionregistration/v1beta1",
|
|
||||||
"informers/apps",
|
|
||||||
"informers/apps/v1",
|
|
||||||
"informers/apps/v1beta1",
|
|
||||||
"informers/apps/v1beta2",
|
|
||||||
"informers/autoscaling",
|
|
||||||
"informers/autoscaling/v1",
|
|
||||||
"informers/autoscaling/v2beta1",
|
|
||||||
"informers/batch",
|
|
||||||
"informers/batch/v1",
|
|
||||||
"informers/batch/v1beta1",
|
|
||||||
"informers/batch/v2alpha1",
|
|
||||||
"informers/certificates",
|
|
||||||
"informers/certificates/v1beta1",
|
|
||||||
"informers/core",
|
|
||||||
"informers/core/v1",
|
|
||||||
"informers/events",
|
|
||||||
"informers/events/v1beta1",
|
|
||||||
"informers/extensions",
|
|
||||||
"informers/extensions/v1beta1",
|
|
||||||
"informers/internalinterfaces",
|
|
||||||
"informers/networking",
|
|
||||||
"informers/networking/v1",
|
|
||||||
"informers/policy",
|
|
||||||
"informers/policy/v1beta1",
|
|
||||||
"informers/rbac",
|
|
||||||
"informers/rbac/v1",
|
|
||||||
"informers/rbac/v1alpha1",
|
|
||||||
"informers/rbac/v1beta1",
|
|
||||||
"informers/scheduling",
|
|
||||||
"informers/scheduling/v1alpha1",
|
|
||||||
"informers/scheduling/v1beta1",
|
|
||||||
"informers/settings",
|
|
||||||
"informers/settings/v1alpha1",
|
|
||||||
"informers/storage",
|
|
||||||
"informers/storage/v1",
|
|
||||||
"informers/storage/v1alpha1",
|
|
||||||
"informers/storage/v1beta1",
|
|
||||||
"kubernetes",
|
|
||||||
"kubernetes/scheme",
|
|
||||||
"kubernetes/typed/admissionregistration/v1alpha1",
|
|
||||||
"kubernetes/typed/admissionregistration/v1beta1",
|
|
||||||
"kubernetes/typed/apps/v1",
|
|
||||||
"kubernetes/typed/apps/v1beta1",
|
|
||||||
"kubernetes/typed/apps/v1beta2",
|
|
||||||
"kubernetes/typed/authentication/v1",
|
|
||||||
"kubernetes/typed/authentication/v1beta1",
|
|
||||||
"kubernetes/typed/authorization/v1",
|
|
||||||
"kubernetes/typed/authorization/v1beta1",
|
|
||||||
"kubernetes/typed/autoscaling/v1",
|
|
||||||
"kubernetes/typed/autoscaling/v2beta1",
|
|
||||||
"kubernetes/typed/batch/v1",
|
|
||||||
"kubernetes/typed/batch/v1beta1",
|
|
||||||
"kubernetes/typed/batch/v2alpha1",
|
|
||||||
"kubernetes/typed/certificates/v1beta1",
|
|
||||||
"kubernetes/typed/core/v1",
|
|
||||||
"kubernetes/typed/events/v1beta1",
|
|
||||||
"kubernetes/typed/extensions/v1beta1",
|
|
||||||
"kubernetes/typed/networking/v1",
|
|
||||||
"kubernetes/typed/policy/v1beta1",
|
|
||||||
"kubernetes/typed/rbac/v1",
|
|
||||||
"kubernetes/typed/rbac/v1alpha1",
|
|
||||||
"kubernetes/typed/rbac/v1beta1",
|
|
||||||
"kubernetes/typed/scheduling/v1alpha1",
|
|
||||||
"kubernetes/typed/scheduling/v1beta1",
|
|
||||||
"kubernetes/typed/settings/v1alpha1",
|
|
||||||
"kubernetes/typed/storage/v1",
|
|
||||||
"kubernetes/typed/storage/v1alpha1",
|
|
||||||
"kubernetes/typed/storage/v1beta1",
|
|
||||||
"listers/admissionregistration/v1alpha1",
|
|
||||||
"listers/admissionregistration/v1beta1",
|
|
||||||
"listers/apps/v1",
|
|
||||||
"listers/apps/v1beta1",
|
|
||||||
"listers/apps/v1beta2",
|
|
||||||
"listers/autoscaling/v1",
|
|
||||||
"listers/autoscaling/v2beta1",
|
|
||||||
"listers/batch/v1",
|
|
||||||
"listers/batch/v1beta1",
|
|
||||||
"listers/batch/v2alpha1",
|
|
||||||
"listers/certificates/v1beta1",
|
|
||||||
"listers/core/v1",
|
|
||||||
"listers/events/v1beta1",
|
|
||||||
"listers/extensions/v1beta1",
|
|
||||||
"listers/networking/v1",
|
|
||||||
"listers/policy/v1beta1",
|
|
||||||
"listers/rbac/v1",
|
|
||||||
"listers/rbac/v1alpha1",
|
|
||||||
"listers/rbac/v1beta1",
|
|
||||||
"listers/scheduling/v1alpha1",
|
|
||||||
"listers/scheduling/v1beta1",
|
|
||||||
"listers/settings/v1alpha1",
|
|
||||||
"listers/storage/v1",
|
|
||||||
"listers/storage/v1alpha1",
|
|
||||||
"listers/storage/v1beta1",
|
|
||||||
"pkg/apis/clientauthentication",
|
|
||||||
"pkg/apis/clientauthentication/v1alpha1",
|
|
||||||
"pkg/apis/clientauthentication/v1beta1",
|
|
||||||
"pkg/version",
|
|
||||||
"plugin/pkg/client/auth/exec",
|
|
||||||
"rest",
|
|
||||||
"rest/watch",
|
|
||||||
"restmapper",
|
|
||||||
"testing",
|
|
||||||
"tools/auth",
|
|
||||||
"tools/cache",
|
|
||||||
"tools/clientcmd",
|
|
||||||
"tools/clientcmd/api",
|
|
||||||
"tools/clientcmd/api/latest",
|
|
||||||
"tools/clientcmd/api/v1",
|
|
||||||
"tools/metrics",
|
|
||||||
"tools/pager",
|
|
||||||
"tools/reference",
|
|
||||||
"transport",
|
|
||||||
"util/buffer",
|
|
||||||
"util/cert",
|
|
||||||
"util/connrotation",
|
|
||||||
"util/flowcontrol",
|
|
||||||
"util/homedir",
|
|
||||||
"util/integer",
|
|
||||||
"util/retry"
|
|
||||||
]
|
|
||||||
revision = "4cacfee698b01630072bc41e3384280562a97d95"
|
|
||||||
version = "kubernetes-1.11.0-rc.1"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
branch = "master"
|
|
||||||
name = "k8s.io/kube-openapi"
|
|
||||||
packages = [
|
|
||||||
"pkg/builder",
|
|
||||||
"pkg/common",
|
|
||||||
"pkg/handler",
|
|
||||||
"pkg/util",
|
|
||||||
"pkg/util/proto"
|
|
||||||
]
|
|
||||||
revision = "91cfa479c814065e420cee7ed227db0f63a5854e"
|
|
||||||
|
|
||||||
[[projects]]
|
|
||||||
name = "k8s.io/metrics"
|
|
||||||
packages = [
|
|
||||||
"pkg/apis/custom_metrics",
|
|
||||||
"pkg/apis/custom_metrics/install",
|
|
||||||
"pkg/apis/custom_metrics/v1beta1",
|
|
||||||
"pkg/apis/external_metrics",
|
|
||||||
"pkg/apis/external_metrics/install",
|
|
||||||
"pkg/apis/external_metrics/v1beta1"
|
|
||||||
]
|
|
||||||
revision = "89f8a18a5efb0c0162a32c75db752bc53ed7f8ee"
|
|
||||||
version = "kubernetes-1.11.0-rc.1"
|
|
||||||
|
|
||||||
[solve-meta]
|
|
||||||
analyzer-name = "dep"
|
|
||||||
analyzer-version = 1
|
|
||||||
inputs-digest = "922da691d7be0fa3bde2ab628c629fea6718792cb234a2e5c661a193f0545d6f"
|
|
||||||
solver-name = "gps-cdcl"
|
|
||||||
solver-version = 1
|
|
||||||
82
Gopkg.toml
82
Gopkg.toml
|
|
@ -1,82 +0,0 @@
|
||||||
# Gopkg.toml example
|
|
||||||
#
|
|
||||||
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
|
||||||
# for detailed Gopkg.toml documentation.
|
|
||||||
#
|
|
||||||
# required = ["github.com/user/thing/cmd/thing"]
|
|
||||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
|
||||||
#
|
|
||||||
# [[constraint]]
|
|
||||||
# name = "github.com/user/project"
|
|
||||||
# version = "1.0.0"
|
|
||||||
#
|
|
||||||
# [[constraint]]
|
|
||||||
# name = "github.com/user/project2"
|
|
||||||
# branch = "dev"
|
|
||||||
# source = "github.com/myfork/project2"
|
|
||||||
#
|
|
||||||
# [[override]]
|
|
||||||
# name = "github.com/x/y"
|
|
||||||
# version = "2.4.0"
|
|
||||||
#
|
|
||||||
# [prune]
|
|
||||||
# non-go = false
|
|
||||||
# go-tests = true
|
|
||||||
# unused-packages = true
|
|
||||||
|
|
||||||
|
|
||||||
# Utility library deps
|
|
||||||
[[constraint]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/golang/glog"
|
|
||||||
|
|
||||||
[[constraint]]
|
|
||||||
name = "github.com/prometheus/client_golang"
|
|
||||||
version = "0.8.0"
|
|
||||||
|
|
||||||
[[constraint]]
|
|
||||||
branch = "master"
|
|
||||||
name = "github.com/prometheus/common"
|
|
||||||
|
|
||||||
[[constraint]]
|
|
||||||
name = "github.com/spf13/cobra"
|
|
||||||
version = "0.0.3"
|
|
||||||
|
|
||||||
[[constraint]]
|
|
||||||
name = "gopkg.in/yaml.v2"
|
|
||||||
version = "2.2.1"
|
|
||||||
|
|
||||||
# Kubernetes incubator deps
|
|
||||||
[[constraint]]
|
|
||||||
version = "kubernetes-1.11.0-rc.1"
|
|
||||||
name = "github.com/kubernetes-incubator/custom-metrics-apiserver"
|
|
||||||
|
|
||||||
# Core Kubernetes deps
|
|
||||||
[[constraint]]
|
|
||||||
name = "k8s.io/api"
|
|
||||||
version = "kubernetes-1.11.0-rc.1"
|
|
||||||
|
|
||||||
[[constraint]]
|
|
||||||
name = "k8s.io/apimachinery"
|
|
||||||
version = "kubernetes-1.11.0-rc.1"
|
|
||||||
|
|
||||||
[[constraint]]
|
|
||||||
name = "k8s.io/apiserver"
|
|
||||||
version = "kubernetes-1.11.0-rc.1"
|
|
||||||
|
|
||||||
[[constraint]]
|
|
||||||
name = "k8s.io/client-go"
|
|
||||||
version = "kubernetes-1.11.0-rc.1"
|
|
||||||
|
|
||||||
[[constraint]]
|
|
||||||
name = "k8s.io/metrics"
|
|
||||||
version = "kubernetes-1.11.0-rc.1"
|
|
||||||
|
|
||||||
# Test deps
|
|
||||||
[[constraint]]
|
|
||||||
name = "github.com/stretchr/testify"
|
|
||||||
version = "1.2.2"
|
|
||||||
|
|
||||||
[prune]
|
|
||||||
go-tests = true
|
|
||||||
unused-packages = true
|
|
||||||
164
Makefile
164
Makefile
|
|
@ -1,84 +1,118 @@
|
||||||
REGISTRY?=directxman12
|
REGISTRY?=gcr.io/k8s-staging-prometheus-adapter
|
||||||
IMAGE?=k8s-prometheus-adapter
|
IMAGE=prometheus-adapter
|
||||||
TEMP_DIR:=$(shell mktemp -d)
|
ARCH?=$(shell go env GOARCH)
|
||||||
ARCH?=amd64
|
|
||||||
ALL_ARCH=amd64 arm arm64 ppc64le s390x
|
ALL_ARCH=amd64 arm arm64 ppc64le s390x
|
||||||
ML_PLATFORMS=linux/amd64,linux/arm,linux/arm64,linux/ppc64le,linux/s390x
|
GOPATH:=$(shell go env GOPATH)
|
||||||
OUT_DIR?=./_output
|
|
||||||
VENDOR_DOCKERIZED=0
|
|
||||||
|
|
||||||
VERSION?=latest
|
VERSION=$(shell cat VERSION)
|
||||||
GOIMAGE=golang:1.10
|
TAG_PREFIX=v
|
||||||
|
TAG?=$(TAG_PREFIX)$(VERSION)
|
||||||
|
|
||||||
ifeq ($(ARCH),amd64)
|
GO_VERSION?=1.22.5
|
||||||
BASEIMAGE?=busybox
|
GOLANGCI_VERSION?=1.56.2
|
||||||
endif
|
|
||||||
ifeq ($(ARCH),arm)
|
|
||||||
BASEIMAGE?=armhf/busybox
|
|
||||||
endif
|
|
||||||
ifeq ($(ARCH),arm64)
|
|
||||||
BASEIMAGE?=aarch64/busybox
|
|
||||||
endif
|
|
||||||
ifeq ($(ARCH),ppc64le)
|
|
||||||
BASEIMAGE?=ppc64le/busybox
|
|
||||||
endif
|
|
||||||
ifeq ($(ARCH),s390x)
|
|
||||||
BASEIMAGE?=s390x/busybox
|
|
||||||
GOIMAGE=s390x/golang:1.10
|
|
||||||
endif
|
|
||||||
|
|
||||||
.PHONY: all docker-build push-% push test verify-gofmt gofmt verify build-local-image
|
.PHONY: all
|
||||||
|
all: prometheus-adapter
|
||||||
|
|
||||||
all: $(OUT_DIR)/$(ARCH)/adapter
|
# Build
|
||||||
|
# -----
|
||||||
|
|
||||||
src_deps=$(shell find pkg cmd -type f -name "*.go")
|
SRC_DEPS=$(shell find pkg cmd -type f -name "*.go")
|
||||||
$(OUT_DIR)/%/adapter: $(src_deps)
|
|
||||||
CGO_ENABLED=0 GOARCH=$* go build -tags netgo -o $(OUT_DIR)/$*/adapter github.com/directxman12/k8s-prometheus-adapter/cmd/adapter
|
|
||||||
|
|
||||||
docker-build:
|
prometheus-adapter: $(SRC_DEPS)
|
||||||
cp deploy/Dockerfile $(TEMP_DIR)
|
CGO_ENABLED=0 GOARCH=$(ARCH) go build sigs.k8s.io/prometheus-adapter/cmd/adapter
|
||||||
cd $(TEMP_DIR) && sed -i "s|BASEIMAGE|$(BASEIMAGE)|g" Dockerfile
|
|
||||||
|
|
||||||
docker run -it -v $(TEMP_DIR):/build -v $(shell pwd):/go/src/github.com/directxman12/k8s-prometheus-adapter -e GOARCH=$(ARCH) $(GOIMAGE) /bin/bash -c "\
|
.PHONY: container
|
||||||
CGO_ENABLED=0 go build -tags netgo -o /build/adapter github.com/directxman12/k8s-prometheus-adapter/cmd/adapter"
|
container:
|
||||||
|
docker build -t $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG) --build-arg ARCH=$(ARCH) --build-arg GO_VERSION=$(GO_VERSION) .
|
||||||
|
|
||||||
docker build -t $(REGISTRY)/$(IMAGE)-$(ARCH):$(VERSION) $(TEMP_DIR)
|
# Container push
|
||||||
rm -rf $(TEMP_DIR)
|
# --------------
|
||||||
|
|
||||||
build-local-image: $(OUT_DIR)/$(ARCH)/adapter
|
PUSH_ARCH_TARGETS=$(addprefix push-,$(ALL_ARCH))
|
||||||
cp deploy/Dockerfile $(TEMP_DIR)
|
|
||||||
cp $(OUT_DIR)/$(ARCH)/adapter $(TEMP_DIR)
|
|
||||||
cd $(TEMP_DIR) && sed -i "s|BASEIMAGE|scratch|g" Dockerfile
|
|
||||||
docker build -t $(REGISTRY)/$(IMAGE)-$(ARCH):$(VERSION) $(TEMP_DIR)
|
|
||||||
rm -rf $(TEMP_DIR)
|
|
||||||
|
|
||||||
push-%:
|
.PHONY: push
|
||||||
$(MAKE) ARCH=$* docker-build
|
push: container
|
||||||
docker push $(REGISTRY)/$(IMAGE)-$*:$(VERSION)
|
docker push $(REGISTRY)/$(IMAGE)-$(ARCH):$(TAG)
|
||||||
|
|
||||||
push: ./manifest-tool $(addprefix push-,$(ALL_ARCH))
|
push-all: $(PUSH_ARCH_TARGETS) push-multi-arch;
|
||||||
./manifest-tool push from-args --platforms $(ML_PLATFORMS) --template $(REGISTRY)/$(IMAGE)-ARCH:$(VERSION) --target $(REGISTRY)/$(IMAGE):$(VERSION)
|
|
||||||
|
|
||||||
./manifest-tool:
|
.PHONY: $(PUSH_ARCH_TARGETS)
|
||||||
curl -sSL https://github.com/estesp/manifest-tool/releases/download/v0.5.0/manifest-tool-linux-amd64 > manifest-tool
|
$(PUSH_ARCH_TARGETS): push-%:
|
||||||
chmod +x manifest-tool
|
ARCH=$* $(MAKE) push
|
||||||
|
|
||||||
vendor: Gopkg.lock
|
.PHONY: push-multi-arch
|
||||||
ifeq ($(VENDOR_DOCKERIZED),1)
|
push-multi-arch: export DOCKER_CLI_EXPERIMENTAL = enabled
|
||||||
docker run -it -v $(shell pwd):/go/src/github.com/directxman12/k8s-prometheus-adapter -w /go/src/github.com/directxman12/k8s-prometheus-adapter golang:1.10 /bin/bash -c "\
|
push-multi-arch:
|
||||||
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh \
|
docker manifest create --amend $(REGISTRY)/$(IMAGE):$(TAG) $(shell echo $(ALL_ARCH) | sed -e "s~[^ ]*~$(REGISTRY)/$(IMAGE)\-&:$(TAG)~g")
|
||||||
&& dep ensure -vendor-only"
|
@for arch in $(ALL_ARCH); do docker manifest annotate --arch $${arch} $(REGISTRY)/$(IMAGE):$(TAG) $(REGISTRY)/$(IMAGE)-$${arch}:$(TAG); done
|
||||||
else
|
docker manifest push --purge $(REGISTRY)/$(IMAGE):$(TAG)
|
||||||
dep ensure -vendor-only -v
|
|
||||||
endif
|
|
||||||
|
|
||||||
|
# Test
|
||||||
|
# ----
|
||||||
|
|
||||||
|
.PHONY: test
|
||||||
test:
|
test:
|
||||||
CGO_ENABLED=0 go test ./pkg/...
|
CGO_ENABLED=0 go test ./cmd/... ./pkg/...
|
||||||
|
|
||||||
verify-gofmt:
|
.PHONY: test-e2e
|
||||||
./hack/gofmt-all.sh -v
|
test-e2e:
|
||||||
|
./test/run-e2e-tests.sh
|
||||||
|
|
||||||
gofmt:
|
|
||||||
./hack/gofmt-all.sh
|
|
||||||
|
|
||||||
verify: verify-gofmt test
|
# Static analysis
|
||||||
|
# ---------------
|
||||||
|
|
||||||
|
.PHONY: verify
|
||||||
|
verify: verify-lint verify-deps verify-generated
|
||||||
|
|
||||||
|
.PHONY: update
|
||||||
|
update: update-lint update-generated
|
||||||
|
|
||||||
|
# Format and lint
|
||||||
|
# ---------------
|
||||||
|
|
||||||
|
HAS_GOLANGCI_VERSION:=$(shell $(GOPATH)/bin/golangci-lint version --format=short)
|
||||||
|
.PHONY: golangci
|
||||||
|
golangci:
|
||||||
|
ifneq ($(HAS_GOLANGCI_VERSION), $(GOLANGCI_VERSION))
|
||||||
|
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v$(GOLANGCI_VERSION)
|
||||||
|
endif
|
||||||
|
|
||||||
|
.PHONY: verify-lint
|
||||||
|
verify-lint: golangci
|
||||||
|
$(GOPATH)/bin/golangci-lint run --modules-download-mode=readonly || (echo 'Run "make update-lint"' && exit 1)
|
||||||
|
|
||||||
|
.PHONY: update-lint
|
||||||
|
update-lint: golangci
|
||||||
|
$(GOPATH)/bin/golangci-lint run --fix --modules-download-mode=readonly
|
||||||
|
|
||||||
|
|
||||||
|
# Dependencies
|
||||||
|
# ------------
|
||||||
|
|
||||||
|
.PHONY: verify-deps
|
||||||
|
verify-deps:
|
||||||
|
go mod verify
|
||||||
|
go mod tidy
|
||||||
|
@git diff --exit-code -- go.mod go.sum
|
||||||
|
|
||||||
|
# Generation
|
||||||
|
# ----------
|
||||||
|
|
||||||
|
generated_files=pkg/api/generated/openapi/zz_generated.openapi.go
|
||||||
|
|
||||||
|
.PHONY: verify-generated
|
||||||
|
verify-generated: update-generated
|
||||||
|
@git diff --exit-code -- $(generated_files)
|
||||||
|
|
||||||
|
.PHONY: update-generated
|
||||||
|
update-generated:
|
||||||
|
go install -mod=readonly k8s.io/kube-openapi/cmd/openapi-gen
|
||||||
|
$(GOPATH)/bin/openapi-gen --logtostderr \
|
||||||
|
--go-header-file ./hack/boilerplate.go.txt \
|
||||||
|
--output-pkg ./pkg/api/generated/openapi \
|
||||||
|
--output-file zz_generated.openapi.go \
|
||||||
|
--output-dir ./pkg/api/generated/openapi \
|
||||||
|
-r /dev/null \
|
||||||
|
"k8s.io/metrics/pkg/apis/custom_metrics" "k8s.io/metrics/pkg/apis/custom_metrics/v1beta1" "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2" "k8s.io/metrics/pkg/apis/external_metrics" "k8s.io/metrics/pkg/apis/external_metrics/v1beta1" "k8s.io/metrics/pkg/apis/metrics" "k8s.io/metrics/pkg/apis/metrics/v1beta1" "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/version" "k8s.io/api/core/v1"
|
||||||
|
|
|
||||||
16
NOTICE
Normal file
16
NOTICE
Normal file
|
|
@ -0,0 +1,16 @@
|
||||||
|
When donating the k8s-prometheus-adapter project to the CNCF, we could not
|
||||||
|
reach all the contributors to make them sign the CNCF CLA. As such, according
|
||||||
|
to the CNCF rules to donate a repository, we must add a NOTICE referencing
|
||||||
|
section 7 of the CLA with a list of developers who could not be reached.
|
||||||
|
|
||||||
|
`7. Should You wish to submit work that is not Your original creation, You may
|
||||||
|
submit it to the Foundation separately from any Contribution, identifying the
|
||||||
|
complete details of its source and of any license or other restriction
|
||||||
|
(including, but not limited to, related patents, trademarks, and license
|
||||||
|
agreements) of which you are personally aware, and conspicuously marking the
|
||||||
|
work as "Submitted on behalf of a third-party: [named here]".`
|
||||||
|
|
||||||
|
Submitted on behalf of a third-party: Andrew "thisisamurray" Murray
|
||||||
|
Submitted on behalf of a third-party: Duane "duane-ibm" D'Souza
|
||||||
|
Submitted on behalf of a third-party: John "john-delivuk" Delivuk
|
||||||
|
Submitted on behalf of a third-party: Richard "rrtaylor" Taylor
|
||||||
17
OWNERS
Normal file
17
OWNERS
Normal file
|
|
@ -0,0 +1,17 @@
|
||||||
|
# See the OWNERS docs at https://go.k8s.io/owners
|
||||||
|
approvers:
|
||||||
|
- dgrisonnet
|
||||||
|
- logicalhan
|
||||||
|
- dashpole
|
||||||
|
|
||||||
|
reviewers:
|
||||||
|
- dgrisonnet
|
||||||
|
- olivierlemasle
|
||||||
|
- logicalhan
|
||||||
|
- dashpole
|
||||||
|
|
||||||
|
emeritus_approvers:
|
||||||
|
- brancz
|
||||||
|
- directxman12
|
||||||
|
- lilic
|
||||||
|
- s-urbaniak
|
||||||
82
README.md
82
README.md
|
|
@ -1,13 +1,10 @@
|
||||||
Kubernetes Custom Metrics Adapter for Prometheus
|
# Prometheus Adapter for Kubernetes Metrics APIs
|
||||||
================================================
|
|
||||||
|
|
||||||
[](https://travis-ci.org/DirectXMan12/k8s-prometheus-adapter)
|
This repository contains an implementation of the Kubernetes Custom, Resource and External
|
||||||
|
[Metric APIs](https://github.com/kubernetes/metrics).
|
||||||
|
|
||||||
This repository contains an implementation of the Kubernetes custom
|
This adapter is therefore suitable for use with the autoscaling/v2 Horizontal Pod Autoscaler in Kubernetes 1.6+.
|
||||||
metrics API
|
It can also replace the [metrics server](https://github.com/kubernetes-incubator/metrics-server) on clusters that already run Prometheus and collect the appropriate metrics.
|
||||||
([custom.metrics.k8s.io/v1beta1](https://github.com/kubernetes/metrics/tree/master/pkg/apis/custom_metrics)),
|
|
||||||
suitable for use with the autoscaling/v2 Horizontal Pod Autoscaler in
|
|
||||||
Kubernetes 1.6+.
|
|
||||||
|
|
||||||
Quick Links
|
Quick Links
|
||||||
-----------
|
-----------
|
||||||
|
|
@ -18,13 +15,30 @@ Quick Links
|
||||||
|
|
||||||
Installation
|
Installation
|
||||||
-------------
|
-------------
|
||||||
If you're a helm user, a helm chart is listed on the Kubeapps Hub as [stable/prometheus-adapter](https://github.com/helm/charts/blob/master/stable/prometheus-adapter/README.md).
|
If you're a helm user, a helm chart is listed on prometheus-community repository as [prometheus-community/prometheus-adapter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-adapter).
|
||||||
|
|
||||||
To install it with the release name `my-release`, run this Helm command:
|
To install it with the release name `my-release`, run this Helm command:
|
||||||
|
|
||||||
|
For Helm2
|
||||||
```console
|
```console
|
||||||
$ helm install --name my-release stable/prometheus-adapter
|
$ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
|
||||||
|
$ helm repo update
|
||||||
|
$ helm install --name my-release prometheus-community/prometheus-adapter
|
||||||
```
|
```
|
||||||
|
For Helm3 ( as name is mandatory )
|
||||||
|
```console
|
||||||
|
$ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
|
||||||
|
$ helm repo update
|
||||||
|
$ helm install my-release prometheus-community/prometheus-adapter
|
||||||
|
```
|
||||||
|
|
||||||
|
Official images
|
||||||
|
---
|
||||||
|
All official images for releases after v0.8.4 are available in `registry.k8s.io/prometheus-adapter/prometheus-adapter:$VERSION`. The project also maintains a [staging registry](https://console.cloud.google.com/gcr/images/k8s-staging-prometheus-adapter/GLOBAL/) where images for each commit from the master branch are published. You can use this registry if you need to test a version from a specific commit, or if you need to deploy a patch while waiting for a new release.
|
||||||
|
|
||||||
|
Images for versions v0.8.4 and prior are only available in unofficial registries:
|
||||||
|
* https://quay.io/repository/coreos/k8s-prometheus-adapter-amd64
|
||||||
|
* https://hub.docker.com/r/directxman12/k8s-prometheus-adapter/
|
||||||
|
|
||||||
Configuration
|
Configuration
|
||||||
-------------
|
-------------
|
||||||
|
|
@ -35,7 +49,7 @@ will attempt to using [Kubernetes in-cluster
|
||||||
config](https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod)
|
config](https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod)
|
||||||
to connect to the cluster.
|
to connect to the cluster.
|
||||||
|
|
||||||
It takes the following addition arguments specific to configuring how the
|
It takes the following additional arguments specific to configuring how the
|
||||||
adapter talks to Prometheus and the main Kubernetes cluster:
|
adapter talks to Prometheus and the main Kubernetes cluster:
|
||||||
|
|
||||||
- `--lister-kubeconfig=<path-to-kubeconfig>`: This configures
|
- `--lister-kubeconfig=<path-to-kubeconfig>`: This configures
|
||||||
|
|
@ -44,11 +58,26 @@ adapter talks to Prometheus and the main Kubernetes cluster:
|
||||||
in-cluster config.
|
in-cluster config.
|
||||||
|
|
||||||
- `--metrics-relist-interval=<duration>`: This is the interval at which to
|
- `--metrics-relist-interval=<duration>`: This is the interval at which to
|
||||||
update the cache of available metrics from Prometheus. Since the adapter
|
update the cache of available metrics from Prometheus. By default, this
|
||||||
only lists metrics during discovery that exist between the current time and
|
value is set to 10 minutes.
|
||||||
the last discovery query, your relist interval should be equal to or larger
|
|
||||||
than your Prometheus scrape interval, otherwise your metrics will
|
- `--metrics-max-age=<duration>`: This is the max age of the metrics to be
|
||||||
occaisonally disappear from the adapter.
|
loaded from Prometheus. For example, when set to `10m`, it will query
|
||||||
|
Prometheus for metrics since 10m ago, and only those that has datapoints
|
||||||
|
within the time period will appear in the adapter. Therefore, the metrics-max-age
|
||||||
|
should be equal to or larger than your Prometheus' scrape interval,
|
||||||
|
or your metrics will occaisonally disappear from the adapter.
|
||||||
|
By default, this is set to be the same as metrics-relist-interval to avoid
|
||||||
|
some confusing behavior (See this [PR](https://github.com/kubernetes-sigs/prometheus-adapter/pull/230)).
|
||||||
|
|
||||||
|
Note: We recommend setting this only if you understand what is happening.
|
||||||
|
For example, this setting could be useful in cases where the scrape duration is
|
||||||
|
over a network call, e.g. pulling metrics from AWS CloudWatch, or Google Monitoring,
|
||||||
|
more specifically, Google Monitoring sometimes have delays on when data will show
|
||||||
|
up in their system after being sampled. This means that even if you scraped data
|
||||||
|
frequently, they might not show up soon. If you configured the relist interval to
|
||||||
|
a short period but without configuring this, you might not be able to see your
|
||||||
|
metrics in the adapter in certain scenarios.
|
||||||
|
|
||||||
- `--prometheus-url=<url>`: This is the URL used to connect to Prometheus.
|
- `--prometheus-url=<url>`: This is the URL used to connect to Prometheus.
|
||||||
It will eventually contain query parameters to configure the connection.
|
It will eventually contain query parameters to configure the connection.
|
||||||
|
|
@ -62,7 +91,7 @@ Presentation
|
||||||
------------
|
------------
|
||||||
|
|
||||||
The adapter gathers the names of available metrics from Prometheus
|
The adapter gathers the names of available metrics from Prometheus
|
||||||
a regular interval (see [Configuration](#configuration) above), and then
|
at a regular interval (see [Configuration](#configuration) above), and then
|
||||||
only exposes metrics that follow specific forms.
|
only exposes metrics that follow specific forms.
|
||||||
|
|
||||||
The rules governing this discovery are specified in a [configuration file](docs/config.md).
|
The rules governing this discovery are specified in a [configuration file](docs/config.md).
|
||||||
|
|
@ -71,7 +100,7 @@ you can use the included `config-gen` tool to generate a configuration that matc
|
||||||
the old implicit ruleset:
|
the old implicit ruleset:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ go run cmd/config-gen main.go [--rate-interval=<duration>] [--label-prefix=<prefix>]
|
$ go run cmd/config-gen/main.go [--rate-interval=<duration>] [--label-prefix=<prefix>]
|
||||||
```
|
```
|
||||||
|
|
||||||
Example
|
Example
|
||||||
|
|
@ -107,7 +136,7 @@ information.
|
||||||
### Why isn't my metric showing up?
|
### Why isn't my metric showing up?
|
||||||
|
|
||||||
First, check your configuration. Does it select your metric? You can
|
First, check your configuration. Does it select your metric? You can
|
||||||
find the [default configuration](/deploy/custom-metrics-config-map.yaml)
|
find the [default configuration](/deploy/manifests/custom-metrics-config-map.yaml)
|
||||||
in the deploy directory, and more information about configuring the
|
in the deploy directory, and more information about configuring the
|
||||||
adapter in the [docs](/docs/config.md).
|
adapter in the [docs](/docs/config.md).
|
||||||
|
|
||||||
|
|
@ -185,3 +214,18 @@ queries. The adapter only considers metrics with datapoints in the window
|
||||||
`[now-discoveryInterval, now]` (in order to only capture metrics that are
|
`[now-discoveryInterval, now]` (in order to only capture metrics that are
|
||||||
still present), so make sure that your discovery interval is at least as
|
still present), so make sure that your discovery interval is at least as
|
||||||
large as your collection interval.
|
large as your collection interval.
|
||||||
|
|
||||||
|
### I get errors when query namespace prefixed metrics?
|
||||||
|
|
||||||
|
I have namespace prefixed metrics like `{ "name": "namespaces/node_memory_PageTables_bytes", "singularName": "", "namespaced": false, "kind": "MetricValueList", "verbs": [ "get" ] }`, but I get error `Error from server (InternalError): Internal error occurred: unable to list matching resources` when access with `kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1/namespaces/*/node_memory_PageTables_bytes` .
|
||||||
|
|
||||||
|
Actually namespace prefixed metrics are special, we should access them with `kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1/namespaces/*/metrics/node_memory_PageTables_bytes`.
|
||||||
|
|
||||||
|
## Community, discussion, contribution, and support
|
||||||
|
|
||||||
|
Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/).
|
||||||
|
|
||||||
|
You can reach the maintainers of this project at:
|
||||||
|
|
||||||
|
- [Slack](http://slack.k8s.io/)
|
||||||
|
- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-dev)
|
||||||
|
|
|
||||||
13
RELEASE.md
Normal file
13
RELEASE.md
Normal file
|
|
@ -0,0 +1,13 @@
|
||||||
|
# Release Process
|
||||||
|
|
||||||
|
prometheus-adapter is released on an as-needed basis. The process is as follows:
|
||||||
|
|
||||||
|
1. An issue is filed proposing a new release, with a changelog since the last release
|
||||||
|
1. At least one [OWNER](OWNERS) must LGTM this release
|
||||||
|
1. A PR that bumps version hardcoded in code is created and merged
|
||||||
|
1. An OWNER creates a draft Github release
|
||||||
|
1. An OWNER creates a release tag using `git tag -s $VERSION`, inserts the changelog and pushes the tag with `git push $VERSION`. Then waits for [prow.k8s.io](https://prow.k8s.io) to build and push new images to [gcr.io/k8s-staging-prometheus-adapter](https://gcr.io/k8s-staging-prometheus-adapter)
|
||||||
|
1. A PR in [kubernetes/k8s.io](https://github.com/kubernetes/k8s.io/blob/main/k8s.gcr.io/images/k8s-staging-prometheus-adapter/images.yaml) is created to release images to `k8s.gcr.io`
|
||||||
|
1. An OWNER publishes the GitHub release
|
||||||
|
1. An announcement email is sent to `kubernetes-sig-instrumentation@googlegroups.com` with the subject `[ANNOUNCE] prometheus-adapter $VERSION is released`
|
||||||
|
1. The release issue is closed
|
||||||
22
SECURITY.md
Normal file
22
SECURITY.md
Normal file
|
|
@ -0,0 +1,22 @@
|
||||||
|
# Security Policy
|
||||||
|
|
||||||
|
## Security Announcements
|
||||||
|
|
||||||
|
Join the [kubernetes-security-announce] group for security and vulnerability announcements.
|
||||||
|
|
||||||
|
You can also subscribe to an RSS feed of the above using [this link][kubernetes-security-announce-rss].
|
||||||
|
|
||||||
|
## Reporting a Vulnerability
|
||||||
|
|
||||||
|
Instructions for reporting a vulnerability can be found on the
|
||||||
|
[Kubernetes Security and Disclosure Information] page.
|
||||||
|
|
||||||
|
## Supported Versions
|
||||||
|
|
||||||
|
Information about supported Kubernetes versions can be found on the
|
||||||
|
[Kubernetes version and version skew support policy] page on the Kubernetes website.
|
||||||
|
|
||||||
|
[kubernetes-security-announce]: https://groups.google.com/forum/#!forum/kubernetes-security-announce
|
||||||
|
[kubernetes-security-announce-rss]: https://groups.google.com/forum/feed/kubernetes-security-announce/msgs/rss_v2_0.xml?num=50
|
||||||
|
[Kubernetes version and version skew support policy]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions
|
||||||
|
[Kubernetes Security and Disclosure Information]: https://kubernetes.io/docs/reference/issues-security/security/#report-a-vulnerability
|
||||||
14
SECURITY_CONTACTS
Normal file
14
SECURITY_CONTACTS
Normal file
|
|
@ -0,0 +1,14 @@
|
||||||
|
# Defined below are the security contacts for this repo.
|
||||||
|
#
|
||||||
|
# They are the contact point for the Product Security Committee to reach out
|
||||||
|
# to for triaging and handling of incoming issues.
|
||||||
|
#
|
||||||
|
# The below names agree to abide by the
|
||||||
|
# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
|
||||||
|
# and will be removed and replaced if they violate that agreement.
|
||||||
|
#
|
||||||
|
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
|
||||||
|
# INSTRUCTIONS AT https://kubernetes.io/security/
|
||||||
|
|
||||||
|
dgrisonnet
|
||||||
|
s-urbaniak
|
||||||
1
VERSION
Normal file
1
VERSION
Normal file
|
|
@ -0,0 +1 @@
|
||||||
|
0.12.0
|
||||||
11
cloudbuild.yaml
Normal file
11
cloudbuild.yaml
Normal file
|
|
@ -0,0 +1,11 @@
|
||||||
|
# See https://cloud.google.com/cloud-build/docs/build-config
|
||||||
|
timeout: 3600s
|
||||||
|
options:
|
||||||
|
substitution_option: ALLOW_LOOSE
|
||||||
|
steps:
|
||||||
|
- name: 'gcr.io/k8s-staging-test-infra/gcb-docker-gcloud:v20211118-2f2d816b90'
|
||||||
|
entrypoint: make
|
||||||
|
env:
|
||||||
|
- TAG=$_PULL_BASE_REF
|
||||||
|
args:
|
||||||
|
- push-all
|
||||||
|
|
@ -17,27 +17,454 @@ limitations under the License.
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"flag"
|
"crypto/tls"
|
||||||
|
"crypto/x509"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
"k8s.io/apimachinery/pkg/util/wait"
|
corev1 "k8s.io/api/core/v1"
|
||||||
"k8s.io/apiserver/pkg/util/logs"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
openapinamer "k8s.io/apiserver/pkg/endpoints/openapi"
|
||||||
|
genericapiserver "k8s.io/apiserver/pkg/server"
|
||||||
|
"k8s.io/client-go/metadata"
|
||||||
|
"k8s.io/client-go/metadata/metadatainformer"
|
||||||
|
"k8s.io/client-go/rest"
|
||||||
|
"k8s.io/client-go/tools/clientcmd"
|
||||||
|
"k8s.io/client-go/transport"
|
||||||
|
"k8s.io/component-base/logs"
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
|
||||||
"github.com/directxman12/k8s-prometheus-adapter/cmd/adapter/app"
|
customexternalmetrics "sigs.k8s.io/custom-metrics-apiserver/pkg/apiserver"
|
||||||
|
basecmd "sigs.k8s.io/custom-metrics-apiserver/pkg/cmd"
|
||||||
|
"sigs.k8s.io/custom-metrics-apiserver/pkg/provider"
|
||||||
|
"sigs.k8s.io/metrics-server/pkg/api"
|
||||||
|
|
||||||
|
generatedopenapi "sigs.k8s.io/prometheus-adapter/pkg/api/generated/openapi"
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
mprom "sigs.k8s.io/prometheus-adapter/pkg/client/metrics"
|
||||||
|
adaptercfg "sigs.k8s.io/prometheus-adapter/pkg/config"
|
||||||
|
cmprov "sigs.k8s.io/prometheus-adapter/pkg/custom-provider"
|
||||||
|
extprov "sigs.k8s.io/prometheus-adapter/pkg/external-provider"
|
||||||
|
"sigs.k8s.io/prometheus-adapter/pkg/naming"
|
||||||
|
resprov "sigs.k8s.io/prometheus-adapter/pkg/resourceprovider"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type PrometheusAdapter struct {
|
||||||
|
basecmd.AdapterBase
|
||||||
|
|
||||||
|
// PrometheusURL is the URL describing how to connect to Prometheus. Query parameters configure connection options.
|
||||||
|
PrometheusURL string
|
||||||
|
// PrometheusAuthInCluster enables using the auth details from the in-cluster kubeconfig to connect to Prometheus
|
||||||
|
PrometheusAuthInCluster bool
|
||||||
|
// PrometheusAuthConf is the kubeconfig file that contains auth details used to connect to Prometheus
|
||||||
|
PrometheusAuthConf string
|
||||||
|
// PrometheusCAFile points to the file containing the ca-root for connecting with Prometheus
|
||||||
|
PrometheusCAFile string
|
||||||
|
// PrometheusClientTLSCertFile points to the file containing the client TLS cert for connecting with Prometheus
|
||||||
|
PrometheusClientTLSCertFile string
|
||||||
|
// PrometheusClientTLSKeyFile points to the file containing the client TLS key for connecting with Prometheus
|
||||||
|
PrometheusClientTLSKeyFile string
|
||||||
|
// PrometheusTokenFile points to the file that contains the bearer token when connecting with Prometheus
|
||||||
|
PrometheusTokenFile string
|
||||||
|
// PrometheusHeaders is a k=v list of headers to set on requests to PrometheusURL
|
||||||
|
PrometheusHeaders []string
|
||||||
|
// PrometheusVerb is a verb to set on requests to PrometheusURL
|
||||||
|
PrometheusVerb string
|
||||||
|
// AdapterConfigFile points to the file containing the metrics discovery configuration.
|
||||||
|
AdapterConfigFile string
|
||||||
|
// MetricsRelistInterval is the interval at which to relist the set of available metrics
|
||||||
|
MetricsRelistInterval time.Duration
|
||||||
|
// MetricsMaxAge is the period to query available metrics for
|
||||||
|
MetricsMaxAge time.Duration
|
||||||
|
// DisableHTTP2 indicates that http2 should not be enabled.
|
||||||
|
DisableHTTP2 bool
|
||||||
|
metricsConfig *adaptercfg.MetricsDiscoveryConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cmd *PrometheusAdapter) makePromClient() (prom.Client, error) {
|
||||||
|
baseURL, err := url.Parse(cmd.PrometheusURL)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("invalid Prometheus URL %q: %v", baseURL, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cmd.PrometheusVerb != http.MethodGet && cmd.PrometheusVerb != http.MethodPost {
|
||||||
|
return nil, fmt.Errorf("unsupported Prometheus HTTP verb %q; supported verbs: \"GET\" and \"POST\"", cmd.PrometheusVerb)
|
||||||
|
}
|
||||||
|
|
||||||
|
var httpClient *http.Client
|
||||||
|
|
||||||
|
if cmd.PrometheusCAFile != "" {
|
||||||
|
prometheusCAClient, err := makePrometheusCAClient(cmd.PrometheusCAFile, cmd.PrometheusClientTLSCertFile, cmd.PrometheusClientTLSKeyFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
httpClient = prometheusCAClient
|
||||||
|
klog.Info("successfully loaded ca from file")
|
||||||
|
} else {
|
||||||
|
kubeconfigHTTPClient, err := makeKubeconfigHTTPClient(cmd.PrometheusAuthInCluster, cmd.PrometheusAuthConf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
httpClient = kubeconfigHTTPClient
|
||||||
|
klog.Info("successfully using in-cluster auth")
|
||||||
|
}
|
||||||
|
|
||||||
|
if cmd.PrometheusTokenFile != "" {
|
||||||
|
data, err := os.ReadFile(cmd.PrometheusTokenFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read prometheus-token-file: %v", err)
|
||||||
|
}
|
||||||
|
wrappedTransport := http.DefaultTransport
|
||||||
|
if httpClient.Transport != nil {
|
||||||
|
wrappedTransport = httpClient.Transport
|
||||||
|
}
|
||||||
|
httpClient.Transport = transport.NewBearerAuthRoundTripper(string(data), wrappedTransport)
|
||||||
|
}
|
||||||
|
genericPromClient := prom.NewGenericAPIClient(httpClient, baseURL, parseHeaderArgs(cmd.PrometheusHeaders))
|
||||||
|
instrumentedGenericPromClient := mprom.InstrumentGenericAPIClient(genericPromClient, baseURL.String())
|
||||||
|
return prom.NewClientForAPI(instrumentedGenericPromClient, cmd.PrometheusVerb), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cmd *PrometheusAdapter) addFlags() {
|
||||||
|
cmd.Flags().StringVar(&cmd.PrometheusURL, "prometheus-url", cmd.PrometheusURL,
|
||||||
|
"URL for connecting to Prometheus.")
|
||||||
|
cmd.Flags().BoolVar(&cmd.PrometheusAuthInCluster, "prometheus-auth-incluster", cmd.PrometheusAuthInCluster,
|
||||||
|
"use auth details from the in-cluster kubeconfig when connecting to prometheus.")
|
||||||
|
cmd.Flags().StringVar(&cmd.PrometheusAuthConf, "prometheus-auth-config", cmd.PrometheusAuthConf,
|
||||||
|
"kubeconfig file used to configure auth when connecting to Prometheus.")
|
||||||
|
cmd.Flags().StringVar(&cmd.PrometheusCAFile, "prometheus-ca-file", cmd.PrometheusCAFile,
|
||||||
|
"Optional CA file to use when connecting with Prometheus")
|
||||||
|
cmd.Flags().StringVar(&cmd.PrometheusClientTLSCertFile, "prometheus-client-tls-cert-file", cmd.PrometheusClientTLSCertFile,
|
||||||
|
"Optional client TLS cert file to use when connecting with Prometheus, auto-renewal is not supported")
|
||||||
|
cmd.Flags().StringVar(&cmd.PrometheusClientTLSKeyFile, "prometheus-client-tls-key-file", cmd.PrometheusClientTLSKeyFile,
|
||||||
|
"Optional client TLS key file to use when connecting with Prometheus, auto-renewal is not supported")
|
||||||
|
cmd.Flags().StringVar(&cmd.PrometheusTokenFile, "prometheus-token-file", cmd.PrometheusTokenFile,
|
||||||
|
"Optional file containing the bearer token to use when connecting with Prometheus")
|
||||||
|
cmd.Flags().StringArrayVar(&cmd.PrometheusHeaders, "prometheus-header", cmd.PrometheusHeaders,
|
||||||
|
"Optional header to set on requests to prometheus-url. Can be repeated")
|
||||||
|
cmd.Flags().StringVar(&cmd.PrometheusVerb, "prometheus-verb", cmd.PrometheusVerb,
|
||||||
|
"HTTP verb to set on requests to Prometheus. Possible values: \"GET\", \"POST\"")
|
||||||
|
cmd.Flags().StringVar(&cmd.AdapterConfigFile, "config", cmd.AdapterConfigFile,
|
||||||
|
"Configuration file containing details of how to transform between Prometheus metrics "+
|
||||||
|
"and custom metrics API resources")
|
||||||
|
cmd.Flags().DurationVar(&cmd.MetricsRelistInterval, "metrics-relist-interval", cmd.MetricsRelistInterval,
|
||||||
|
"interval at which to re-list the set of all available metrics from Prometheus")
|
||||||
|
cmd.Flags().DurationVar(&cmd.MetricsMaxAge, "metrics-max-age", cmd.MetricsMaxAge,
|
||||||
|
"period for which to query the set of available metrics from Prometheus")
|
||||||
|
cmd.Flags().BoolVar(&cmd.DisableHTTP2, "disable-http2", cmd.DisableHTTP2,
|
||||||
|
"Disable HTTP/2 support")
|
||||||
|
|
||||||
|
// Add logging flags
|
||||||
|
logs.AddFlags(cmd.Flags())
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cmd *PrometheusAdapter) loadConfig() error {
|
||||||
|
// load metrics discovery configuration
|
||||||
|
if cmd.AdapterConfigFile == "" {
|
||||||
|
return fmt.Errorf("no metrics discovery configuration file specified (make sure to use --config)")
|
||||||
|
}
|
||||||
|
metricsConfig, err := adaptercfg.FromFile(cmd.AdapterConfigFile)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to load metrics discovery configuration: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd.metricsConfig = metricsConfig
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cmd *PrometheusAdapter) makeProvider(promClient prom.Client, stopCh <-chan struct{}) (provider.CustomMetricsProvider, error) {
|
||||||
|
if len(cmd.metricsConfig.Rules) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if cmd.MetricsMaxAge < cmd.MetricsRelistInterval {
|
||||||
|
return nil, fmt.Errorf("max age must not be less than relist interval")
|
||||||
|
}
|
||||||
|
|
||||||
|
// grab the mapper and dynamic client
|
||||||
|
mapper, err := cmd.RESTMapper()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to construct RESTMapper: %v", err)
|
||||||
|
}
|
||||||
|
dynClient, err := cmd.DynamicClient()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to construct Kubernetes client: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// extract the namers
|
||||||
|
namers, err := naming.NamersFromConfig(cmd.metricsConfig.Rules, mapper)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to construct naming scheme from metrics rules: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// construct the provider and start it
|
||||||
|
cmProvider, runner := cmprov.NewPrometheusProvider(mapper, dynClient, promClient, namers, cmd.MetricsRelistInterval, cmd.MetricsMaxAge)
|
||||||
|
runner.RunUntil(stopCh)
|
||||||
|
|
||||||
|
return cmProvider, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cmd *PrometheusAdapter) makeExternalProvider(promClient prom.Client, stopCh <-chan struct{}) (provider.ExternalMetricsProvider, error) {
|
||||||
|
if len(cmd.metricsConfig.ExternalRules) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// grab the mapper
|
||||||
|
mapper, err := cmd.RESTMapper()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to construct RESTMapper: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// extract the namers
|
||||||
|
namers, err := naming.NamersFromConfig(cmd.metricsConfig.ExternalRules, mapper)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to construct naming scheme from metrics rules: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// construct the provider and start it
|
||||||
|
emProvider, runner := extprov.NewExternalPrometheusProvider(promClient, namers, cmd.MetricsRelistInterval, cmd.MetricsMaxAge)
|
||||||
|
runner.RunUntil(stopCh)
|
||||||
|
|
||||||
|
return emProvider, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cmd *PrometheusAdapter) addResourceMetricsAPI(promClient prom.Client, stopCh <-chan struct{}) error {
|
||||||
|
if cmd.metricsConfig.ResourceRules == nil {
|
||||||
|
// bail if we don't have rules for setting things up
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
mapper, err := cmd.RESTMapper()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
provider, err := resprov.NewProvider(promClient, mapper, cmd.metricsConfig.ResourceRules)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("unable to construct resource metrics API provider: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rest, err := cmd.ClientConfig()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := metadata.NewForConfig(rest)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
podInformerFactory := metadatainformer.NewFilteredSharedInformerFactory(client, 0, corev1.NamespaceAll, func(options *metav1.ListOptions) {
|
||||||
|
options.FieldSelector = "status.phase=Running"
|
||||||
|
})
|
||||||
|
podInformer := podInformerFactory.ForResource(corev1.SchemeGroupVersion.WithResource("pods"))
|
||||||
|
|
||||||
|
informer, err := cmd.Informers()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := cmd.Config()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
config.GenericConfig.EnableMetrics = false
|
||||||
|
|
||||||
|
server, err := cmd.Server()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
metricsHandler, err := mprom.MetricsHandler()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
server.GenericAPIServer.Handler.NonGoRestfulMux.HandleFunc("/metrics", metricsHandler)
|
||||||
|
|
||||||
|
if err := api.Install(provider, podInformer.Lister(), informer.Core().V1().Nodes().Lister(), server.GenericAPIServer, nil); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
go podInformer.Informer().Run(stopCh)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
logs.InitLogs()
|
logs.InitLogs()
|
||||||
defer logs.FlushLogs()
|
defer logs.FlushLogs()
|
||||||
|
|
||||||
if len(os.Getenv("GOMAXPROCS")) == 0 {
|
// set up flags
|
||||||
runtime.GOMAXPROCS(runtime.NumCPU())
|
cmd := &PrometheusAdapter{
|
||||||
|
PrometheusURL: "https://localhost",
|
||||||
|
PrometheusVerb: http.MethodGet,
|
||||||
|
MetricsRelistInterval: 10 * time.Minute,
|
||||||
|
}
|
||||||
|
cmd.Name = "prometheus-metrics-adapter"
|
||||||
|
|
||||||
|
cmd.addFlags()
|
||||||
|
if err := cmd.Flags().Parse(os.Args); err != nil {
|
||||||
|
klog.Fatalf("unable to parse flags: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd := app.NewCommandStartPrometheusAdapterServer(os.Stdout, os.Stderr, wait.NeverStop)
|
if cmd.OpenAPIConfig == nil {
|
||||||
cmd.Flags().AddGoFlagSet(flag.CommandLine)
|
cmd.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(generatedopenapi.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(api.Scheme, customexternalmetrics.Scheme))
|
||||||
if err := cmd.Execute(); err != nil {
|
cmd.OpenAPIConfig.Info.Title = "prometheus-metrics-adapter"
|
||||||
panic(err)
|
cmd.OpenAPIConfig.Info.Version = "1.0.0"
|
||||||
|
}
|
||||||
|
|
||||||
|
if cmd.OpenAPIV3Config == nil {
|
||||||
|
cmd.OpenAPIV3Config = genericapiserver.DefaultOpenAPIV3Config(generatedopenapi.GetOpenAPIDefinitions, openapinamer.NewDefinitionNamer(api.Scheme, customexternalmetrics.Scheme))
|
||||||
|
cmd.OpenAPIV3Config.Info.Title = "prometheus-metrics-adapter"
|
||||||
|
cmd.OpenAPIV3Config.Info.Version = "1.0.0"
|
||||||
|
}
|
||||||
|
|
||||||
|
// if --metrics-max-age is not set, make it equal to --metrics-relist-interval
|
||||||
|
if cmd.MetricsMaxAge == 0*time.Second {
|
||||||
|
cmd.MetricsMaxAge = cmd.MetricsRelistInterval
|
||||||
|
}
|
||||||
|
|
||||||
|
// make the prometheus client
|
||||||
|
promClient, err := cmd.makePromClient()
|
||||||
|
if err != nil {
|
||||||
|
klog.Fatalf("unable to construct Prometheus client: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// load the config
|
||||||
|
if err := cmd.loadConfig(); err != nil {
|
||||||
|
klog.Fatalf("unable to load metrics discovery config: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// stop channel closed on SIGTERM and SIGINT
|
||||||
|
stopCh := genericapiserver.SetupSignalHandler()
|
||||||
|
|
||||||
|
// construct the provider
|
||||||
|
cmProvider, err := cmd.makeProvider(promClient, stopCh)
|
||||||
|
if err != nil {
|
||||||
|
klog.Fatalf("unable to construct custom metrics provider: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// attach the provider to the server, if it's needed
|
||||||
|
if cmProvider != nil {
|
||||||
|
cmd.WithCustomMetrics(cmProvider)
|
||||||
|
}
|
||||||
|
|
||||||
|
// construct the external provider
|
||||||
|
emProvider, err := cmd.makeExternalProvider(promClient, stopCh)
|
||||||
|
if err != nil {
|
||||||
|
klog.Fatalf("unable to construct external metrics provider: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// attach the provider to the server, if it's needed
|
||||||
|
if emProvider != nil {
|
||||||
|
cmd.WithExternalMetrics(emProvider)
|
||||||
|
}
|
||||||
|
|
||||||
|
// attach resource metrics support, if it's needed
|
||||||
|
if err := cmd.addResourceMetricsAPI(promClient, stopCh); err != nil {
|
||||||
|
klog.Fatalf("unable to install resource metrics API: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// disable HTTP/2 to mitigate CVE-2023-44487 until the Go standard library
|
||||||
|
// and golang.org/x/net are fully fixed.
|
||||||
|
server, err := cmd.Server()
|
||||||
|
if err != nil {
|
||||||
|
klog.Fatalf("unable to fetch server: %v", err)
|
||||||
|
}
|
||||||
|
server.GenericAPIServer.SecureServingInfo.DisableHTTP2 = cmd.DisableHTTP2
|
||||||
|
|
||||||
|
// run the server
|
||||||
|
if err := cmd.Run(stopCh); err != nil {
|
||||||
|
klog.Fatalf("unable to run custom metrics adapter: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// makeKubeconfigHTTPClient constructs an HTTP for connecting with the given auth options.
|
||||||
|
func makeKubeconfigHTTPClient(inClusterAuth bool, kubeConfigPath string) (*http.Client, error) {
|
||||||
|
// make sure we're not trying to use two different sources of auth
|
||||||
|
if inClusterAuth && kubeConfigPath != "" {
|
||||||
|
return nil, fmt.Errorf("may not use both in-cluster auth and an explicit kubeconfig at the same time")
|
||||||
|
}
|
||||||
|
|
||||||
|
// return the default client if we're using no auth
|
||||||
|
if !inClusterAuth && kubeConfigPath == "" {
|
||||||
|
return http.DefaultClient, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var authConf *rest.Config
|
||||||
|
if kubeConfigPath != "" {
|
||||||
|
var err error
|
||||||
|
loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfigPath}
|
||||||
|
loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})
|
||||||
|
authConf, err = loader.ClientConfig()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to construct auth configuration from %q for connecting to Prometheus: %v", kubeConfigPath, err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
var err error
|
||||||
|
authConf, err = rest.InClusterConfig()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to construct in-cluster auth configuration for connecting to Prometheus: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
tr, err := rest.TransportFor(authConf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to construct client transport for connecting to Prometheus: %v", err)
|
||||||
|
}
|
||||||
|
return &http.Client{Transport: tr}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func makePrometheusCAClient(caFilePath string, tlsCertFilePath string, tlsKeyFilePath string) (*http.Client, error) {
|
||||||
|
data, err := os.ReadFile(caFilePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read prometheus-ca-file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pool := x509.NewCertPool()
|
||||||
|
if !pool.AppendCertsFromPEM(data) {
|
||||||
|
return nil, fmt.Errorf("no certs found in prometheus-ca-file")
|
||||||
|
}
|
||||||
|
|
||||||
|
if (tlsCertFilePath != "") && (tlsKeyFilePath != "") {
|
||||||
|
tlsClientCerts, err := tls.LoadX509KeyPair(tlsCertFilePath, tlsKeyFilePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to read TLS key pair: %v", err)
|
||||||
|
}
|
||||||
|
return &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
TLSClientConfig: &tls.Config{
|
||||||
|
RootCAs: pool,
|
||||||
|
Certificates: []tls.Certificate{tlsClientCerts},
|
||||||
|
MinVersion: tls.VersionTLS12,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
TLSClientConfig: &tls.Config{
|
||||||
|
RootCAs: pool,
|
||||||
|
MinVersion: tls.VersionTLS12,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseHeaderArgs(args []string) http.Header {
|
||||||
|
headers := make(http.Header, len(args))
|
||||||
|
for _, h := range args {
|
||||||
|
parts := strings.SplitN(h, "=", 2)
|
||||||
|
value := ""
|
||||||
|
if len(parts) > 1 {
|
||||||
|
value = parts[1]
|
||||||
|
}
|
||||||
|
headers.Add(parts[0], value)
|
||||||
|
}
|
||||||
|
return headers
|
||||||
|
}
|
||||||
|
|
|
||||||
211
cmd/adapter/adapter_test.go
Normal file
211
cmd/adapter/adapter_test.go
Normal file
|
|
@ -0,0 +1,211 @@
|
||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
const certsDir = "testdata"
|
||||||
|
|
||||||
|
func TestMakeKubeconfigHTTPClient(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
kubeconfigPath string
|
||||||
|
inClusterAuth bool
|
||||||
|
success bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
kubeconfigPath: filepath.Join(certsDir, "kubeconfig"),
|
||||||
|
inClusterAuth: false,
|
||||||
|
success: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
kubeconfigPath: filepath.Join(certsDir, "kubeconfig"),
|
||||||
|
inClusterAuth: true,
|
||||||
|
success: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
kubeconfigPath: filepath.Join(certsDir, "kubeconfig-error"),
|
||||||
|
inClusterAuth: false,
|
||||||
|
success: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
kubeconfigPath: "",
|
||||||
|
inClusterAuth: false,
|
||||||
|
success: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
os.Setenv("KUBERNETES_SERVICE_HOST", "prometheus")
|
||||||
|
os.Setenv("KUBERNETES_SERVICE_PORT", "8080")
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Logf("Running test for: inClusterAuth %v, kubeconfigPath %v", test.inClusterAuth, test.kubeconfigPath)
|
||||||
|
kubeconfigHTTPClient, err := makeKubeconfigHTTPClient(test.inClusterAuth, test.kubeconfigPath)
|
||||||
|
if test.success {
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error is %v, expected nil", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if kubeconfigHTTPClient.Transport == nil {
|
||||||
|
if test.inClusterAuth || test.kubeconfigPath != "" {
|
||||||
|
t.Error("HTTP client Transport is nil, expected http.RoundTripper")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if err == nil {
|
||||||
|
t.Errorf("Error is nil, expected %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMakePrometheusCAClient(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
caFilePath string
|
||||||
|
tlsCertFilePath string
|
||||||
|
tlsKeyFilePath string
|
||||||
|
success bool
|
||||||
|
tlsUsed bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
caFilePath: filepath.Join(certsDir, "ca.pem"),
|
||||||
|
tlsCertFilePath: filepath.Join(certsDir, "tlscert.crt"),
|
||||||
|
tlsKeyFilePath: filepath.Join(certsDir, "tlskey.key"),
|
||||||
|
success: true,
|
||||||
|
tlsUsed: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caFilePath: filepath.Join(certsDir, "ca-error.pem"),
|
||||||
|
tlsCertFilePath: filepath.Join(certsDir, "tlscert.crt"),
|
||||||
|
tlsKeyFilePath: filepath.Join(certsDir, "tlskey.key"),
|
||||||
|
success: false,
|
||||||
|
tlsUsed: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caFilePath: filepath.Join(certsDir, "ca.pem"),
|
||||||
|
tlsCertFilePath: filepath.Join(certsDir, "tlscert-error.crt"),
|
||||||
|
tlsKeyFilePath: filepath.Join(certsDir, "tlskey.key"),
|
||||||
|
success: false,
|
||||||
|
tlsUsed: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
caFilePath: filepath.Join(certsDir, "ca.pem"),
|
||||||
|
tlsCertFilePath: "",
|
||||||
|
tlsKeyFilePath: "",
|
||||||
|
success: true,
|
||||||
|
tlsUsed: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Logf("Running test for: caFilePath %v, tlsCertFilePath %v, tlsKeyFilePath %v", test.caFilePath, test.tlsCertFilePath, test.tlsKeyFilePath)
|
||||||
|
prometheusCAClient, err := makePrometheusCAClient(test.caFilePath, test.tlsCertFilePath, test.tlsKeyFilePath)
|
||||||
|
if test.success {
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error is %v, expected nil", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if prometheusCAClient.Transport.(*http.Transport).TLSClientConfig.RootCAs == nil {
|
||||||
|
t.Error("RootCAs is nil, expected *x509.CertPool")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if test.tlsUsed {
|
||||||
|
if prometheusCAClient.Transport.(*http.Transport).TLSClientConfig.Certificates == nil {
|
||||||
|
t.Error("TLS certificates is nil, expected []tls.Certificate")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if prometheusCAClient.Transport.(*http.Transport).TLSClientConfig.Certificates != nil {
|
||||||
|
t.Errorf("TLS certificates is %+v, expected nil", prometheusCAClient.Transport.(*http.Transport).TLSClientConfig.Certificates)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if err == nil {
|
||||||
|
t.Errorf("Error is nil, expected %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseHeaderArgs(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
args []string
|
||||||
|
headers http.Header
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
headers: http.Header{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
args: []string{"foo=bar"},
|
||||||
|
headers: http.Header{
|
||||||
|
"Foo": []string{"bar"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
args: []string{"foo"},
|
||||||
|
headers: http.Header{
|
||||||
|
"Foo": []string{""},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
args: []string{"foo=bar", "foo=baz", "bux=baz=23"},
|
||||||
|
headers: http.Header{
|
||||||
|
"Foo": []string{"bar", "baz"},
|
||||||
|
"Bux": []string{"baz=23"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
got := parseHeaderArgs(test.args)
|
||||||
|
if !reflect.DeepEqual(got, test.headers) {
|
||||||
|
t.Errorf("Expected %#v but got %#v", test.headers, got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFlags(t *testing.T) {
|
||||||
|
cmd := &PrometheusAdapter{
|
||||||
|
PrometheusURL: "https://localhost",
|
||||||
|
}
|
||||||
|
cmd.addFlags()
|
||||||
|
|
||||||
|
flags := cmd.FlagSet
|
||||||
|
if flags == nil {
|
||||||
|
t.Fatalf("FlagSet should not be nil")
|
||||||
|
}
|
||||||
|
|
||||||
|
expectedFlags := []struct {
|
||||||
|
flag string
|
||||||
|
defaultValue string
|
||||||
|
}{
|
||||||
|
{flag: "v", defaultValue: "0"}, // logging flag (klog)
|
||||||
|
{flag: "prometheus-url", defaultValue: "https://localhost"}, // default is set in cmd
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, e := range expectedFlags {
|
||||||
|
flag := flags.Lookup(e.flag)
|
||||||
|
if flag == nil {
|
||||||
|
t.Errorf("Flag %q expected to be present, was absent", e.flag)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if flag.DefValue != e.defaultValue {
|
||||||
|
t.Errorf("Expected default value %q for flag %q, got %q", e.defaultValue, e.flag, flag.DefValue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -1,219 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright 2017 The Kubernetes Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package app
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"k8s.io/client-go/discovery"
|
|
||||||
"k8s.io/client-go/dynamic"
|
|
||||||
"k8s.io/client-go/rest"
|
|
||||||
"k8s.io/client-go/tools/clientcmd"
|
|
||||||
|
|
||||||
prom "github.com/directxman12/k8s-prometheus-adapter/pkg/client"
|
|
||||||
mprom "github.com/directxman12/k8s-prometheus-adapter/pkg/client/metrics"
|
|
||||||
adaptercfg "github.com/directxman12/k8s-prometheus-adapter/pkg/config"
|
|
||||||
cmprov "github.com/directxman12/k8s-prometheus-adapter/pkg/custom-provider"
|
|
||||||
"github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/cmd/server"
|
|
||||||
"github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/dynamicmapper"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewCommandStartPrometheusAdapterServer provides a CLI handler for 'start master' command
|
|
||||||
func NewCommandStartPrometheusAdapterServer(out, errOut io.Writer, stopCh <-chan struct{}) *cobra.Command {
|
|
||||||
baseOpts := server.NewCustomMetricsAdapterServerOptions(out, errOut)
|
|
||||||
o := PrometheusAdapterServerOptions{
|
|
||||||
CustomMetricsAdapterServerOptions: baseOpts,
|
|
||||||
MetricsRelistInterval: 10 * time.Minute,
|
|
||||||
PrometheusURL: "https://localhost",
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd := &cobra.Command{
|
|
||||||
Short: "Launch the custom metrics API adapter server",
|
|
||||||
Long: "Launch the custom metrics API adapter server",
|
|
||||||
RunE: func(c *cobra.Command, args []string) error {
|
|
||||||
if err := o.Complete(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := o.Validate(args); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := o.RunCustomMetricsAdapterServer(stopCh); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
flags := cmd.Flags()
|
|
||||||
o.SecureServing.AddFlags(flags)
|
|
||||||
o.Authentication.AddFlags(flags)
|
|
||||||
o.Authorization.AddFlags(flags)
|
|
||||||
o.Features.AddFlags(flags)
|
|
||||||
|
|
||||||
flags.StringVar(&o.RemoteKubeConfigFile, "lister-kubeconfig", o.RemoteKubeConfigFile, ""+
|
|
||||||
"kubeconfig file pointing at the 'core' kubernetes server with enough rights to list "+
|
|
||||||
"any described objets")
|
|
||||||
flags.DurationVar(&o.MetricsRelistInterval, "metrics-relist-interval", o.MetricsRelistInterval, ""+
|
|
||||||
"interval at which to re-list the set of all available metrics from Prometheus")
|
|
||||||
flags.DurationVar(&o.DiscoveryInterval, "discovery-interval", o.DiscoveryInterval, ""+
|
|
||||||
"interval at which to refresh API discovery information")
|
|
||||||
flags.StringVar(&o.PrometheusURL, "prometheus-url", o.PrometheusURL,
|
|
||||||
"URL for connecting to Prometheus.")
|
|
||||||
flags.BoolVar(&o.PrometheusAuthInCluster, "prometheus-auth-incluster", o.PrometheusAuthInCluster,
|
|
||||||
"use auth details from the in-cluster kubeconfig when connecting to prometheus.")
|
|
||||||
flags.StringVar(&o.PrometheusAuthConf, "prometheus-auth-config", o.PrometheusAuthConf,
|
|
||||||
"kubeconfig file used to configure auth when connecting to Prometheus.")
|
|
||||||
flags.StringVar(&o.AdapterConfigFile, "config", o.AdapterConfigFile,
|
|
||||||
"Configuration file containing details of how to transform between Prometheus metrics "+
|
|
||||||
"and custom metrics API resources")
|
|
||||||
|
|
||||||
cmd.MarkFlagRequired("config")
|
|
||||||
|
|
||||||
return cmd
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeHTTPClient constructs an HTTP for connecting with the given auth options.
|
|
||||||
func makeHTTPClient(inClusterAuth bool, kubeConfigPath string) (*http.Client, error) {
|
|
||||||
// make sure we're not trying to use two different sources of auth
|
|
||||||
if inClusterAuth && kubeConfigPath != "" {
|
|
||||||
return nil, fmt.Errorf("may not use both in-cluster auth and an explicit kubeconfig at the same time")
|
|
||||||
}
|
|
||||||
|
|
||||||
// return the default client if we're using no auth
|
|
||||||
if !inClusterAuth && kubeConfigPath == "" {
|
|
||||||
return http.DefaultClient, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var authConf *rest.Config
|
|
||||||
if kubeConfigPath != "" {
|
|
||||||
var err error
|
|
||||||
loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfigPath}
|
|
||||||
loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})
|
|
||||||
authConf, err = loader.ClientConfig()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to construct auth configuration from %q for connecting to Prometheus: %v", kubeConfigPath, err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
var err error
|
|
||||||
authConf, err = rest.InClusterConfig()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to construct in-cluster auth configuration for connecting to Prometheus: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
tr, err := rest.TransportFor(authConf)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to construct client transport for connecting to Prometheus: %v", err)
|
|
||||||
}
|
|
||||||
return &http.Client{Transport: tr}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o PrometheusAdapterServerOptions) RunCustomMetricsAdapterServer(stopCh <-chan struct{}) error {
|
|
||||||
if o.AdapterConfigFile == "" {
|
|
||||||
return fmt.Errorf("no discovery configuration file specified")
|
|
||||||
}
|
|
||||||
|
|
||||||
metricsConfig, err := adaptercfg.FromFile(o.AdapterConfigFile)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("unable to load metrics discovery configuration: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
config, err := o.Config()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
config.GenericConfig.EnableMetrics = true
|
|
||||||
|
|
||||||
var clientConfig *rest.Config
|
|
||||||
if len(o.RemoteKubeConfigFile) > 0 {
|
|
||||||
loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: o.RemoteKubeConfigFile}
|
|
||||||
loader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{})
|
|
||||||
|
|
||||||
clientConfig, err = loader.ClientConfig()
|
|
||||||
} else {
|
|
||||||
clientConfig, err = rest.InClusterConfig()
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("unable to construct lister client config to initialize provider: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
discoveryClient, err := discovery.NewDiscoveryClientForConfig(clientConfig)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("unable to construct discovery client for dynamic client: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
dynamicMapper, err := dynamicmapper.NewRESTMapper(discoveryClient, o.DiscoveryInterval)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("unable to construct dynamic discovery mapper: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
dynamicClient, err := dynamic.NewForConfig(clientConfig)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("unable to construct lister client to initialize provider: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: actually configure this client (strip query vars, etc)
|
|
||||||
baseURL, err := url.Parse(o.PrometheusURL)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("invalid Prometheus URL %q: %v", baseURL, err)
|
|
||||||
}
|
|
||||||
promHTTPClient, err := makeHTTPClient(o.PrometheusAuthInCluster, o.PrometheusAuthConf)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
genericPromClient := prom.NewGenericAPIClient(promHTTPClient, baseURL)
|
|
||||||
instrumentedGenericPromClient := mprom.InstrumentGenericAPIClient(genericPromClient, baseURL.String())
|
|
||||||
promClient := prom.NewClientForAPI(instrumentedGenericPromClient)
|
|
||||||
|
|
||||||
namers, err := cmprov.NamersFromConfig(metricsConfig, dynamicMapper)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("unable to construct naming scheme from metrics rules: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
cmProvider, runner := cmprov.NewPrometheusProvider(dynamicMapper, dynamicClient, promClient, namers, o.MetricsRelistInterval)
|
|
||||||
runner.RunUntil(stopCh)
|
|
||||||
|
|
||||||
server, err := config.Complete().New("prometheus-custom-metrics-adapter", cmProvider, nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return server.GenericAPIServer.PrepareRun().Run(stopCh)
|
|
||||||
}
|
|
||||||
|
|
||||||
type PrometheusAdapterServerOptions struct {
|
|
||||||
*server.CustomMetricsAdapterServerOptions
|
|
||||||
|
|
||||||
// RemoteKubeConfigFile is the config used to list pods from the master API server
|
|
||||||
RemoteKubeConfigFile string
|
|
||||||
// MetricsRelistInterval is the interval at which to relist the set of available metrics
|
|
||||||
MetricsRelistInterval time.Duration
|
|
||||||
// DiscoveryInterval is the interval at which discovery information is refreshed
|
|
||||||
DiscoveryInterval time.Duration
|
|
||||||
// PrometheusURL is the URL describing how to connect to Prometheus. Query parameters configure connection options.
|
|
||||||
PrometheusURL string
|
|
||||||
// PrometheusAuthInCluster enables using the auth details from the in-cluster kubeconfig to connect to Prometheus
|
|
||||||
PrometheusAuthInCluster bool
|
|
||||||
// PrometheusAuthConf is the kubeconfig file that contains auth details used to connect to Prometheus
|
|
||||||
PrometheusAuthConf string
|
|
||||||
// AdapterConfigFile points to the file containing the metrics discovery configuration.
|
|
||||||
AdapterConfigFile string
|
|
||||||
}
|
|
||||||
16
cmd/adapter/testdata/ca-error.pem
vendored
Normal file
16
cmd/adapter/testdata/ca-error.pem
vendored
Normal file
|
|
@ -0,0 +1,16 @@
|
||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIDdjCCAl4CCQDdbOsYxSKoeDANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJV
|
||||||
|
UzENMAsGA1UEBwwEVGVzdDEaMBgGA1UECgwRUHJvbWV0aGV1c0FkYXB0ZXIxJDAi
|
||||||
|
BgNVBAMMG2s4cy1wcm9tZXRoZXVzLWFkYXB0ZXIudGVzdDEdMBsGCSqGSIb3DQEJ
|
||||||
|
ARYOdGVzdEB0ZXN0LnRlc3QwHhcNMjEwMjA5MTE0NzUwWhcNMjYwMjA4MTE0NzUw
|
||||||
|
WjB9MQswCQYDVQQGEwJVUzENMAsGA1UEBwwEVGVzdDEaMBgGA1UECgwRUHJvbWV0
|
||||||
|
aGV1c0FkYXB0ZXIxJDAiBgNVBAMMG2s4cy1wcm9tZXRoZXVzLWFkYXB0ZXIudGVz
|
||||||
|
dDEdMBsGCSqGSIb3DQEJARYOdGVzdEB0ZXN0LnRlc3QwggEiMA0GCSqGSIb3DQEB
|
||||||
|
AQUAA4IBDwAwggEKAoIBAQC24TDfTWLtYZPLDXqEjF7yn4K7oBOltX5Nngsk7LNd
|
||||||
|
AQELBQADggEBAD/bbeAZuyvtuEwdJ+4wkhBsHYXQ4OPxff1f3t4buIQFtnilWTXE
|
||||||
|
S60K3SEaQS8rOw8V9eHmzCsh3mPuVCoM7WsgKhp2mVhbGVZoBWBZ8kPQXqtsw+v4
|
||||||
|
tqTuJXnFPiF4clXb6Wp96Rc7nxzRAfn/6uVbSWds4JwRToUVszVOxe+yu0I84vuB
|
||||||
|
SHrRa077b1V+UT8otm+C5tC3jBZ0/IPRNWoT/rVcSoVLouX0fkbtxNF7c9v+PYg6
|
||||||
|
849A9T8cGKWKpKPGNEwBL9HYwtK6W0tTJr8A8pnAJ/UlniHA6u7SMHN+NoqBfi6M
|
||||||
|
bqq9lQ4QhjFrN2B1z9r3ak+EzQX1711TQ8w=
|
||||||
|
-----END CERTIFICATE-----
|
||||||
20
cmd/adapter/testdata/ca.pem
vendored
Normal file
20
cmd/adapter/testdata/ca.pem
vendored
Normal file
|
|
@ -0,0 +1,20 @@
|
||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIDLjCCAhYCCQDlnNCOw7JHFDANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJV
|
||||||
|
UzENMAsGA1UECgwEVGVzdDEbMBkGA1UEAwwScHJvbWV0aGV1cy1hZGFwdGVyMR0w
|
||||||
|
GwYJKoZIhvcNAQkBFg50ZXN0QHRlc3QudGVzdDAgFw0yMTAyMjIyMDMxNTBaGA80
|
||||||
|
NzU5MDEyMDIwMzE1MFowWDELMAkGA1UEBhMCVVMxDTALBgNVBAoMBFRlc3QxGzAZ
|
||||||
|
BgNVBAMMEnByb21ldGhldXMtYWRhcHRlcjEdMBsGCSqGSIb3DQEJARYOdGVzdEB0
|
||||||
|
ZXN0LnRlc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvn9HQlfhw
|
||||||
|
qDH77+eEFU+N+ztqCtat54neVez+sFa4dfYxuvVYK+nc+oh4E7SS4u+17eKV+QFb
|
||||||
|
ZhRhrOTNI+fmuO+xDPKyU1MuYUDfwasRfMDcUpssea2fO/SrsxHmX9oOam0kgefJ
|
||||||
|
8aSwI9TYw4N4kIpG+EGatogDlR2KXhrqsRfx5PUB4npFaCrdoglyvvAQig83Iq5L
|
||||||
|
+bCknSe6NUMiqtL9CcuLzzRKB3DMOrvbB0tJdb4uv/gS26sx/Hp/1ri73/tv4I9z
|
||||||
|
GLLoUUoff7vfvxrhiGR9i+qBOda7THbbmYBD54y+SR0dBa2uuDDX0JbgNNfXtjiG
|
||||||
|
52hvAnc1/wv7AgMBAAEwDQYJKoZIhvcNAQELBQADggEBACCysIzT9NKaniEvXtnx
|
||||||
|
Yx/jRxpiEEUGl8kg83a95X4f13jdPpUSwcn3/iK5SAE/7ntGVM+ajtlXrHGxwjB7
|
||||||
|
ER0w4WC6Ozypzoh/yI/VXs+DRJTJu8CBJOBRQEpzkK4r64HU8iN2c9lPp1+6b3Vy
|
||||||
|
jfbf3yfnRUbJztSjOFDUeA2t3FThVddhqif/oxj65s5R8p9HEurcwhA3Q6lE53yx
|
||||||
|
jgee8qV9HXAqa4V0qQQJ0tjcpajhQahDTtThRr+Z2H4TzQuwHa3dM7IIF6EPWsCo
|
||||||
|
DtbUXEPL7zT3EBH7THOdvNsFlD/SFmT2RwiQ5606bRAHwAzzxjxjxFTMl7r4tX5W
|
||||||
|
Ldc=
|
||||||
|
-----END CERTIFICATE-----
|
||||||
17
cmd/adapter/testdata/kubeconfig
vendored
Normal file
17
cmd/adapter/testdata/kubeconfig
vendored
Normal file
|
|
@ -0,0 +1,17 @@
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Config
|
||||||
|
clusters:
|
||||||
|
- name: test
|
||||||
|
cluster:
|
||||||
|
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURMakNDQWhZQ0NRRGxuTkNPdzdKSEZEQU5CZ2txaGtpRzl3MEJBUXNGQURCWU1Rc3dDUVlEVlFRR0V3SlYKVXpFTk1Bc0dBMVVFQ2d3RVZHVnpkREViTUJrR0ExVUVBd3dTY0hKdmJXVjBhR1YxY3kxaFpHRndkR1Z5TVIwdwpHd1lKS29aSWh2Y05BUWtCRmc1MFpYTjBRSFJsYzNRdWRHVnpkREFnRncweU1UQXlNakl5TURNeE5UQmFHQTgwCk56VTVNREV5TURJd016RTFNRm93V0RFTE1Ba0dBMVVFQmhNQ1ZWTXhEVEFMQmdOVkJBb01CRlJsYzNReEd6QVoKQmdOVkJBTU1FbkJ5YjIxbGRHaGxkWE10WVdSaGNIUmxjakVkTUJzR0NTcUdTSWIzRFFFSkFSWU9kR1Z6ZEVCMApaWE4wTG5SbGMzUXdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDdm45SFFsZmh3CnFESDc3K2VFRlUrTit6dHFDdGF0NTRuZVZleitzRmE0ZGZZeHV2VllLK25jK29oNEU3U1M0dSsxN2VLVitRRmIKWmhSaHJPVE5JK2ZtdU8reERQS3lVMU11WVVEZndhc1JmTURjVXBzc2VhMmZPL1Nyc3hIbVg5b09hbTBrZ2VmSgo4YVN3STlUWXc0TjRrSXBHK0VHYXRvZ0RsUjJLWGhycXNSZng1UFVCNG5wRmFDcmRvZ2x5dnZBUWlnODNJcTVMCitiQ2tuU2U2TlVNaXF0TDlDY3VMenpSS0IzRE1PcnZiQjB0SmRiNHV2L2dTMjZzeC9IcC8xcmk3My90djRJOXoKR0xMb1VVb2ZmN3ZmdnhyaGlHUjlpK3FCT2RhN1RIYmJtWUJENTR5K1NSMGRCYTJ1dUREWDBKYmdOTmZYdGppRwo1Mmh2QW5jMS93djdBZ01CQUFFd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFDQ3lzSXpUOU5LYW5pRXZYdG54Cll4L2pSeHBpRUVVR2w4a2c4M2E5NVg0ZjEzamRQcFVTd2NuMy9pSzVTQUUvN250R1ZNK2FqdGxYckhHeHdqQjcKRVIwdzRXQzZPenlwem9oL3lJL1ZYcytEUkpUSnU4Q0JKT0JSUUVwemtLNHI2NEhVOGlOMmM5bFBwMSs2YjNWeQpqZmJmM3lmblJVYkp6dFNqT0ZEVWVBMnQzRlRoVmRkaHFpZi9veGo2NXM1UjhwOUhFdXJjd2hBM1E2bEU1M3l4CmpnZWU4cVY5SFhBcWE0VjBxUVFKMHRqY3BhamhRYWhEVHRUaFJyK1oySDRUelF1d0hhM2RNN0lJRjZFUFdzQ28KRHRiVVhFUEw3elQzRUJIN1RIT2R2TnNGbEQvU0ZtVDJSd2lRNTYwNmJSQUh3QXp6eGp4anhGVE1sN3I0dFg1VwpMZGM9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
|
||||||
|
server: test.test
|
||||||
|
contexts:
|
||||||
|
- name: test
|
||||||
|
context:
|
||||||
|
cluster: test
|
||||||
|
user: test-user
|
||||||
|
current-context: test
|
||||||
|
users:
|
||||||
|
- name: test-user
|
||||||
|
user:
|
||||||
|
token: abcde12345
|
||||||
18
cmd/adapter/testdata/kubeconfig-error
vendored
Normal file
18
cmd/adapter/testdata/kubeconfig-error
vendored
Normal file
|
|
@ -0,0 +1,18 @@
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Config
|
||||||
|
clusters:
|
||||||
|
- name: test
|
||||||
|
cluster:
|
||||||
|
certificate-authority-data: abcde12345
|
||||||
|
server: test.test
|
||||||
|
contexts:
|
||||||
|
- name: test
|
||||||
|
context:
|
||||||
|
cluster: test
|
||||||
|
user: test-user
|
||||||
|
current-context: test
|
||||||
|
users:
|
||||||
|
- name: test-user
|
||||||
|
user:
|
||||||
|
token: abcde12345
|
||||||
|
|
||||||
24
cmd/adapter/testdata/tlscert-error.crt
vendored
Normal file
24
cmd/adapter/testdata/tlscert-error.crt
vendored
Normal file
|
|
@ -0,0 +1,24 @@
|
||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIFdjCCA14CCQC+svUhDVv51DANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJV
|
||||||
|
UzENMAsGA1UEBwwEVGVzdDEaMBgGA1UECgwRUHJvbWV0aGV1c0FkYXB0ZXIxJDAi
|
||||||
|
BgNVBAMMG2s4cy1wcm9tZXRoZXVzLWFkYXB0ZXIudGVzdDEdMBsGCSqGSIb3DQEJ
|
||||||
|
ARYOdGVzdEB0ZXN0LnRlc3QwHhcNMjEwMjA5MTE0NDMyWhcNMjIwMjA5MTE0NDMy
|
||||||
|
WjB9MQswCQYDVQQGEwJVUzENMAsGA1UEBwwEVGVzdDEaMBgGA1UECgwRUHJvbWV0
|
||||||
|
aGV1c0FkYXB0ZXIxJDAiBgNVBAMMG2s4cy1wcm9tZXRoZXVzLWFkYXB0ZXIudGVz
|
||||||
|
dDEdMBsGCSqGSIb3DQEJARYOdGVzdEB0ZXN0LnRlc3QwggIiMA0GCSqGSIb3DQEB
|
||||||
|
AQUAA4ICDwAwggIKAoICAQDtLqKuIJqRETLOUSMDBtUDmIBaD2pG9Qv+cOBhQbVS
|
||||||
|
apZRWk8uKZKxqBOxgQ3UxY1szeVkx1Dphe3RN6ndmofiRc23ns1qncbDllgbtflk
|
||||||
|
GFvLKGcVBa6Z/lZ6FCZDWn6K6mJb0a7jtkOMG6+J/5eJHfZ23u/GYL1RKxH+qPPc
|
||||||
|
AwIDAQABMA0GCSqGSIb3DQEBCwUAA4ICAQC0oE4/Yel5GHeuKJ5+U9KEBsLCrBjj
|
||||||
|
jUW8or7SIZ1Gl4J2ak3p3WabhZjqOe10rsIXCNTaC2rEMvkNiP5Om0aeE6bx14jV
|
||||||
|
e9FIfJ7ytAL/PISaZXINgml05m4Su3bUaxCQpajBgqneNp4w57jfeFhcPt35j0H4
|
||||||
|
bxGk/hnIY1MmRULSOFBstmxNZSDNsGlTcZoN3+0KtuqLg6vTNuuJIyx1zd9/QT8t
|
||||||
|
RJ4fgrffJcPJscvq6hEdWmtcJhaDLWOEblsbFfN0J+zK07hHhqRavQrnwaBZgFWa
|
||||||
|
OIqAo6NfZONhCFy9mWFxLvQky1NXr60y220+N1GkEiLRQES7+p1pcKgn0v+f2EfW
|
||||||
|
uN6+LCppWX7FqtkB3OhZkHM6nbE/9GP5T76Kj30Fed/nHkTJ3QORRMQUTs4J6LNk
|
||||||
|
BD1i14MZMCn3UBZh8wX+d63xJHtfMvfac7L655GwLEnWW8JM8h8DDfRYM7JuEURG
|
||||||
|
pSbvoaygyvddT0FKRLcFGhfI7aBSWGcJH5rHdEcUQ+mnloD1RioQqTC+kxUSddJI
|
||||||
|
QNjgYivl9kwW9cJV1jzmKd8GQfg+j1X+jR9icNT5cacvclwnL0Mim0w/ZLfWQYmJ
|
||||||
|
q2ud+GS9+5RtPzWwHR60+Qs3dr8oQGh5wO12qUJ8d5MI+4YGWRjKRyYdio6g1Bhi
|
||||||
|
9WInD4va9cC7fw==
|
||||||
|
-----END CERTIFICATE-----
|
||||||
30
cmd/adapter/testdata/tlscert.crt
vendored
Normal file
30
cmd/adapter/testdata/tlscert.crt
vendored
Normal file
|
|
@ -0,0 +1,30 @@
|
||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIIFLjCCAxYCCQDMlabDYYlDKzANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJV
|
||||||
|
UzENMAsGA1UECgwEVGVzdDEbMBkGA1UEAwwScHJvbWV0aGV1cy1hZGFwdGVyMR0w
|
||||||
|
GwYJKoZIhvcNAQkBFg50ZXN0QHRlc3QudGVzdDAgFw0yMTAyMjIyMDMwMTNaGA80
|
||||||
|
NzU5MDEyMDIwMzAxM1owWDELMAkGA1UEBhMCVVMxDTALBgNVBAoMBFRlc3QxGzAZ
|
||||||
|
BgNVBAMMEnByb21ldGhldXMtYWRhcHRlcjEdMBsGCSqGSIb3DQEJARYOdGVzdEB0
|
||||||
|
ZXN0LnRlc3QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDIJOO8apS3
|
||||||
|
84bssvnTVHp1VAiPg1tX+E6wPjayVPx+S4LgMKA4QM2kNKoPLQtvGV+lyqfhYp0H
|
||||||
|
cdGzCPVQ6aBlbHuOhInusJaXjgOlNTalThgigzky0t1jFxqaNjFtXqv6ME1Zcb9H
|
||||||
|
VrGMreEfNj8/Ijp7cfsBe7Jv2rBc06aidx9j66+oPTC98XcNnURUGO95UF8SjTQt
|
||||||
|
oi10m17uA7z/JUUSBvDJAg5Z62myZPU2stz38cuthROyEyXRBWimHh7bD17rwqhc
|
||||||
|
WRCfkFORWvwz9GMV5KfFCfWm2D2pm1f3ZWm5/FQbQrlxgxUagwDMoma+F6hQp84a
|
||||||
|
/sYPqqkWDRUK0NGZzWwxjfra8r8H2xFab+5ZFVr9+FhMgy6eelZ1JJc860s35Qpk
|
||||||
|
ZrSRH8RNMqLRG1cnDwHn9Md6joCZgLJhEW9L5xjpCVWkLXK59yA9ry5Jau9/2zDs
|
||||||
|
wlRzYI4TNazbVa84KliEjt3nZ6DgQh3PRtxHDrqJIQSSYu1MtUmPArLtEDDP/BqD
|
||||||
|
fGWCayc/SdxSWW9qU/aOq+D4KMKQXV44qc22f6rd/LKt/fcvDpbfcexXbeeNABQg
|
||||||
|
x1rAnhA8L/rYc4WTTbTrb8jwhaUoqJve6XOsHPVbk/L4CS9ReP1UwvhMM1C+Ast6
|
||||||
|
rr/a2bZkoMK+jmkA2QTwUsjvt/8G4dtVOwIDAQABMA0GCSqGSIb3DQEBCwUAA4IC
|
||||||
|
AQAvrWslCR21l8XRGI+l4z6bpZ7+089KQRemYcHWOKZ/nDTYcydDWQMdMtDZS43d
|
||||||
|
B2Wuu4UfOnK27YwuP4Ojf2hAzeaDBgv7xOcKRZ1K+zOCm+VqbtWV49c/Ow0Rc5KU
|
||||||
|
N7rApohdpXeJBp5TB1qQJsKcBv3gveLAivCFTeD0LiLMVdxjRRl9ZbMXtD3PABDC
|
||||||
|
KKFtE/n2MV/0wroMD9hHs9LckcNjHSrIFaQEy9cESn8q3kngFf3wvc2oM77yCOZ1
|
||||||
|
5y0AN+9ZXyMHHlMjye7GuW0Mpiwo1O4tW2brC0boqSmvSFNW9KRogKvu6Oij9Pm6
|
||||||
|
jJpuUsM0KOnID8m9jJ+Xb+DGC9cgLGHRJc+zw74X2KMQnH4/pZDNbIGG7d8xEoPn
|
||||||
|
RS/EbCoALmUbI2kqflVN88kN4ZUchsoHly5gIdidfo9yjeOihTF0xEEou/tzGW+K
|
||||||
|
AYxwy9uIYhz4lmH894H5nqJWPY/aLxD4M9nFW0yxczCQ8tpjwVYmP3/dCKp1IUXy
|
||||||
|
0h9TjyBRPv9O3JrTLTYBPLisLNqiU+YOZM6wgqmZTPtTCxKMmNlhGWKa8prAhMdb
|
||||||
|
GRxwkO6ylyL/j3J3HgcDHEC22/685L21HVFv8z/DMuj/eba4yyn1FBVXOU9hgLWS
|
||||||
|
LVLoVFFp7RaGSIECcqTyXldoZZpZrA89XDVuqSvHDiCOrg==
|
||||||
|
-----END CERTIFICATE-----
|
||||||
52
cmd/adapter/testdata/tlskey.key
vendored
Normal file
52
cmd/adapter/testdata/tlskey.key
vendored
Normal file
|
|
@ -0,0 +1,52 @@
|
||||||
|
-----BEGIN PRIVATE KEY-----
|
||||||
|
MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDIJOO8apS384bs
|
||||||
|
svnTVHp1VAiPg1tX+E6wPjayVPx+S4LgMKA4QM2kNKoPLQtvGV+lyqfhYp0HcdGz
|
||||||
|
CPVQ6aBlbHuOhInusJaXjgOlNTalThgigzky0t1jFxqaNjFtXqv6ME1Zcb9HVrGM
|
||||||
|
reEfNj8/Ijp7cfsBe7Jv2rBc06aidx9j66+oPTC98XcNnURUGO95UF8SjTQtoi10
|
||||||
|
m17uA7z/JUUSBvDJAg5Z62myZPU2stz38cuthROyEyXRBWimHh7bD17rwqhcWRCf
|
||||||
|
kFORWvwz9GMV5KfFCfWm2D2pm1f3ZWm5/FQbQrlxgxUagwDMoma+F6hQp84a/sYP
|
||||||
|
qqkWDRUK0NGZzWwxjfra8r8H2xFab+5ZFVr9+FhMgy6eelZ1JJc860s35QpkZrSR
|
||||||
|
H8RNMqLRG1cnDwHn9Md6joCZgLJhEW9L5xjpCVWkLXK59yA9ry5Jau9/2zDswlRz
|
||||||
|
YI4TNazbVa84KliEjt3nZ6DgQh3PRtxHDrqJIQSSYu1MtUmPArLtEDDP/BqDfGWC
|
||||||
|
ayc/SdxSWW9qU/aOq+D4KMKQXV44qc22f6rd/LKt/fcvDpbfcexXbeeNABQgx1rA
|
||||||
|
nhA8L/rYc4WTTbTrb8jwhaUoqJve6XOsHPVbk/L4CS9ReP1UwvhMM1C+Ast6rr/a
|
||||||
|
2bZkoMK+jmkA2QTwUsjvt/8G4dtVOwIDAQABAoICAQCFd9RG+exjH4uCnXfsbhGb
|
||||||
|
3IY47igj6frPnS1sjzAyKLkGOGcgHFcGgfhGVouhcxJNxW9e5hxBsq1c70RoyOOl
|
||||||
|
v0pGKCyzeB90wce8jFf8tK9zlH64XdY1FlsvK6Sagt+84Ck01J3yPOX6IppV7h8P
|
||||||
|
Qwws9j2lJ5A+919VB++/uCC+yZVCZEv03um9snq2ekp4ZBiCjpeVNumJMXOE1glb
|
||||||
|
PMdq1iYMZcqcPFkoFhtQdsbUsfJZrL0Nq6c0VJ8M6Fk7TGzIW+9aZiqnvd98t2go
|
||||||
|
XXkWSH148MNYmCvGx0lKOd7foF2WMFDqWbfhDiuiS0qoya3822qepff+ypgnlGHK
|
||||||
|
vr+9pLsWT7TG8pBfbXj47a7TwYAXkRMi+78vFQwoaeiKdehJM1YXZg9vBVS8BV3r
|
||||||
|
+0wYNE4WpdxUvX3aAnJO6ntRU6KCz3/D1+fxUT/w1rKX2Z1uTH5x2UxB6UUGDSF9
|
||||||
|
HiJfDp6RRtXHbQMR6uowM6UYBn0dl9Aso21oc2K4Gpx5QlsZaPi9M6BBMbPUhFcx
|
||||||
|
QH+w7fLmccwneJVGxjHkYOcLVLF7nuH5C2DsffrMubrgwuhSw2b8zy7ZpZ0eJ83D
|
||||||
|
CjJN9EgqwbmH0Or5N91YyVdR0Zm4EtODAo615O1kEMCKasKjpolOx/t9cgtbdkiq
|
||||||
|
pbLruOS+8jEG1erA7nYkQQKCAQEA4yba38hSkfIMUzfrlgF7AkXHbU4iINgKpHti
|
||||||
|
A9QrvEL9W4VHRiA5UTezzblyfMck9w/Hhx74pQjjGLj76L+8ZssCFI8ingNo3ItL
|
||||||
|
/AX3MN68NT4heiy8EvKRwRNWV05WEehZg9tTUKexIDRcDSr/9E+qG/cW5KOIQpYl
|
||||||
|
RIsKW2RUNFd3TVCQVUIzwe/0n6LuO2b7Btow+nfJ7U3mWQmHGYu7+SYjVlvIoQ68
|
||||||
|
jFGviGRineu/J7EiPND7qQzj78AtnXkULf+mjK2JdapRcn2EBNL34QepVCyjfXZf
|
||||||
|
QWm/ykI9nVOKRy1F38OhRHKrBICfWhN2Bgyvw3PPhGcb8EdknwKCAQEA4Y/2bpiz
|
||||||
|
S0H8LPUUsOLZWCadpp8yzvTerB/vuigkQiHM8w4QUwEpL2iXSF36MD8yV/c4ilVN
|
||||||
|
8m1p5prp1YtasTxsOWv7FDEs4oZfum1w3QsAvlrFRhctsACsZ1i4i3mvxQWJ955q
|
||||||
|
zZxs5vhO5CL24rVoQYGVQj/uCSHlyK7ko9AA8XkejTlZMJ5h0Mip+oWNxz3M/VTa
|
||||||
|
sJlYkQrbP0cWxCjKJLEmtVlVSCMeHoILGZzLcol6RVPbaAb57i27SRwY9YIFt1A+
|
||||||
|
OMpHFs4fgDa4A1IlobBwhhd1dAw3CL5QJN+ylDnBYsm1bwBRHx/AKUjpRv+7ZXQb
|
||||||
|
H9ngSivFHrXN5QKCAQBAqzUw9LUdO83qe0ck47MDiJ4oLlBlDVyqSz4yXNs+s8ux
|
||||||
|
nJYYDuCCkNstvJgtkfyiIenqPBUJ1yfgR/nf34ZhtXYYKE/wsIPQFhBB5ejkDuWC
|
||||||
|
OvgI8mdw9YItd7XjEThLzNx/P5fOpI823fE/BnjsMyn44DWyTiRi4KAnjXYbYsre
|
||||||
|
Q/CBIGiW/UwC8K+yKw6r9ruMzd2X0Ta5yq3Dt4Sw7ylK22LAGU1bHPjs8eyJZhr1
|
||||||
|
XsKDKFjY+55KGJNkFFBoPqpSFjByaI1z5FNfxwAo528Or8GzZyn8dBDWbKbfjFBC
|
||||||
|
VCBP90GnXOiytfqeQ4gaeuPlAQOhH3168mfv1kN9AoIBABOZzgFYVaRBjKdfeLfS
|
||||||
|
Tq7BVEvJY8HmN39fmxZjLJtukn/AhhygajLLdPH98KLGqxpHymsC9K4PYfd/GLjM
|
||||||
|
zkm+hW0L/BqKF2tr39+0aO1camkgPCpWE0tLE7A7XnYIUgTd8VpKMt/BKxl7FGfw
|
||||||
|
veF/gBrJJu5F3ep/PpeM0yOFDL/vFX+SLzTxXnClL1gsyOA6d5jACez0tmSMO/co
|
||||||
|
t0q+fKpploKFy8pj+tcN1+cW3/sJBU4G9nb4vDk9UhwNTAHxlYuTdoS61yidKtGa
|
||||||
|
b60iM1D0oyKT4Un/Ubz5xL8fjUYiKrLp8lE+Bs6clLdBtbvMtz0etMi0xy/K0+tS
|
||||||
|
Qx0CggEBALfe2TUfAt9aMqpcidaFwgNFTr61wgOeoLWLt559OeRIeZWKAEB81bnz
|
||||||
|
EJLxDF51Y2tLc/pEXrc0zJzzrFIfk/drYe0uD5RnJjRxE3+spwin6D32ZOZW3KSX
|
||||||
|
1zReW1On80o/LJU6nyDJrNJvay2eL9PyWi47nBdO7MRZi53im72BmmwxaAKXf40l
|
||||||
|
StykjloyFdI+eyGyQUqcs4nFHd3WWmV+lLIDhGDlF5EBUgueCJz1xO54oPj1PKGl
|
||||||
|
vDs7JXdJiS3HDf20GREGwvL1y1kewX+KqdO7aBZhLN3Rx/fZnS/UFC3xCtbikuG4
|
||||||
|
LeU1NmvuCRmWmrgEkqiKs3jgjbEPVQI=
|
||||||
|
-----END PRIVATE KEY-----
|
||||||
|
|
@ -8,7 +8,7 @@ import (
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
yaml "gopkg.in/yaml.v2"
|
yaml "gopkg.in/yaml.v2"
|
||||||
|
|
||||||
"github.com/directxman12/k8s-prometheus-adapter/cmd/config-gen/utils"
|
"sigs.k8s.io/prometheus-adapter/cmd/config-gen/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
|
|
||||||
|
|
@ -4,65 +4,66 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
prom "github.com/directxman12/k8s-prometheus-adapter/pkg/client"
|
|
||||||
. "github.com/directxman12/k8s-prometheus-adapter/pkg/config"
|
|
||||||
pmodel "github.com/prometheus/common/model"
|
pmodel "github.com/prometheus/common/model"
|
||||||
|
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
"sigs.k8s.io/prometheus-adapter/pkg/config"
|
||||||
)
|
)
|
||||||
|
|
||||||
// DefaultConfig returns a configuration equivalent to the former
|
// DefaultConfig returns a configuration equivalent to the former
|
||||||
// pre-advanced-config settings. This means that "normal" series labels
|
// pre-advanced-config settings. This means that "normal" series labels
|
||||||
// will be of the form `<prefix><<.Resource>>`, cadvisor series will be
|
// will be of the form `<prefix><<.Resource>>`, cadvisor series will be
|
||||||
// of the form `container_`, and have the label `pod_name`. Any series ending
|
// of the form `container_`, and have the label `pod`. Any series ending
|
||||||
// in total will be treated as a rate metric.
|
// in total will be treated as a rate metric.
|
||||||
func DefaultConfig(rateInterval time.Duration, labelPrefix string) *MetricsDiscoveryConfig {
|
func DefaultConfig(rateInterval time.Duration, labelPrefix string) *config.MetricsDiscoveryConfig {
|
||||||
return &MetricsDiscoveryConfig{
|
return &config.MetricsDiscoveryConfig{
|
||||||
Rules: []DiscoveryRule{
|
Rules: []config.DiscoveryRule{
|
||||||
// container seconds rate metrics
|
// container seconds rate metrics
|
||||||
{
|
{
|
||||||
SeriesQuery: string(prom.MatchSeries("", prom.NameMatches("^container_.*"), prom.LabelNeq("container_name", "POD"), prom.LabelNeq("namespace", ""), prom.LabelNeq("pod_name", ""))),
|
SeriesQuery: string(prom.MatchSeries("", prom.NameMatches("^container_.*"), prom.LabelNeq("container", "POD"), prom.LabelNeq("namespace", ""), prom.LabelNeq("pod", ""))),
|
||||||
Resources: ResourceMapping{
|
Resources: config.ResourceMapping{
|
||||||
Overrides: map[string]GroupResource{
|
Overrides: map[string]config.GroupResource{
|
||||||
"namespace": {Resource: "namespace"},
|
"namespace": {Resource: "namespace"},
|
||||||
"pod_name": {Resource: "pod"},
|
"pod": {Resource: "pod"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Name: NameMapping{Matches: "^container_(.*)_seconds_total$"},
|
Name: config.NameMapping{Matches: "^container_(.*)_seconds_total$"},
|
||||||
MetricsQuery: fmt.Sprintf(`sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[%s])) by (<<.GroupBy>>)`, pmodel.Duration(rateInterval).String()),
|
MetricsQuery: fmt.Sprintf(`sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[%s])) by (<<.GroupBy>>)`, pmodel.Duration(rateInterval).String()),
|
||||||
},
|
},
|
||||||
|
|
||||||
// container rate metrics
|
// container rate metrics
|
||||||
{
|
{
|
||||||
SeriesQuery: string(prom.MatchSeries("", prom.NameMatches("^container_.*"), prom.LabelNeq("container_name", "POD"), prom.LabelNeq("namespace", ""), prom.LabelNeq("pod_name", ""))),
|
SeriesQuery: string(prom.MatchSeries("", prom.NameMatches("^container_.*"), prom.LabelNeq("container", "POD"), prom.LabelNeq("namespace", ""), prom.LabelNeq("pod", ""))),
|
||||||
SeriesFilters: []RegexFilter{{IsNot: "^container_.*_seconds_total$"}},
|
SeriesFilters: []config.RegexFilter{{IsNot: "^container_.*_seconds_total$"}},
|
||||||
Resources: ResourceMapping{
|
Resources: config.ResourceMapping{
|
||||||
Overrides: map[string]GroupResource{
|
Overrides: map[string]config.GroupResource{
|
||||||
"namespace": {Resource: "namespace"},
|
"namespace": {Resource: "namespace"},
|
||||||
"pod_name": {Resource: "pod"},
|
"pod": {Resource: "pod"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Name: NameMapping{Matches: "^container_(.*)_total$"},
|
Name: config.NameMapping{Matches: "^container_(.*)_total$"},
|
||||||
MetricsQuery: fmt.Sprintf(`sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[%s])) by (<<.GroupBy>>)`, pmodel.Duration(rateInterval).String()),
|
MetricsQuery: fmt.Sprintf(`sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[%s])) by (<<.GroupBy>>)`, pmodel.Duration(rateInterval).String()),
|
||||||
},
|
},
|
||||||
|
|
||||||
// container non-cumulative metrics
|
// container non-cumulative metrics
|
||||||
{
|
{
|
||||||
SeriesQuery: string(prom.MatchSeries("", prom.NameMatches("^container_.*"), prom.LabelNeq("container_name", "POD"), prom.LabelNeq("namespace", ""), prom.LabelNeq("pod_name", ""))),
|
SeriesQuery: string(prom.MatchSeries("", prom.NameMatches("^container_.*"), prom.LabelNeq("container", "POD"), prom.LabelNeq("namespace", ""), prom.LabelNeq("pod", ""))),
|
||||||
SeriesFilters: []RegexFilter{{IsNot: "^container_.*_total$"}},
|
SeriesFilters: []config.RegexFilter{{IsNot: "^container_.*_total$"}},
|
||||||
Resources: ResourceMapping{
|
Resources: config.ResourceMapping{
|
||||||
Overrides: map[string]GroupResource{
|
Overrides: map[string]config.GroupResource{
|
||||||
"namespace": {Resource: "namespace"},
|
"namespace": {Resource: "namespace"},
|
||||||
"pod_name": {Resource: "pod"},
|
"pod": {Resource: "pod"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Name: NameMapping{Matches: "^container_(.*)$"},
|
Name: config.NameMapping{Matches: "^container_(.*)$"},
|
||||||
MetricsQuery: `sum(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}) by (<<.GroupBy>>)`,
|
MetricsQuery: `sum(<<.Series>>{<<.LabelMatchers>>,container!="POD"}) by (<<.GroupBy>>)`,
|
||||||
},
|
},
|
||||||
|
|
||||||
// normal non-cumulative metrics
|
// normal non-cumulative metrics
|
||||||
{
|
{
|
||||||
SeriesQuery: string(prom.MatchSeries("", prom.LabelNeq(fmt.Sprintf("%snamespace", labelPrefix), ""), prom.NameNotMatches("^container_.*"))),
|
SeriesQuery: string(prom.MatchSeries("", prom.LabelNeq(fmt.Sprintf("%snamespace", labelPrefix), ""), prom.NameNotMatches("^container_.*"))),
|
||||||
SeriesFilters: []RegexFilter{{IsNot: ".*_total$"}},
|
SeriesFilters: []config.RegexFilter{{IsNot: ".*_total$"}},
|
||||||
Resources: ResourceMapping{
|
Resources: config.ResourceMapping{
|
||||||
Template: fmt.Sprintf("%s<<.Resource>>", labelPrefix),
|
Template: fmt.Sprintf("%s<<.Resource>>", labelPrefix),
|
||||||
},
|
},
|
||||||
MetricsQuery: "sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)",
|
MetricsQuery: "sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)",
|
||||||
|
|
@ -71,9 +72,9 @@ func DefaultConfig(rateInterval time.Duration, labelPrefix string) *MetricsDisco
|
||||||
// normal rate metrics
|
// normal rate metrics
|
||||||
{
|
{
|
||||||
SeriesQuery: string(prom.MatchSeries("", prom.LabelNeq(fmt.Sprintf("%snamespace", labelPrefix), ""), prom.NameNotMatches("^container_.*"))),
|
SeriesQuery: string(prom.MatchSeries("", prom.LabelNeq(fmt.Sprintf("%snamespace", labelPrefix), ""), prom.NameNotMatches("^container_.*"))),
|
||||||
SeriesFilters: []RegexFilter{{IsNot: ".*_seconds_total"}},
|
SeriesFilters: []config.RegexFilter{{IsNot: ".*_seconds_total"}},
|
||||||
Name: NameMapping{Matches: "^(.*)_total$"},
|
Name: config.NameMapping{Matches: "^(.*)_total$"},
|
||||||
Resources: ResourceMapping{
|
Resources: config.ResourceMapping{
|
||||||
Template: fmt.Sprintf("%s<<.Resource>>", labelPrefix),
|
Template: fmt.Sprintf("%s<<.Resource>>", labelPrefix),
|
||||||
},
|
},
|
||||||
MetricsQuery: fmt.Sprintf("sum(rate(<<.Series>>{<<.LabelMatchers>>}[%s])) by (<<.GroupBy>>)", pmodel.Duration(rateInterval).String()),
|
MetricsQuery: fmt.Sprintf("sum(rate(<<.Series>>{<<.LabelMatchers>>}[%s])) by (<<.GroupBy>>)", pmodel.Duration(rateInterval).String()),
|
||||||
|
|
@ -82,12 +83,40 @@ func DefaultConfig(rateInterval time.Duration, labelPrefix string) *MetricsDisco
|
||||||
// seconds rate metrics
|
// seconds rate metrics
|
||||||
{
|
{
|
||||||
SeriesQuery: string(prom.MatchSeries("", prom.LabelNeq(fmt.Sprintf("%snamespace", labelPrefix), ""), prom.NameNotMatches("^container_.*"))),
|
SeriesQuery: string(prom.MatchSeries("", prom.LabelNeq(fmt.Sprintf("%snamespace", labelPrefix), ""), prom.NameNotMatches("^container_.*"))),
|
||||||
Name: NameMapping{Matches: "^(.*)_seconds_total$"},
|
Name: config.NameMapping{Matches: "^(.*)_seconds_total$"},
|
||||||
Resources: ResourceMapping{
|
Resources: config.ResourceMapping{
|
||||||
Template: fmt.Sprintf("%s<<.Resource>>", labelPrefix),
|
Template: fmt.Sprintf("%s<<.Resource>>", labelPrefix),
|
||||||
},
|
},
|
||||||
MetricsQuery: fmt.Sprintf("sum(rate(<<.Series>>{<<.LabelMatchers>>}[%s])) by (<<.GroupBy>>)", pmodel.Duration(rateInterval).String()),
|
MetricsQuery: fmt.Sprintf("sum(rate(<<.Series>>{<<.LabelMatchers>>}[%s])) by (<<.GroupBy>>)", pmodel.Duration(rateInterval).String()),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
||||||
|
ResourceRules: &config.ResourceRules{
|
||||||
|
CPU: config.ResourceRule{
|
||||||
|
ContainerQuery: fmt.Sprintf("sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[%s])) by (<<.GroupBy>>)", pmodel.Duration(rateInterval).String()),
|
||||||
|
NodeQuery: fmt.Sprintf("sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[%s])) by (<<.GroupBy>>)", pmodel.Duration(rateInterval).String()),
|
||||||
|
Resources: config.ResourceMapping{
|
||||||
|
Overrides: map[string]config.GroupResource{
|
||||||
|
"namespace": {Resource: "namespace"},
|
||||||
|
"pod": {Resource: "pod"},
|
||||||
|
"instance": {Resource: "node"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ContainerLabel: fmt.Sprintf("%scontainer", labelPrefix),
|
||||||
|
},
|
||||||
|
Memory: config.ResourceRule{
|
||||||
|
ContainerQuery: "sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>)",
|
||||||
|
NodeQuery: "sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>)",
|
||||||
|
Resources: config.ResourceMapping{
|
||||||
|
Overrides: map[string]config.GroupResource{
|
||||||
|
"namespace": {Resource: "namespace"},
|
||||||
|
"pod": {Resource: "pod"},
|
||||||
|
"instance": {Resource: "node"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ContainerLabel: fmt.Sprintf("%scontainer", labelPrefix),
|
||||||
|
},
|
||||||
|
Window: pmodel.Duration(rateInterval),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
3
code-of-conduct.md
Normal file
3
code-of-conduct.md
Normal file
|
|
@ -0,0 +1,3 @@
|
||||||
|
# Kubernetes Community Code of Conduct
|
||||||
|
|
||||||
|
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
|
||||||
|
|
@ -1,4 +0,0 @@
|
||||||
FROM BASEIMAGE
|
|
||||||
COPY adapter /
|
|
||||||
USER 1001:1001
|
|
||||||
ENTRYPOINT ["/adapter"]
|
|
||||||
|
|
@ -1,20 +1,11 @@
|
||||||
Example Deployment
|
Example Deployment
|
||||||
==================
|
==================
|
||||||
|
|
||||||
1. Make sure you've built the included Dockerfile with `make docker-build`. The image should be tagged as `directxman12/k8s-prometheus-adapter:latest`.
|
1. Make sure you've built the included Dockerfile with `TAG=latest make container`. The image should be tagged as `registry.k8s.io/prometheus-adapter/staging-prometheus-adapter:latest`.
|
||||||
|
|
||||||
2. Create a secret called `cm-adapter-serving-certs` with two values:
|
2. `kubectl create namespace monitoring` to ensure that the namespace that we're installing
|
||||||
`serving.crt` and `serving.key`. These are the serving certificates used
|
|
||||||
by the adapter for serving HTTPS traffic. For more information on how to
|
|
||||||
generate these certificates, see the [auth concepts
|
|
||||||
documentation](https://github.com/kubernetes-incubator/apiserver-builder/blob/master/docs/concepts/auth.md)
|
|
||||||
in the apiserver-builder repository.
|
|
||||||
The kube-prometheus project published two scripts [gencerts.sh](https://github.com/coreos/prometheus-operator/blob/master/contrib/kube-prometheus/experimental/custom-metrics-api/gencerts.sh)
|
|
||||||
and [deploy.sh](https://github.com/coreos/prometheus-operator/blob/master/contrib/kube-prometheus/experimental/custom-metrics-api/deploy.sh) to create the `cm-adapter-serving-certs` secret.
|
|
||||||
|
|
||||||
3. `kubectl create namespace custom-metrics` to ensure that the namespace that we're installing
|
|
||||||
the custom metrics adapter in exists.
|
the custom metrics adapter in exists.
|
||||||
|
|
||||||
4. `kubectl create -f manifests/`, modifying the Deployment as necessary to
|
3. `kubectl create -f manifests/`, modifying the Deployment as necessary to
|
||||||
point to your Prometheus server, and the ConfigMap to contain your desired
|
point to your Prometheus server, and the ConfigMap to contain your desired
|
||||||
metrics discovery configuration.
|
metrics discovery configuration.
|
||||||
|
|
|
||||||
17
deploy/manifests/api-service.yaml
Normal file
17
deploy/manifests/api-service.yaml
Normal file
|
|
@ -0,0 +1,17 @@
|
||||||
|
apiVersion: apiregistration.k8s.io/v1
|
||||||
|
kind: APIService
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
app.kubernetes.io/version: 0.12.0
|
||||||
|
name: v1beta1.metrics.k8s.io
|
||||||
|
spec:
|
||||||
|
group: metrics.k8s.io
|
||||||
|
groupPriorityMinimum: 100
|
||||||
|
insecureSkipTLSVerify: true
|
||||||
|
service:
|
||||||
|
name: prometheus-adapter
|
||||||
|
namespace: monitoring
|
||||||
|
version: v1beta1
|
||||||
|
versionPriority: 100
|
||||||
22
deploy/manifests/cluster-role-aggregated-metrics-reader.yaml
Normal file
22
deploy/manifests/cluster-role-aggregated-metrics-reader.yaml
Normal file
|
|
@ -0,0 +1,22 @@
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: ClusterRole
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
app.kubernetes.io/version: 0.12.0
|
||||||
|
rbac.authorization.k8s.io/aggregate-to-admin: "true"
|
||||||
|
rbac.authorization.k8s.io/aggregate-to-edit: "true"
|
||||||
|
rbac.authorization.k8s.io/aggregate-to-view: "true"
|
||||||
|
name: system:aggregated-metrics-reader
|
||||||
|
namespace: monitoring
|
||||||
|
rules:
|
||||||
|
- apiGroups:
|
||||||
|
- metrics.k8s.io
|
||||||
|
resources:
|
||||||
|
- pods
|
||||||
|
- nodes
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- watch
|
||||||
17
deploy/manifests/cluster-role-binding-delegator.yaml
Normal file
17
deploy/manifests/cluster-role-binding-delegator.yaml
Normal file
|
|
@ -0,0 +1,17 @@
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: ClusterRoleBinding
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
app.kubernetes.io/version: 0.12.0
|
||||||
|
name: resource-metrics:system:auth-delegator
|
||||||
|
namespace: monitoring
|
||||||
|
roleRef:
|
||||||
|
apiGroup: rbac.authorization.k8s.io
|
||||||
|
kind: ClusterRole
|
||||||
|
name: system:auth-delegator
|
||||||
|
subjects:
|
||||||
|
- kind: ServiceAccount
|
||||||
|
name: prometheus-adapter
|
||||||
|
namespace: monitoring
|
||||||
|
|
@ -2,6 +2,9 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||||
kind: ClusterRoleBinding
|
kind: ClusterRoleBinding
|
||||||
metadata:
|
metadata:
|
||||||
name: hpa-controller-custom-metrics
|
name: hpa-controller-custom-metrics
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
roleRef:
|
roleRef:
|
||||||
apiGroup: rbac.authorization.k8s.io
|
apiGroup: rbac.authorization.k8s.io
|
||||||
kind: ClusterRole
|
kind: ClusterRole
|
||||||
17
deploy/manifests/cluster-role-binding.yaml
Normal file
17
deploy/manifests/cluster-role-binding.yaml
Normal file
|
|
@ -0,0 +1,17 @@
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: ClusterRoleBinding
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
app.kubernetes.io/version: 0.12.0
|
||||||
|
name: prometheus-adapter
|
||||||
|
namespace: monitoring
|
||||||
|
roleRef:
|
||||||
|
apiGroup: rbac.authorization.k8s.io
|
||||||
|
kind: ClusterRole
|
||||||
|
name: prometheus-adapter
|
||||||
|
subjects:
|
||||||
|
- kind: ServiceAccount
|
||||||
|
name: prometheus-adapter
|
||||||
|
namespace: monitoring
|
||||||
15
deploy/manifests/cluster-role-metrics-server-resources.yaml
Normal file
15
deploy/manifests/cluster-role-metrics-server-resources.yaml
Normal file
|
|
@ -0,0 +1,15 @@
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: ClusterRole
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
app.kubernetes.io/version: 0.12.0
|
||||||
|
name: resource-metrics-server-resources
|
||||||
|
rules:
|
||||||
|
- apiGroups:
|
||||||
|
- metrics.k8s.io
|
||||||
|
resources:
|
||||||
|
- '*'
|
||||||
|
verbs:
|
||||||
|
- '*'
|
||||||
20
deploy/manifests/cluster-role.yaml
Normal file
20
deploy/manifests/cluster-role.yaml
Normal file
|
|
@ -0,0 +1,20 @@
|
||||||
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
kind: ClusterRole
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
app.kubernetes.io/version: 0.12.0
|
||||||
|
name: prometheus-adapter
|
||||||
|
rules:
|
||||||
|
- apiGroups:
|
||||||
|
- ""
|
||||||
|
resources:
|
||||||
|
- nodes
|
||||||
|
- namespaces
|
||||||
|
- pods
|
||||||
|
- services
|
||||||
|
verbs:
|
||||||
|
- get
|
||||||
|
- list
|
||||||
|
- watch
|
||||||
53
deploy/manifests/config-map.yaml
Normal file
53
deploy/manifests/config-map.yaml
Normal file
|
|
@ -0,0 +1,53 @@
|
||||||
|
apiVersion: v1
|
||||||
|
data:
|
||||||
|
config.yaml: |-
|
||||||
|
"resourceRules":
|
||||||
|
"cpu":
|
||||||
|
"containerLabel": "container"
|
||||||
|
"containerQuery": |
|
||||||
|
sum by (<<.GroupBy>>) (
|
||||||
|
irate (
|
||||||
|
container_cpu_usage_seconds_total{<<.LabelMatchers>>,container!="",pod!=""}[4m]
|
||||||
|
)
|
||||||
|
)
|
||||||
|
"nodeQuery": |
|
||||||
|
sum by (<<.GroupBy>>) (
|
||||||
|
irate(
|
||||||
|
node_cpu_usage_seconds_total{<<.LabelMatchers>>}[4m]
|
||||||
|
)
|
||||||
|
)
|
||||||
|
"resources":
|
||||||
|
"overrides":
|
||||||
|
"namespace":
|
||||||
|
"resource": "namespace"
|
||||||
|
"node":
|
||||||
|
"resource": "node"
|
||||||
|
"pod":
|
||||||
|
"resource": "pod"
|
||||||
|
"memory":
|
||||||
|
"containerLabel": "container"
|
||||||
|
"containerQuery": |
|
||||||
|
sum by (<<.GroupBy>>) (
|
||||||
|
container_memory_working_set_bytes{<<.LabelMatchers>>,container!="",pod!=""}
|
||||||
|
)
|
||||||
|
"nodeQuery": |
|
||||||
|
sum by (<<.GroupBy>>) (
|
||||||
|
node_memory_working_set_bytes{<<.LabelMatchers>>}
|
||||||
|
)
|
||||||
|
"resources":
|
||||||
|
"overrides":
|
||||||
|
"node":
|
||||||
|
"resource": "node"
|
||||||
|
"namespace":
|
||||||
|
"resource": "namespace"
|
||||||
|
"pod":
|
||||||
|
"resource": "pod"
|
||||||
|
"window": "5m"
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
app.kubernetes.io/version: 0.12.0
|
||||||
|
name: adapter-config
|
||||||
|
namespace: monitoring
|
||||||
|
|
@ -1,51 +0,0 @@
|
||||||
apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app: custom-metrics-apiserver
|
|
||||||
name: custom-metrics-apiserver
|
|
||||||
namespace: custom-metrics
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
selector:
|
|
||||||
matchLabels:
|
|
||||||
app: custom-metrics-apiserver
|
|
||||||
template:
|
|
||||||
metadata:
|
|
||||||
labels:
|
|
||||||
app: custom-metrics-apiserver
|
|
||||||
name: custom-metrics-apiserver
|
|
||||||
spec:
|
|
||||||
serviceAccountName: custom-metrics-apiserver
|
|
||||||
containers:
|
|
||||||
- name: custom-metrics-apiserver
|
|
||||||
image: directxman12/k8s-prometheus-adapter-amd64
|
|
||||||
args:
|
|
||||||
- --secure-port=6443
|
|
||||||
- --tls-cert-file=/var/run/serving-cert/serving.crt
|
|
||||||
- --tls-private-key-file=/var/run/serving-cert/serving.key
|
|
||||||
- --logtostderr=true
|
|
||||||
- --prometheus-url=http://prometheus.prom.svc:9090/
|
|
||||||
- --metrics-relist-interval=1m
|
|
||||||
- --v=10
|
|
||||||
- --config=/etc/adapter/config.yaml
|
|
||||||
ports:
|
|
||||||
- containerPort: 6443
|
|
||||||
volumeMounts:
|
|
||||||
- mountPath: /var/run/serving-cert
|
|
||||||
name: volume-serving-cert
|
|
||||||
readOnly: true
|
|
||||||
- mountPath: /etc/adapter/
|
|
||||||
name: config
|
|
||||||
readOnly: true
|
|
||||||
- mountPath: /tmp
|
|
||||||
name: tmp-vol
|
|
||||||
volumes:
|
|
||||||
- name: volume-serving-cert
|
|
||||||
secret:
|
|
||||||
secretName: cm-adapter-serving-certs
|
|
||||||
- name: config
|
|
||||||
configMap:
|
|
||||||
name: adapter-config
|
|
||||||
- name: tmp-vol
|
|
||||||
emptyDir: {}
|
|
||||||
|
|
@ -1,12 +0,0 @@
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRoleBinding
|
|
||||||
metadata:
|
|
||||||
name: custom-metrics-resource-reader
|
|
||||||
roleRef:
|
|
||||||
apiGroup: rbac.authorization.k8s.io
|
|
||||||
kind: ClusterRole
|
|
||||||
name: custom-metrics-resource-reader
|
|
||||||
subjects:
|
|
||||||
- kind: ServiceAccount
|
|
||||||
name: custom-metrics-apiserver
|
|
||||||
namespace: custom-metrics
|
|
||||||
|
|
@ -1,5 +0,0 @@
|
||||||
kind: ServiceAccount
|
|
||||||
apiVersion: v1
|
|
||||||
metadata:
|
|
||||||
name: custom-metrics-apiserver
|
|
||||||
namespace: custom-metrics
|
|
||||||
|
|
@ -1,11 +0,0 @@
|
||||||
apiVersion: v1
|
|
||||||
kind: Service
|
|
||||||
metadata:
|
|
||||||
name: custom-metrics-apiserver
|
|
||||||
namespace: custom-metrics
|
|
||||||
spec:
|
|
||||||
ports:
|
|
||||||
- port: 443
|
|
||||||
targetPort: 6443
|
|
||||||
selector:
|
|
||||||
app: custom-metrics-apiserver
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
||||||
apiVersion: apiregistration.k8s.io/v1beta1
|
|
||||||
kind: APIService
|
|
||||||
metadata:
|
|
||||||
name: v1beta1.custom.metrics.k8s.io
|
|
||||||
spec:
|
|
||||||
service:
|
|
||||||
name: custom-metrics-apiserver
|
|
||||||
namespace: custom-metrics
|
|
||||||
group: custom.metrics.k8s.io
|
|
||||||
version: v1beta1
|
|
||||||
insecureSkipTLSVerify: true
|
|
||||||
groupPriorityMinimum: 100
|
|
||||||
versionPriority: 100
|
|
||||||
|
|
@ -1,9 +0,0 @@
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRole
|
|
||||||
metadata:
|
|
||||||
name: custom-metrics-server-resources
|
|
||||||
rules:
|
|
||||||
- apiGroups:
|
|
||||||
- custom.metrics.k8s.io
|
|
||||||
resources: ["*"]
|
|
||||||
verbs: ["*"]
|
|
||||||
|
|
@ -1,74 +0,0 @@
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: adapter-config
|
|
||||||
namespace: custom-metrics
|
|
||||||
data:
|
|
||||||
config.yaml: |
|
|
||||||
rules:
|
|
||||||
- seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}'
|
|
||||||
seriesFilters: []
|
|
||||||
resources:
|
|
||||||
overrides:
|
|
||||||
namespace:
|
|
||||||
resource: namespace
|
|
||||||
pod_name:
|
|
||||||
resource: pod
|
|
||||||
name:
|
|
||||||
matches: ^container_(.*)_seconds_total$
|
|
||||||
as: ""
|
|
||||||
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[5m]))
|
|
||||||
by (<<.GroupBy>>)
|
|
||||||
- seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}'
|
|
||||||
seriesFilters:
|
|
||||||
- isNot: ^container_.*_seconds_total$
|
|
||||||
resources:
|
|
||||||
overrides:
|
|
||||||
namespace:
|
|
||||||
resource: namespace
|
|
||||||
pod_name:
|
|
||||||
resource: pod
|
|
||||||
name:
|
|
||||||
matches: ^container_(.*)_total$
|
|
||||||
as: ""
|
|
||||||
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[5m]))
|
|
||||||
by (<<.GroupBy>>)
|
|
||||||
- seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}'
|
|
||||||
seriesFilters:
|
|
||||||
- isNot: ^container_.*_total$
|
|
||||||
resources:
|
|
||||||
overrides:
|
|
||||||
namespace:
|
|
||||||
resource: namespace
|
|
||||||
pod_name:
|
|
||||||
resource: pod
|
|
||||||
name:
|
|
||||||
matches: ^container_(.*)$
|
|
||||||
as: ""
|
|
||||||
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}) by (<<.GroupBy>>)
|
|
||||||
- seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
|
|
||||||
seriesFilters:
|
|
||||||
- isNot: .*_total$
|
|
||||||
resources:
|
|
||||||
template: <<.Resource>>
|
|
||||||
name:
|
|
||||||
matches: ""
|
|
||||||
as: ""
|
|
||||||
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>)
|
|
||||||
- seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
|
|
||||||
seriesFilters:
|
|
||||||
- isNot: .*_seconds_total
|
|
||||||
resources:
|
|
||||||
template: <<.Resource>>
|
|
||||||
name:
|
|
||||||
matches: ^(.*)_total$
|
|
||||||
as: ""
|
|
||||||
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[5m])) by (<<.GroupBy>>)
|
|
||||||
- seriesQuery: '{namespace!="",__name__!~"^container_.*"}'
|
|
||||||
seriesFilters: []
|
|
||||||
resources:
|
|
||||||
template: <<.Resource>>
|
|
||||||
name:
|
|
||||||
matches: ^(.*)_seconds_total$
|
|
||||||
as: ""
|
|
||||||
metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[5m])) by (<<.GroupBy>>)
|
|
||||||
|
|
@ -1,14 +0,0 @@
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
|
||||||
kind: ClusterRole
|
|
||||||
metadata:
|
|
||||||
name: custom-metrics-resource-reader
|
|
||||||
rules:
|
|
||||||
- apiGroups:
|
|
||||||
- ""
|
|
||||||
resources:
|
|
||||||
- namespaces
|
|
||||||
- pods
|
|
||||||
- services
|
|
||||||
verbs:
|
|
||||||
- get
|
|
||||||
- list
|
|
||||||
89
deploy/manifests/deployment.yaml
Normal file
89
deploy/manifests/deployment.yaml
Normal file
|
|
@ -0,0 +1,89 @@
|
||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
app.kubernetes.io/version: 0.12.0
|
||||||
|
name: prometheus-adapter
|
||||||
|
namespace: monitoring
|
||||||
|
spec:
|
||||||
|
replicas: 2
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
strategy:
|
||||||
|
rollingUpdate:
|
||||||
|
maxSurge: 1
|
||||||
|
maxUnavailable: 1
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
app.kubernetes.io/version: 0.12.0
|
||||||
|
spec:
|
||||||
|
automountServiceAccountToken: true
|
||||||
|
containers:
|
||||||
|
- args:
|
||||||
|
- --cert-dir=/var/run/serving-cert
|
||||||
|
- --config=/etc/adapter/config.yaml
|
||||||
|
- --metrics-relist-interval=1m
|
||||||
|
- --prometheus-url=https://prometheus.monitoring.svc:9090/
|
||||||
|
- --secure-port=6443
|
||||||
|
- --tls-cipher-suites=TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA
|
||||||
|
image: registry.k8s.io/prometheus-adapter/prometheus-adapter:v0.12.0
|
||||||
|
livenessProbe:
|
||||||
|
failureThreshold: 5
|
||||||
|
httpGet:
|
||||||
|
path: /livez
|
||||||
|
port: https
|
||||||
|
scheme: HTTPS
|
||||||
|
initialDelaySeconds: 30
|
||||||
|
periodSeconds: 5
|
||||||
|
name: prometheus-adapter
|
||||||
|
ports:
|
||||||
|
- containerPort: 6443
|
||||||
|
name: https
|
||||||
|
readinessProbe:
|
||||||
|
failureThreshold: 5
|
||||||
|
httpGet:
|
||||||
|
path: /readyz
|
||||||
|
port: https
|
||||||
|
scheme: HTTPS
|
||||||
|
initialDelaySeconds: 30
|
||||||
|
periodSeconds: 5
|
||||||
|
resources:
|
||||||
|
requests:
|
||||||
|
cpu: 102m
|
||||||
|
memory: 180Mi
|
||||||
|
securityContext:
|
||||||
|
allowPrivilegeEscalation: false
|
||||||
|
capabilities:
|
||||||
|
drop:
|
||||||
|
- ALL
|
||||||
|
readOnlyRootFilesystem: true
|
||||||
|
terminationMessagePolicy: FallbackToLogsOnError
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /tmp
|
||||||
|
name: tmpfs
|
||||||
|
readOnly: false
|
||||||
|
- mountPath: /var/run/serving-cert
|
||||||
|
name: volume-serving-cert
|
||||||
|
readOnly: false
|
||||||
|
- mountPath: /etc/adapter
|
||||||
|
name: config
|
||||||
|
readOnly: false
|
||||||
|
nodeSelector:
|
||||||
|
kubernetes.io/os: linux
|
||||||
|
securityContext: {}
|
||||||
|
serviceAccountName: prometheus-adapter
|
||||||
|
volumes:
|
||||||
|
- emptyDir: {}
|
||||||
|
name: tmpfs
|
||||||
|
- emptyDir: {}
|
||||||
|
name: volume-serving-cert
|
||||||
|
- configMap:
|
||||||
|
name: adapter-config
|
||||||
|
name: config
|
||||||
21
deploy/manifests/network-policy.yaml
Normal file
21
deploy/manifests/network-policy.yaml
Normal file
|
|
@ -0,0 +1,21 @@
|
||||||
|
apiVersion: networking.k8s.io/v1
|
||||||
|
kind: NetworkPolicy
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
app.kubernetes.io/version: 0.12.0
|
||||||
|
name: prometheus-adapter
|
||||||
|
namespace: monitoring
|
||||||
|
spec:
|
||||||
|
egress:
|
||||||
|
- {}
|
||||||
|
ingress:
|
||||||
|
- {}
|
||||||
|
podSelector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
policyTypes:
|
||||||
|
- Egress
|
||||||
|
- Ingress
|
||||||
15
deploy/manifests/pod-disruption-budget.yaml
Normal file
15
deploy/manifests/pod-disruption-budget.yaml
Normal file
|
|
@ -0,0 +1,15 @@
|
||||||
|
apiVersion: policy/v1
|
||||||
|
kind: PodDisruptionBudget
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
app.kubernetes.io/version: 0.12.0
|
||||||
|
name: prometheus-adapter
|
||||||
|
namespace: monitoring
|
||||||
|
spec:
|
||||||
|
minAvailable: 1
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
|
@ -1,7 +1,11 @@
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
kind: RoleBinding
|
kind: RoleBinding
|
||||||
metadata:
|
metadata:
|
||||||
name: custom-metrics-auth-reader
|
labels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
app.kubernetes.io/version: 0.12.0
|
||||||
|
name: resource-metrics-auth-reader
|
||||||
namespace: kube-system
|
namespace: kube-system
|
||||||
roleRef:
|
roleRef:
|
||||||
apiGroup: rbac.authorization.k8s.io
|
apiGroup: rbac.authorization.k8s.io
|
||||||
|
|
@ -9,5 +13,5 @@ roleRef:
|
||||||
name: extension-apiserver-authentication-reader
|
name: extension-apiserver-authentication-reader
|
||||||
subjects:
|
subjects:
|
||||||
- kind: ServiceAccount
|
- kind: ServiceAccount
|
||||||
name: custom-metrics-apiserver
|
name: prometheus-adapter
|
||||||
namespace: custom-metrics
|
namespace: monitoring
|
||||||
10
deploy/manifests/service-account.yaml
Normal file
10
deploy/manifests/service-account.yaml
Normal file
|
|
@ -0,0 +1,10 @@
|
||||||
|
apiVersion: v1
|
||||||
|
automountServiceAccountToken: false
|
||||||
|
kind: ServiceAccount
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
app.kubernetes.io/version: 0.12.0
|
||||||
|
name: prometheus-adapter
|
||||||
|
namespace: monitoring
|
||||||
17
deploy/manifests/service.yaml
Normal file
17
deploy/manifests/service.yaml
Normal file
|
|
@ -0,0 +1,17 @@
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
app.kubernetes.io/version: 0.12.0
|
||||||
|
name: prometheus-adapter
|
||||||
|
namespace: monitoring
|
||||||
|
spec:
|
||||||
|
ports:
|
||||||
|
- name: https
|
||||||
|
port: 443
|
||||||
|
targetPort: 6443
|
||||||
|
selector:
|
||||||
|
app.kubernetes.io/component: metrics-adapter
|
||||||
|
app.kubernetes.io/name: prometheus-adapter
|
||||||
|
|
@ -2,25 +2,25 @@ Configuration Walkthroughs
|
||||||
==========================
|
==========================
|
||||||
|
|
||||||
*If you're looking for reference documentation on configuration, please
|
*If you're looking for reference documentation on configuration, please
|
||||||
read the the [configuration reference](/docs/config.md)*
|
read the [configuration reference](/docs/config.md)*
|
||||||
|
|
||||||
Per-pod HTTP Requests
|
Per-pod HTTP Requests
|
||||||
---------------------
|
---------------------
|
||||||
|
|
||||||
### Background
|
### Background
|
||||||
|
|
||||||
*The [full walkthrough](/docs/walkthrough.md) sets up a the background for
|
*The [full walkthrough](/docs/walkthrough.md) sets up the background for
|
||||||
something like this*
|
something like this*
|
||||||
|
|
||||||
Suppose we have some frontend webserver, and we're trying to write an
|
Suppose we have some frontend webserver, and we're trying to write a
|
||||||
configuration for the Promtheus adapter so that we can autoscale it based
|
configuration for the Prometheus adapter so that we can autoscale it based
|
||||||
on the HTTP requests per second that it receives.
|
on the HTTP requests per second that it receives.
|
||||||
|
|
||||||
Before starting, we've gone and instrumented our frontend server with
|
Before starting, we've gone and instrumented our frontend server with
|
||||||
a metric, `http_requests_total`. It is exposed with a single label,
|
a metric, `http_requests_total`. It is exposed with a single label,
|
||||||
`method`, breaking down the requests by HTTP verb.
|
`method`, breaking down the requests by HTTP verb.
|
||||||
|
|
||||||
We've configured our Prometheus to collect the metric, and our promethues
|
We've configured our Prometheus to collect the metric, and it
|
||||||
adds the `kubernetes_namespace` and `kubernetes_pod_name` labels,
|
adds the `kubernetes_namespace` and `kubernetes_pod_name` labels,
|
||||||
representing namespace and pod, respectively.
|
representing namespace and pod, respectively.
|
||||||
|
|
||||||
|
|
@ -34,7 +34,7 @@ http_requests_total{method="GET",kubernetes_namespace="production",kubernetes_po
|
||||||
|
|
||||||
The adapter considers metrics in the following ways:
|
The adapter considers metrics in the following ways:
|
||||||
|
|
||||||
1. First, It discovers the metrics available (*Discovery*)
|
1. First, it discovers the metrics available (*Discovery*)
|
||||||
|
|
||||||
2. Then, it figures out which Kubernetes resources each metric is
|
2. Then, it figures out which Kubernetes resources each metric is
|
||||||
associated with (*Association*)
|
associated with (*Association*)
|
||||||
|
|
@ -58,7 +58,7 @@ rules:
|
||||||
If we want to find all `http_requests_total` series ourselves in the
|
If we want to find all `http_requests_total` series ourselves in the
|
||||||
Prometheus dashboard, we'd write
|
Prometheus dashboard, we'd write
|
||||||
`http_requests_total{kubernetes_namespace!="",kubernetes_pod_name!=""}` to
|
`http_requests_total{kubernetes_namespace!="",kubernetes_pod_name!=""}` to
|
||||||
find all find all `http_requests_total` series that were associated with
|
find all `http_requests_total` series that were associated with
|
||||||
a namespace and pod.
|
a namespace and pod.
|
||||||
|
|
||||||
We can add this to our rule in the `seriesQuery` field, to tell the
|
We can add this to our rule in the `seriesQuery` field, to tell the
|
||||||
|
|
@ -99,13 +99,13 @@ all resources currently available in your cluster, you can use the
|
||||||
`kubectl api-resources` command (but the list of available resources can
|
`kubectl api-resources` command (but the list of available resources can
|
||||||
change as you add or remove CRDs or aggregated API servers). For more
|
change as you add or remove CRDs or aggregated API servers). For more
|
||||||
information on resources, see [Kinds, Resources, and
|
information on resources, see [Kinds, Resources, and
|
||||||
Scopes](https://github.com/kubernetes-incubator/custom-metrics-apiserver/blob/master/docs/getting-started.md#kinds-resources-and-scopes)
|
Scopes](https://github.com/kubernetes-sigs/custom-metrics-apiserver/blob/master/docs/getting-started.md#kinds-resources-and-scopes)
|
||||||
in the custom-metrics-apiserver boilerplate guide.
|
in the custom-metrics-apiserver boilerplate guide.
|
||||||
|
|
||||||
Now, cumulative metrics (like those that end in `_total`) aren't
|
Now, cumulative metrics (like those that end in `_total`) aren't
|
||||||
particularly useful for autoscaling, so we want to convert them to rate
|
particularly useful for autoscaling, so we want to convert them to rate
|
||||||
metrics in the API. We'll call the rate version of our metric
|
metrics in the API. We'll call the rate version of our metric
|
||||||
`http_requests_per_second`. We can use the the `name` field to tell the
|
`http_requests_per_second`. We can use the `name` field to tell the
|
||||||
adapter about that:
|
adapter about that:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
|
|
|
||||||
|
|
@ -31,13 +31,13 @@ might look like:
|
||||||
```yaml
|
```yaml
|
||||||
rules:
|
rules:
|
||||||
# this rule matches cumulative cAdvisor metrics measured in seconds
|
# this rule matches cumulative cAdvisor metrics measured in seconds
|
||||||
- seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}'
|
- seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}'
|
||||||
resources:
|
resources:
|
||||||
# skip specifying generic resource<->label mappings, and just
|
# skip specifying generic resource<->label mappings, and just
|
||||||
# attach only pod and namespace resources by mapping label names to group-resources
|
# attach only pod and namespace resources by mapping label names to group-resources
|
||||||
overrides:
|
overrides:
|
||||||
namespace: {resource: "namespace"},
|
namespace: {resource: "namespace"}
|
||||||
pod_name: {resource: "pod"},
|
pod: {resource: "pod"}
|
||||||
# specify that the `container_` and `_seconds_total` suffixes should be removed.
|
# specify that the `container_` and `_seconds_total` suffixes should be removed.
|
||||||
# this also introduces an implicit filter on metric family names
|
# this also introduces an implicit filter on metric family names
|
||||||
name:
|
name:
|
||||||
|
|
@ -48,7 +48,7 @@ rules:
|
||||||
# This is a Go template where the `.Series` and `.LabelMatchers` string values
|
# This is a Go template where the `.Series` and `.LabelMatchers` string values
|
||||||
# are available, and the delimiters are `<<` and `>>` to avoid conflicts with
|
# are available, and the delimiters are `<<` and `>>` to avoid conflicts with
|
||||||
# the prometheus query language
|
# the prometheus query language
|
||||||
metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[2m])) by (<<.GroupBy>>)"
|
metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[2m])) by (<<.GroupBy>>)"
|
||||||
```
|
```
|
||||||
|
|
||||||
Discovery
|
Discovery
|
||||||
|
|
@ -83,9 +83,9 @@ For example:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# match all cAdvisor metrics that aren't measured in seconds
|
# match all cAdvisor metrics that aren't measured in seconds
|
||||||
seriesQuery: '{__name__=~"^container_.*_total",container_name!="POD",namespace!="",pod_name!=""}'
|
seriesQuery: '{__name__=~"^container_.*_total",container!="POD",namespace!="",pod!=""}'
|
||||||
seriesFilters:
|
seriesFilters:
|
||||||
isNot: "^container_.*_seconds_total"
|
- isNot: "^container_.*_seconds_total"
|
||||||
```
|
```
|
||||||
|
|
||||||
Association
|
Association
|
||||||
|
|
@ -119,7 +119,7 @@ group-resource. For instance:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# the microservice label corresponds to the apps.deployment resource
|
# the microservice label corresponds to the apps.deployment resource
|
||||||
resource:
|
resources:
|
||||||
overrides:
|
overrides:
|
||||||
microservice: {group: "apps", resource: "deployment"}
|
microservice: {group: "apps", resource: "deployment"}
|
||||||
```
|
```
|
||||||
|
|
@ -134,7 +134,7 @@ Naming
|
||||||
------
|
------
|
||||||
|
|
||||||
Naming governs the process of converting a Prometheus metric name into
|
Naming governs the process of converting a Prometheus metric name into
|
||||||
a metric in the custom metrics API, and vice version. It's controlled by
|
a metric in the custom metrics API, and vice versa. It's controlled by
|
||||||
the `name` field.
|
the `name` field.
|
||||||
|
|
||||||
Naming is controlled by specifying a pattern to extract an API name from
|
Naming is controlled by specifying a pattern to extract an API name from
|
||||||
|
|
@ -179,7 +179,7 @@ template:
|
||||||
group-resource, plus the label for namespace, if the group-resource is
|
group-resource, plus the label for namespace, if the group-resource is
|
||||||
namespaced.
|
namespaced.
|
||||||
- `GroupBy`: a comma-separated list of labels to group by. Currently,
|
- `GroupBy`: a comma-separated list of labels to group by. Currently,
|
||||||
this contains the group-resoure label used in `LabelMarchers`.
|
this contains the group-resource label used in `LabelMatchers`.
|
||||||
|
|
||||||
For instance, suppose we had a series `http_requests_total` (exposed as
|
For instance, suppose we had a series `http_requests_total` (exposed as
|
||||||
`http_requests_per_second` in the API) with labels `service`, `pod`,
|
`http_requests_per_second` in the API) with labels `service`, `pod`,
|
||||||
|
|
@ -211,5 +211,5 @@ For example:
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
# convert cumulative cAdvisor metrics into rates calculated over 2 minutes
|
# convert cumulative cAdvisor metrics into rates calculated over 2 minutes
|
||||||
metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[2m])) by (<<.GroupBy>>)"
|
metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[2m])) by (<<.GroupBy>>)"
|
||||||
```
|
```
|
||||||
|
|
|
||||||
84
docs/externalmetrics.md
Normal file
84
docs/externalmetrics.md
Normal file
|
|
@ -0,0 +1,84 @@
|
||||||
|
External Metrics
|
||||||
|
===========
|
||||||
|
|
||||||
|
It's possible to configure [Autoscaling on metrics not related to Kubernetes objects](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#autoscaling-on-metrics-not-related-to-kubernetes-objects) in Kubernetes. This is done with a special `External Metrics` system. Using external metrics in Kubernetes with the adapter requires you to configure special `external` rules in the configuration.
|
||||||
|
|
||||||
|
The configuration for `external` metrics rules is almost identical to the normal `rules`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
externalRules:
|
||||||
|
- seriesQuery: '{__name__="queue_consumer_lag",name!=""}'
|
||||||
|
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (name)
|
||||||
|
resources:
|
||||||
|
overrides: { namespace: {resource: "namespace"} }
|
||||||
|
```
|
||||||
|
|
||||||
|
Namespacing
|
||||||
|
-----------
|
||||||
|
|
||||||
|
All Kubernetes Horizontal Pod Autoscaler (HPA) resources are namespaced. And when you create an HPA that
|
||||||
|
references an external metric the adapter will automatically add a `namespace` label to the `seriesQuery` you have configured.
|
||||||
|
|
||||||
|
This is done because the External Metrics API Specification *requires* a namespace component in the URL:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
kubectl get --raw "/apis/external.metrics.k8s.io/v1beta1/namespaces/default/queue_consumer_lag"
|
||||||
|
```
|
||||||
|
|
||||||
|
Cross-Namespace or No Namespace Queries
|
||||||
|
---------------------------------------
|
||||||
|
|
||||||
|
A semi-common scenario is to have a `workload` in one namespace that needs to scale based on a metric from a different namespace. This is normally not
|
||||||
|
possible with `external` rules because the `namespace` label is set to match that of the source `workload`.
|
||||||
|
|
||||||
|
However, you can explicitly disable the automatic addition of the HPA namespace to the query, and instead opt to not set a namespace at all, or to target a different namespace.
|
||||||
|
|
||||||
|
This is done by setting `namespaced: false` in the `resources` section of the `external` rule:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# rules: ...
|
||||||
|
|
||||||
|
externalRules:
|
||||||
|
- seriesQuery: '{__name__="queue_depth",name!=""}'
|
||||||
|
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (name)
|
||||||
|
resources:
|
||||||
|
namespaced: false
|
||||||
|
```
|
||||||
|
|
||||||
|
Given the `external` rules defined above any `External` metric query for `queue_depth` will simply ignore the source `namespace` of the HPA. This allows you to explicitly not put a namespace into an external query, or to set the namespace to one that might be different from that of the HPA.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: autoscaling/v1
|
||||||
|
kind: HorizontalPodAutoscaler
|
||||||
|
metadata:
|
||||||
|
name: external-queue-scaler
|
||||||
|
# the HPA and scaleTargetRef must exist in a namespace
|
||||||
|
namespace: default
|
||||||
|
annotations:
|
||||||
|
# The "External" metric below targets a metricName that has namespaced=false
|
||||||
|
# and this allows the metric to explicitly query a different
|
||||||
|
# namespace than that of the HPA and scaleTargetRef
|
||||||
|
autoscaling.alpha.kubernetes.io/metrics: |
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"type": "External",
|
||||||
|
"external": {
|
||||||
|
"metricName": "queue_depth",
|
||||||
|
"metricSelector": {
|
||||||
|
"matchLabels": {
|
||||||
|
"namespace": "queue",
|
||||||
|
"name": "my-sample-queue"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"targetAverageValue": "50"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
spec:
|
||||||
|
maxReplicas: 5
|
||||||
|
minReplicas: 1
|
||||||
|
scaleTargetRef:
|
||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
name: my-app
|
||||||
|
```
|
||||||
|
|
@ -10,13 +10,13 @@ rules:
|
||||||
# can be found in pkg/config/default.go
|
# can be found in pkg/config/default.go
|
||||||
|
|
||||||
# this rule matches cumulative cAdvisor metrics measured in seconds
|
# this rule matches cumulative cAdvisor metrics measured in seconds
|
||||||
- seriesQuery: '{__name__=~"^container_.*",container_name!="POD",namespace!="",pod_name!=""}'
|
- seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}'
|
||||||
resources:
|
resources:
|
||||||
# skip specifying generic resource<->label mappings, and just
|
# skip specifying generic resource<->label mappings, and just
|
||||||
# attach only pod and namespace resources by mapping label names to group-resources
|
# attach only pod and namespace resources by mapping label names to group-resources
|
||||||
overrides:
|
overrides:
|
||||||
namespace: {resource: "namespace"},
|
namespace: {resource: "namespace"}
|
||||||
pod_name: {resource: "pod"},
|
pod: {resource: "pod"}
|
||||||
# specify that the `container_` and `_seconds_total` suffixes should be removed.
|
# specify that the `container_` and `_seconds_total` suffixes should be removed.
|
||||||
# this also introduces an implicit filter on metric family names
|
# this also introduces an implicit filter on metric family names
|
||||||
name:
|
name:
|
||||||
|
|
@ -27,19 +27,19 @@ rules:
|
||||||
# This is a Go template where the `.Series` and `.LabelMatchers` string values
|
# This is a Go template where the `.Series` and `.LabelMatchers` string values
|
||||||
# are available, and the delimiters are `<<` and `>>` to avoid conflicts with
|
# are available, and the delimiters are `<<` and `>>` to avoid conflicts with
|
||||||
# the prometheus query language
|
# the prometheus query language
|
||||||
metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[2m])) by (<<.GroupBy>>)"
|
metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[2m])) by (<<.GroupBy>>)"
|
||||||
|
|
||||||
# this rule matches cumulative cAdvisor metrics not measured in seconds
|
# this rule matches cumulative cAdvisor metrics not measured in seconds
|
||||||
- seriesQuery: '{__name__=~"^container_.*_total",container_name!="POD",namespace!="",pod_name!=""}'
|
- seriesQuery: '{__name__=~"^container_.*_total",container!="POD",namespace!="",pod!=""}'
|
||||||
resources:
|
resources:
|
||||||
overrides:
|
overrides:
|
||||||
namespace: {resource: "namespace"},
|
namespace: {resource: "namespace"}
|
||||||
pod_name: {resource: "pod"},
|
pod: {resource: "pod"}
|
||||||
seriesFilters:
|
seriesFilters:
|
||||||
# since this is a superset of the query above, we introduce an additional filter here
|
# since this is a superset of the query above, we introduce an additional filter here
|
||||||
- isNot: "^container_.*_seconds_total$"
|
- isNot: "^container_.*_seconds_total$"
|
||||||
name: {matches: "^container_(.*)_total$"}
|
name: {matches: "^container_(.*)_total$"}
|
||||||
metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[2m])) by (<<.GroupBy>>)"
|
metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[2m])) by (<<.GroupBy>>)"
|
||||||
|
|
||||||
# this rule matches cumulative non-cAdvisor metrics
|
# this rule matches cumulative non-cAdvisor metrics
|
||||||
- seriesQuery: '{namespace!="",__name__!="^container_.*"}'
|
- seriesQuery: '{namespace!="",__name__!="^container_.*"}'
|
||||||
|
|
@ -52,7 +52,7 @@ rules:
|
||||||
# Group will be converted to a form acceptable for use as a label automatically.
|
# Group will be converted to a form acceptable for use as a label automatically.
|
||||||
template: "<<.Resource>>"
|
template: "<<.Resource>>"
|
||||||
# if we wanted to, we could also specify overrides here
|
# if we wanted to, we could also specify overrides here
|
||||||
metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container_name!="POD"}[2m])) by (<<.GroupBy>>)"
|
metricsQuery: "sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[2m])) by (<<.GroupBy>>)"
|
||||||
|
|
||||||
# this rule matches only a single metric, explicitly naming it something else
|
# this rule matches only a single metric, explicitly naming it something else
|
||||||
# Its series query *must* return only a single metric family
|
# Its series query *must* return only a single metric family
|
||||||
|
|
@ -63,7 +63,21 @@ rules:
|
||||||
overrides:
|
overrides:
|
||||||
# this should still resolve in our cluster
|
# this should still resolve in our cluster
|
||||||
brand: {group: "cheese.io", resource: "brand"}
|
brand: {group: "cheese.io", resource: "brand"}
|
||||||
metricQuery: 'count(cheddar{sharp="true"})'
|
metricsQuery: 'count(cheddar{sharp="true"})'
|
||||||
|
|
||||||
|
# external rules are not tied to a Kubernetes resource and can reference any metric
|
||||||
|
# https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#autoscaling-on-metrics-not-related-to-kubernetes-objects
|
||||||
|
externalRules:
|
||||||
|
- seriesQuery: '{__name__="queue_consumer_lag",name!=""}'
|
||||||
|
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (name)
|
||||||
|
- seriesQuery: '{__name__="queue_depth",topic!=""}'
|
||||||
|
metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (name)
|
||||||
|
# Kubernetes metric queries include a namespace in the query by default
|
||||||
|
# but you can explicitly disable namespaces if needed with "namespaced: false"
|
||||||
|
# this is useful if you have an HPA with an external metric in namespace A
|
||||||
|
# but want to query for metrics from namespace B
|
||||||
|
resources:
|
||||||
|
namespaced: false
|
||||||
|
|
||||||
# TODO: should we be able to map to a constant instance of a resource
|
# TODO: should we be able to map to a constant instance of a resource
|
||||||
# (e.g. `resources: {constant: [{resource: "namespace", name: "kube-system"}}]`)?
|
# (e.g. `resources: {constant: [{resource: "namespace", name: "kube-system"}}]`)?
|
||||||
|
|
|
||||||
|
|
@ -20,7 +20,7 @@ Detailed instructions can be found in the Kubernetes documentation under
|
||||||
[Horizontal Pod
|
[Horizontal Pod
|
||||||
Autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics).
|
Autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics).
|
||||||
|
|
||||||
Make sure that you've properly configured metrics-server (as is default in
|
Make sure that you've properly configured metrics-server (as is the default in
|
||||||
Kubernetes 1.9+), or enabling custom metrics autoscaling support will
|
Kubernetes 1.9+), or enabling custom metrics autoscaling support will
|
||||||
disable CPU autoscaling support.
|
disable CPU autoscaling support.
|
||||||
|
|
||||||
|
|
@ -34,21 +34,24 @@ significantly different.
|
||||||
In order to follow this walkthrough, you'll need container images for
|
In order to follow this walkthrough, you'll need container images for
|
||||||
Prometheus and the custom metrics adapter.
|
Prometheus and the custom metrics adapter.
|
||||||
|
|
||||||
It's easiest to deploy Prometheus with the [Prometheus
|
The [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator),
|
||||||
Operator](https://coreos.com/operators/prometheus/docs/latest/), which
|
|
||||||
makes it easy to get up and running with Prometheus. This walkthrough
|
makes it easy to get up and running with Prometheus. This walkthrough
|
||||||
will assume you're planning on doing that -- if you've deployed it by hand
|
will assume you're planning on doing that -- if you've deployed it by hand
|
||||||
instead, you'll need to make a few adjustments to the way you expose
|
instead, you'll need to make a few adjustments to the way you expose
|
||||||
metrics to Prometheus.
|
metrics to Prometheus.
|
||||||
|
|
||||||
The adapter has different images for each arch, and can be found at
|
The adapter has different images for each arch, which can be found at
|
||||||
`directxman12/k8s-prometheus-adapter-${ARCH}`. For instance, if you're on
|
`gcr.io/k8s-staging-prometheus-adapter/prometheus-adapter-${ARCH}`. For
|
||||||
an x86_64 machine, use the `directxman12/k8s-prometheus-adapter-amd64`
|
instance, if you're on an x86_64 machine, use
|
||||||
image.
|
`gcr.io/k8s-staging-prometheus-adapter/prometheus-adapter-amd64` image.
|
||||||
|
|
||||||
If you're feeling adventurous, you can build the latest version of the
|
There is also an official multi arch image available at
|
||||||
custom metrics adapter by running `make docker-build` or `make
|
`registry.k8s.io/prometheus-adapter/prometheus-adapter:${VERSION}`.
|
||||||
build-local-image`.
|
|
||||||
|
|
||||||
|
If you're feeling adventurous, you can build the latest version of
|
||||||
|
prometheus-adapter by running `make container` or get the latest image from the
|
||||||
|
staging registry `gcr.io/k8s-staging-prometheus-adapter/prometheus-adapter`.
|
||||||
|
|
||||||
Special thanks to [@luxas](https://github.com/luxas) for providing the
|
Special thanks to [@luxas](https://github.com/luxas) for providing the
|
||||||
demo application for this walkthrough.
|
demo application for this walkthrough.
|
||||||
|
|
@ -90,14 +93,38 @@ spec:
|
||||||
name: metrics-provider
|
name: metrics-provider
|
||||||
ports:
|
ports:
|
||||||
- name: http
|
- name: http
|
||||||
port: 8080
|
containerPort: 8080
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
|
||||||
|
<summary>sample-app.service.yaml</summary>
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
labels:
|
||||||
|
app: sample-app
|
||||||
|
name: sample-app
|
||||||
|
spec:
|
||||||
|
ports:
|
||||||
|
- name: http
|
||||||
|
port: 80
|
||||||
|
protocol: TCP
|
||||||
|
targetPort: 8080
|
||||||
|
selector:
|
||||||
|
app: sample-app
|
||||||
|
type: ClusterIP
|
||||||
```
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ kubectl create -f sample-app.deploy.yaml
|
$ kubectl create -f sample-app.deploy.yaml
|
||||||
$ kubectl create service clusterip sample-app --tcp=80:8080
|
$ kubectl create -f sample-app.service.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
Now, check your app, which exposes metrics and counts the number of
|
Now, check your app, which exposes metrics and counts the number of
|
||||||
|
|
@ -115,11 +142,11 @@ a HorizontalPodAutoscaler like this to accomplish the autoscaling:
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
|
|
||||||
<summary>sample-app-hpa.yaml</summary>
|
<summary>sample-app.hpa.yaml</summary>
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
kind: HorizontalPodAutoscaler
|
kind: HorizontalPodAutoscaler
|
||||||
apiVersion: autoscaling/v2beta1
|
apiVersion: autoscaling/v2
|
||||||
metadata:
|
metadata:
|
||||||
name: sample-app
|
name: sample-app
|
||||||
spec:
|
spec:
|
||||||
|
|
@ -138,10 +165,13 @@ spec:
|
||||||
- type: Pods
|
- type: Pods
|
||||||
pods:
|
pods:
|
||||||
# use the metric that you used above: pods/http_requests
|
# use the metric that you used above: pods/http_requests
|
||||||
metricName: http_requests
|
metric:
|
||||||
|
name: http_requests
|
||||||
# target 500 milli-requests per second,
|
# target 500 milli-requests per second,
|
||||||
# which is 1 request every two seconds
|
# which is 1 request every two seconds
|
||||||
targetAverageValue: 500m
|
target:
|
||||||
|
type: Value
|
||||||
|
averageValue: 500m
|
||||||
```
|
```
|
||||||
|
|
||||||
</details>
|
</details>
|
||||||
|
|
@ -149,7 +179,7 @@ spec:
|
||||||
If you try creating that now (and take a look at your controller-manager
|
If you try creating that now (and take a look at your controller-manager
|
||||||
logs), you'll see that the HorizontalPodAutoscaler controller is
|
logs), you'll see that the HorizontalPodAutoscaler controller is
|
||||||
attempting to fetch metrics from
|
attempting to fetch metrics from
|
||||||
`/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/*/http_requests?selector=app%3Dsample-app`,
|
`/apis/custom.metrics.k8s.io/v1beta2/namespaces/default/pods/*/http_requests?selector=app%3Dsample-app`,
|
||||||
but right now, nothing's serving that API.
|
but right now, nothing's serving that API.
|
||||||
|
|
||||||
Before you can autoscale your application, you'll need to make sure that
|
Before you can autoscale your application, you'll need to make sure that
|
||||||
|
|
@ -166,15 +196,15 @@ Prometheus adapter to serve metrics out of Prometheus.
|
||||||
### Launching Prometheus
|
### Launching Prometheus
|
||||||
|
|
||||||
First, you'll need to deploy the Prometheus Operator. Check out the
|
First, you'll need to deploy the Prometheus Operator. Check out the
|
||||||
[getting started
|
[quick start
|
||||||
guide](https://coreos.com/operators/prometheus/docs/latest/user-guides/getting-started.html)
|
guide](https://github.com/prometheus-operator/prometheus-operator#quickstart)
|
||||||
for the Operator to deploy a copy of Prometheus.
|
for the Operator to deploy a copy of Prometheus.
|
||||||
|
|
||||||
This walkthrough assumes that Prometheus is deployed in the `prom`
|
This walkthrough assumes that Prometheus is deployed in the `monitoring`
|
||||||
namespace. Most of the sample commands and files are namespace-agnostic,
|
namespace. Most of the sample commands and files are namespace-agnostic,
|
||||||
but there are a few commands or pieces of configuration that rely on
|
but there are a few commands or pieces of configuration that rely on that
|
||||||
namespace. If you're using a different namespace, simply substitute that
|
namespace. If you're using a different namespace, simply substitute that
|
||||||
in for `prom` when it appears.
|
in for `monitoring` when it appears.
|
||||||
|
|
||||||
### Monitoring Your Application
|
### Monitoring Your Application
|
||||||
|
|
||||||
|
|
@ -186,7 +216,7 @@ service:
|
||||||
|
|
||||||
<details>
|
<details>
|
||||||
|
|
||||||
<summary>service-monitor.yaml</summary>
|
<summary>sample-app.monitor.yaml</summary>
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
kind: ServiceMonitor
|
kind: ServiceMonitor
|
||||||
|
|
@ -206,18 +236,18 @@ spec:
|
||||||
</details>
|
</details>
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ kubectl create -f service-monitor.yaml
|
$ kubectl create -f sample-app.monitor.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
Now, you should see your metrics appear in your Prometheus instance. Look
|
Now, you should see your metrics (`http_requests_total`) appear in your Prometheus instance. Look
|
||||||
them up via the dashboard, and make sure they have the `namespace` and
|
them up via the dashboard, and make sure they have the `namespace` and
|
||||||
`pod` labels.
|
`pod` labels. If not, check the labels on the service monitor match the ones on the Prometheus CRD.
|
||||||
|
|
||||||
### Launching the Adapter
|
### Launching the Adapter
|
||||||
|
|
||||||
Now that you've got a running copy of Prometheus that's monitoring your
|
Now that you've got a running copy of Prometheus that's monitoring your
|
||||||
application, you'll need to deploy the adapter, which knows how to
|
application, you'll need to deploy the adapter, which knows how to
|
||||||
communicate with both Kubernetes and Promethues, acting as a translator
|
communicate with both Kubernetes and Prometheus, acting as a translator
|
||||||
between the two.
|
between the two.
|
||||||
|
|
||||||
The [deploy/manifests](/deploy/manifests) directory contains the
|
The [deploy/manifests](/deploy/manifests) directory contains the
|
||||||
|
|
@ -229,7 +259,46 @@ the steps to deploy the adapter. Note that if you're deploying on
|
||||||
a non-x86_64 (amd64) platform, you'll need to change the `image` field in
|
a non-x86_64 (amd64) platform, you'll need to change the `image` field in
|
||||||
the Deployment to be the appropriate image for your platform.
|
the Deployment to be the appropriate image for your platform.
|
||||||
|
|
||||||
The default adapter configuration should work for this walkthrough and
|
However an update to the adapter config is necessary in order to
|
||||||
|
expose custom metrics.
|
||||||
|
|
||||||
|
<details>
|
||||||
|
|
||||||
|
<summary>prom-adapter.config.yaml</summary>
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: adapter-config
|
||||||
|
namespace: monitoring
|
||||||
|
data:
|
||||||
|
config.yaml: |-
|
||||||
|
"rules":
|
||||||
|
- "seriesQuery": |
|
||||||
|
{namespace!="",__name__!~"^container_.*"}
|
||||||
|
"resources":
|
||||||
|
"template": "<<.Resource>>"
|
||||||
|
"name":
|
||||||
|
"matches": "^(.*)_total"
|
||||||
|
"as": ""
|
||||||
|
"metricsQuery": |
|
||||||
|
sum by (<<.GroupBy>>) (
|
||||||
|
irate (
|
||||||
|
<<.Series>>{<<.LabelMatchers>>}[1m]
|
||||||
|
)
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ kubectl apply -f prom-adapter.config.yaml
|
||||||
|
# Restart prom-adapter pods
|
||||||
|
$ kubectl rollout restart deployment prometheus-adapter -n monitoring
|
||||||
|
```
|
||||||
|
|
||||||
|
This adapter configuration should work for this walkthrough together with
|
||||||
a standard Prometheus Operator configuration, but if you've got custom
|
a standard Prometheus Operator configuration, but if you've got custom
|
||||||
relabelling rules, or your labels above weren't exactly `namespace` and
|
relabelling rules, or your labels above weren't exactly `namespace` and
|
||||||
`pod`, you may need to edit the configuration in the ConfigMap. The
|
`pod`, you may need to edit the configuration in the ConfigMap. The
|
||||||
|
|
@ -238,11 +307,36 @@ overview of how configuration works.
|
||||||
|
|
||||||
### The Registered API
|
### The Registered API
|
||||||
|
|
||||||
As part of the creation of the adapter Deployment and associated objects
|
We also need to register the custom metrics API with the API aggregator (part of
|
||||||
(performed above), we registered the API with the API aggregator (part of
|
the main Kubernetes API server). For that we need to create an APIService resource
|
||||||
the main Kubernetes API server).
|
|
||||||
|
|
||||||
The API is registered as `custom.metrics.k8s.io/v1beta1`, and you can find
|
<details>
|
||||||
|
|
||||||
|
<summary>api-service.yaml</summary>
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
apiVersion: apiregistration.k8s.io/v1
|
||||||
|
kind: APIService
|
||||||
|
metadata:
|
||||||
|
name: v1beta2.custom.metrics.k8s.io
|
||||||
|
spec:
|
||||||
|
group: custom.metrics.k8s.io
|
||||||
|
groupPriorityMinimum: 100
|
||||||
|
insecureSkipTLSVerify: true
|
||||||
|
service:
|
||||||
|
name: prometheus-adapter
|
||||||
|
namespace: monitoring
|
||||||
|
version: v1beta2
|
||||||
|
versionPriority: 100
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
```shell
|
||||||
|
$ kubectl create -f api-service.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
The API is registered as `custom.metrics.k8s.io/v1beta2`, and you can find
|
||||||
more information about aggregation at [Concepts:
|
more information about aggregation at [Concepts:
|
||||||
Aggregation](https://github.com/kubernetes-incubator/apiserver-builder/blob/master/docs/concepts/aggregation.md).
|
Aggregation](https://github.com/kubernetes-incubator/apiserver-builder/blob/master/docs/concepts/aggregation.md).
|
||||||
|
|
||||||
|
|
@ -253,7 +347,7 @@ With that all set, your custom metrics API should show up in discovery.
|
||||||
Try fetching the discovery information for it:
|
Try fetching the discovery information for it:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1
|
$ kubectl get --raw /apis/custom.metrics.k8s.io/v1beta2
|
||||||
```
|
```
|
||||||
|
|
||||||
Since you've set up Prometheus to collect your app's metrics, you should
|
Since you've set up Prometheus to collect your app's metrics, you should
|
||||||
|
|
@ -267,12 +361,12 @@ sends a raw GET request to the Kubernetes API server, automatically
|
||||||
injecting auth information:
|
injecting auth information:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/pods/*/http_requests?selector=app%3Dsample-app"
|
$ kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta2/namespaces/default/pods/*/http_requests?selector=app%3Dsample-app"
|
||||||
```
|
```
|
||||||
|
|
||||||
Because of the adapter's configuration, the cumulative metric
|
Because of the adapter's configuration, the cumulative metric
|
||||||
`http_requests_total` has been converted into a rate metric,
|
`http_requests_total` has been converted into a rate metric,
|
||||||
`pods/http_requests`, which measures requests per second over a 2 minute
|
`pods/http_requests`, which measures requests per second over a 1 minute
|
||||||
interval. The value should currently be close to zero, since there's no
|
interval. The value should currently be close to zero, since there's no
|
||||||
traffic to your app, except for the regular metrics collection from
|
traffic to your app, except for the regular metrics collection from
|
||||||
Prometheus.
|
Prometheus.
|
||||||
|
|
@ -323,7 +417,7 @@ and make decisions based on it.
|
||||||
If you didn't create the HorizontalPodAutoscaler above, create it now:
|
If you didn't create the HorizontalPodAutoscaler above, create it now:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ kubectl create -f sample-app-hpa.yaml
|
$ kubectl create -f sample-app.hpa.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
Wait a little bit, and then examine the HPA:
|
Wait a little bit, and then examine the HPA:
|
||||||
|
|
@ -369,4 +463,4 @@ setting different labels or using the `Object` metric source type.
|
||||||
|
|
||||||
For more information on how metrics are exposed by the Prometheus adapter,
|
For more information on how metrics are exposed by the Prometheus adapter,
|
||||||
see [config documentation](/docs/config.md), and check the [default
|
see [config documentation](/docs/config.md), and check the [default
|
||||||
configuration](/deploy/manifests/custom-metrics-config-map.yaml).
|
configuration](/deploy/manifests/config-map.yaml).
|
||||||
|
|
|
||||||
118
go.mod
Normal file
118
go.mod
Normal file
|
|
@ -0,0 +1,118 @@
|
||||||
|
module sigs.k8s.io/prometheus-adapter
|
||||||
|
|
||||||
|
go 1.22.1
|
||||||
|
|
||||||
|
toolchain go1.22.2
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/onsi/ginkgo v1.16.5
|
||||||
|
github.com/onsi/gomega v1.33.1
|
||||||
|
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.73.2
|
||||||
|
github.com/prometheus-operator/prometheus-operator/pkg/client v0.73.2
|
||||||
|
github.com/prometheus/client_golang v1.18.0
|
||||||
|
github.com/prometheus/common v0.46.0
|
||||||
|
github.com/spf13/cobra v1.8.0
|
||||||
|
github.com/stretchr/testify v1.9.0
|
||||||
|
gopkg.in/yaml.v2 v2.4.0
|
||||||
|
k8s.io/api v0.30.0
|
||||||
|
k8s.io/apimachinery v0.30.0
|
||||||
|
k8s.io/apiserver v0.30.0
|
||||||
|
k8s.io/client-go v0.30.0
|
||||||
|
k8s.io/component-base v0.30.0
|
||||||
|
k8s.io/klog/v2 v2.120.1
|
||||||
|
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f
|
||||||
|
k8s.io/metrics v0.30.0
|
||||||
|
sigs.k8s.io/custom-metrics-apiserver v1.30.0
|
||||||
|
sigs.k8s.io/metrics-server v0.7.1
|
||||||
|
)
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/NYTimes/gziphandler v1.1.1 // indirect
|
||||||
|
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
|
||||||
|
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||||
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
|
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||||
|
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||||
|
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||||
|
github.com/coreos/go-semver v0.3.1 // indirect
|
||||||
|
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||||
|
github.com/emicklei/go-restful/v3 v3.12.0 // indirect
|
||||||
|
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
|
||||||
|
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||||
|
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||||
|
github.com/go-logr/logr v1.4.1 // indirect
|
||||||
|
github.com/go-logr/stdr v1.2.2 // indirect
|
||||||
|
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||||
|
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||||
|
github.com/go-openapi/swag v0.23.0 // indirect
|
||||||
|
github.com/gogo/protobuf v1.3.2 // indirect
|
||||||
|
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||||
|
github.com/golang/protobuf v1.5.4 // indirect
|
||||||
|
github.com/google/cel-go v0.17.8 // indirect
|
||||||
|
github.com/google/gnostic-models v0.6.8 // indirect
|
||||||
|
github.com/google/go-cmp v0.6.0 // indirect
|
||||||
|
github.com/google/gofuzz v1.2.0 // indirect
|
||||||
|
github.com/google/uuid v1.6.0 // indirect
|
||||||
|
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
|
||||||
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 // indirect
|
||||||
|
github.com/imdario/mergo v0.3.16 // indirect
|
||||||
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
|
github.com/josharian/intern v1.0.0 // indirect
|
||||||
|
github.com/json-iterator/go v1.1.12 // indirect
|
||||||
|
github.com/mailru/easyjson v0.7.7 // indirect
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||||
|
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||||
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||||
|
github.com/nxadm/tail v1.4.8 // indirect
|
||||||
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||||
|
github.com/prometheus/client_model v0.5.0 // indirect
|
||||||
|
github.com/prometheus/procfs v0.12.0 // indirect
|
||||||
|
github.com/spf13/pflag v1.0.5 // indirect
|
||||||
|
github.com/stoewer/go-strcase v1.3.0 // indirect
|
||||||
|
go.etcd.io/etcd/api/v3 v3.5.11 // indirect
|
||||||
|
go.etcd.io/etcd/client/pkg/v3 v3.5.11 // indirect
|
||||||
|
go.etcd.io/etcd/client/v3 v3.5.11 // indirect
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
|
||||||
|
go.opentelemetry.io/otel v1.21.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/metric v1.21.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/sdk v1.21.0 // indirect
|
||||||
|
go.opentelemetry.io/otel/trace v1.21.0 // indirect
|
||||||
|
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
|
||||||
|
go.uber.org/multierr v1.11.0 // indirect
|
||||||
|
go.uber.org/zap v1.26.0 // indirect
|
||||||
|
golang.org/x/crypto v0.31.0 // indirect
|
||||||
|
golang.org/x/exp v0.0.0-20231226003508-02704c960a9b // indirect
|
||||||
|
golang.org/x/mod v0.17.0 // indirect
|
||||||
|
golang.org/x/net v0.25.0 // indirect
|
||||||
|
golang.org/x/oauth2 v0.18.0 // indirect
|
||||||
|
golang.org/x/sync v0.10.0 // indirect
|
||||||
|
golang.org/x/sys v0.28.0 // indirect
|
||||||
|
golang.org/x/term v0.27.0 // indirect
|
||||||
|
golang.org/x/text v0.21.0 // indirect
|
||||||
|
golang.org/x/time v0.5.0 // indirect
|
||||||
|
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
|
||||||
|
google.golang.org/appengine v1.6.8 // indirect
|
||||||
|
google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 // indirect
|
||||||
|
google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 // indirect
|
||||||
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect
|
||||||
|
google.golang.org/grpc v1.60.1 // indirect
|
||||||
|
google.golang.org/protobuf v1.33.0 // indirect
|
||||||
|
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||||
|
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||||
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
|
k8s.io/apiextensions-apiserver v0.29.3 // indirect
|
||||||
|
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
|
||||||
|
k8s.io/kms v0.30.0 // indirect
|
||||||
|
k8s.io/utils v0.0.0-20240423183400-0849a56e8f22 // indirect
|
||||||
|
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect
|
||||||
|
sigs.k8s.io/controller-runtime v0.17.2 // indirect
|
||||||
|
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||||
|
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||||
|
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||||
|
)
|
||||||
383
go.sum
Normal file
383
go.sum
Normal file
|
|
@ -0,0 +1,383 @@
|
||||||
|
cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM=
|
||||||
|
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
|
||||||
|
cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
|
||||||
|
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
|
||||||
|
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||||
|
github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
|
||||||
|
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||||
|
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
|
||||||
|
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
|
||||||
|
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
|
||||||
|
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||||
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
|
github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
|
||||||
|
github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
|
||||||
|
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||||
|
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||||
|
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||||
|
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
|
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
|
||||||
|
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||||
|
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
|
||||||
|
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||||
|
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||||
|
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||||
|
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||||
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||||
|
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||||
|
github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk=
|
||||||
|
github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||||
|
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
|
||||||
|
github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
|
||||||
|
github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
|
||||||
|
github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||||
|
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||||
|
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||||
|
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||||
|
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||||
|
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||||
|
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||||
|
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||||
|
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||||
|
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||||
|
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||||
|
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||||
|
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
|
||||||
|
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
|
||||||
|
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
|
||||||
|
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
|
||||||
|
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
|
||||||
|
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
|
||||||
|
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||||
|
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||||
|
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||||
|
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||||
|
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||||
|
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||||
|
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
|
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||||
|
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||||
|
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
|
||||||
|
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||||
|
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||||
|
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||||
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||||
|
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||||
|
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||||
|
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||||
|
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||||
|
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
|
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||||
|
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||||
|
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||||
|
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||||
|
github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
|
||||||
|
github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
|
||||||
|
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
|
||||||
|
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
|
||||||
|
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||||
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
|
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||||
|
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
|
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
|
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||||
|
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||||
|
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
|
||||||
|
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
|
||||||
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||||
|
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||||
|
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
|
||||||
|
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
|
||||||
|
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
|
||||||
|
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||||
|
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
|
||||||
|
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||||
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1 h1:6UKoz5ujsI55KNpsJH3UwCq3T8kKbZwNZBNPuTTje8U=
|
||||||
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.1/go.mod h1:YvJ2f6MplWDhfxiUC3KpyTy76kYUZA4W3pTv/wdKQ9Y=
|
||||||
|
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||||
|
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
|
||||||
|
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||||
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
|
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
|
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
|
||||||
|
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
|
||||||
|
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||||
|
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||||
|
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||||
|
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||||
|
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||||
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
|
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||||
|
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||||
|
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||||
|
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||||
|
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||||
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||||
|
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||||
|
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||||
|
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||||
|
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||||
|
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||||
|
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||||
|
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||||
|
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||||
|
github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g=
|
||||||
|
github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc=
|
||||||
|
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||||
|
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||||
|
github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
|
||||||
|
github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
|
||||||
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||||
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.73.2 h1:GwlGJPK6vf1UIohpc72KJVkKYlzki1UgE3xC4bWbf20=
|
||||||
|
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.73.2/go.mod h1:yJ3CawR/A5qEYFEeCOUVYLTwYxmacfHQhJS+b/2QiaM=
|
||||||
|
github.com/prometheus-operator/prometheus-operator/pkg/client v0.73.2 h1:cKSYjDjk9Rn/VIFKCh+CCd771ip7VTJzA3fAuKTxY2Q=
|
||||||
|
github.com/prometheus-operator/prometheus-operator/pkg/client v0.73.2/go.mod h1:mkLwGPvmexoEm6j3bk8gkWNIIFzN2uCs9tRFU2Vsu/I=
|
||||||
|
github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
|
||||||
|
github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
|
||||||
|
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
|
||||||
|
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
|
||||||
|
github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
|
||||||
|
github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
|
||||||
|
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
|
||||||
|
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||||
|
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||||
|
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||||
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
|
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||||
|
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||||
|
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
|
||||||
|
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
|
||||||
|
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
|
||||||
|
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
|
||||||
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
|
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
|
||||||
|
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
|
||||||
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
|
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||||
|
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||||
|
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||||
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
|
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||||
|
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||||
|
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||||
|
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
|
||||||
|
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
|
||||||
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
|
||||||
|
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||||
|
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||||
|
go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA=
|
||||||
|
go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
||||||
|
go.etcd.io/etcd/api/v3 v3.5.11 h1:B54KwXbWDHyD3XYAwprxNzTe7vlhR69LuBgZnMVvS7E=
|
||||||
|
go.etcd.io/etcd/api/v3 v3.5.11/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4=
|
||||||
|
go.etcd.io/etcd/client/pkg/v3 v3.5.11 h1:bT2xVspdiCj2910T0V+/KHcVKjkUrCZVtk8J2JF2z1A=
|
||||||
|
go.etcd.io/etcd/client/pkg/v3 v3.5.11/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4=
|
||||||
|
go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4=
|
||||||
|
go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA=
|
||||||
|
go.etcd.io/etcd/client/v3 v3.5.11 h1:ajWtgoNSZJ1gmS8k+icvPtqsqEav+iUorF7b0qozgUU=
|
||||||
|
go.etcd.io/etcd/client/v3 v3.5.11/go.mod h1:a6xQUEqFJ8vztO1agJh/KQKOMfFI8og52ZconzcDJwE=
|
||||||
|
go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
|
||||||
|
go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs=
|
||||||
|
go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
|
||||||
|
go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc=
|
||||||
|
go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=
|
||||||
|
go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo=
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
|
||||||
|
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
|
||||||
|
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
|
||||||
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
|
||||||
|
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
|
||||||
|
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
|
||||||
|
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
|
||||||
|
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E=
|
||||||
|
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
|
||||||
|
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
|
||||||
|
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||||
|
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||||
|
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||||
|
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||||
|
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
|
||||||
|
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
|
||||||
|
go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
|
||||||
|
go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||||
|
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||||
|
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||||
|
golang.org/x/exp v0.0.0-20231226003508-02704c960a9b h1:kLiC65FbiHWFAOu+lxwNPujcsl8VYyTYYEZnsOO1WK4=
|
||||||
|
golang.org/x/exp v0.0.0-20231226003508-02704c960a9b/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
|
||||||
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||||
|
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
|
||||||
|
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||||
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
|
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
|
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||||
|
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
|
||||||
|
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||||
|
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
|
||||||
|
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
|
||||||
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||||
|
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||||
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||||
|
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
|
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||||
|
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||||
|
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
|
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
|
||||||
|
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||||
|
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||||
|
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||||
|
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||||
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
|
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
|
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
|
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||||
|
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
|
||||||
|
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||||
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
|
||||||
|
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
|
||||||
|
google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 h1:YJ5pD9rF8o9Qtta0Cmy9rdBwkSjrTCT6XTiUQVOtIos=
|
||||||
|
google.golang.org/genproto v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0vFTBPy+tFSGvXEd3z+BcoG1k7EHbqm+YBsY=
|
||||||
|
google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 h1:s1w3X6gQxwrLEpxnLd/qXTVLgQE2yXwaOaoa6IlY/+o=
|
||||||
|
google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0/go.mod h1:CAny0tYF+0/9rmDB9fahA9YLzX3+AEVl1qXbv5hhj6c=
|
||||||
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU=
|
||||||
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA=
|
||||||
|
google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
|
||||||
|
google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
|
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||||
|
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||||
|
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
|
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||||
|
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
|
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
|
||||||
|
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||||
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
|
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||||
|
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||||
|
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||||
|
gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
|
||||||
|
gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||||
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
k8s.io/api v0.30.0 h1:siWhRq7cNjy2iHssOB9SCGNCl2spiF1dO3dABqZ8niA=
|
||||||
|
k8s.io/api v0.30.0/go.mod h1:OPlaYhoHs8EQ1ql0R/TsUgaRPhpKNxIMrKQfWUp8QSE=
|
||||||
|
k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO91YeI=
|
||||||
|
k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc=
|
||||||
|
k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA=
|
||||||
|
k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
|
||||||
|
k8s.io/apiserver v0.30.0 h1:QCec+U72tMQ+9tR6A0sMBB5Vh6ImCEkoKkTDRABWq6M=
|
||||||
|
k8s.io/apiserver v0.30.0/go.mod h1:smOIBq8t0MbKZi7O7SyIpjPsiKJ8qa+llcFCluKyqiY=
|
||||||
|
k8s.io/client-go v0.30.0 h1:sB1AGGlhY/o7KCyCEQ0bPWzYDL0pwOZO4vAtTSh/gJQ=
|
||||||
|
k8s.io/client-go v0.30.0/go.mod h1:g7li5O5256qe6TYdAMyX/otJqMhIiGgTapdLchhmOaY=
|
||||||
|
k8s.io/component-base v0.30.0 h1:cj6bp38g0ainlfYtaOQuRELh5KSYjhKxM+io7AUIk4o=
|
||||||
|
k8s.io/component-base v0.30.0/go.mod h1:V9x/0ePFNaKeKYA3bOvIbrNoluTSG+fSJKjLdjOoeXQ=
|
||||||
|
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo=
|
||||||
|
k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8=
|
||||||
|
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
|
||||||
|
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||||
|
k8s.io/kms v0.30.0 h1:ZlnD/ei5lpvUlPw6eLfVvH7d8i9qZ6HwUQgydNVks8g=
|
||||||
|
k8s.io/kms v0.30.0/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4=
|
||||||
|
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f h1:0LQagt0gDpKqvIkAMPaRGcXawNMouPECM1+F9BVxEaM=
|
||||||
|
k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f/go.mod h1:S9tOR0FxgyusSNR+MboCuiDpVWkAifZvaYI1Q2ubgro=
|
||||||
|
k8s.io/metrics v0.30.0 h1:tqB+T0GJY288KahaO3Eb41HaDVeLR18gBmyPo0R417s=
|
||||||
|
k8s.io/metrics v0.30.0/go.mod h1:nSDA8V19WHhCTBhRYuyzJT9yPJBxSpqbyrGCCQ4jPj4=
|
||||||
|
k8s.io/utils v0.0.0-20240423183400-0849a56e8f22 h1:ao5hUqGhsqdm+bYbjH/pRkCs0unBGe9UyDahzs9zQzQ=
|
||||||
|
k8s.io/utils v0.0.0-20240423183400-0849a56e8f22/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||||
|
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 h1:/U5vjBbQn3RChhv7P11uhYvCSm5G2GaIi5AIGBS6r4c=
|
||||||
|
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0/go.mod h1:z7+wmGM2dfIiLRfrC6jb5kV2Mq/sK1ZP303cxzkV5Y4=
|
||||||
|
sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0=
|
||||||
|
sigs.k8s.io/controller-runtime v0.17.2/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s=
|
||||||
|
sigs.k8s.io/custom-metrics-apiserver v1.30.0 h1:BCgg2QfInoWXvoJgPK8TxrSS9r5wR4NNvr7M+9sUOYo=
|
||||||
|
sigs.k8s.io/custom-metrics-apiserver v1.30.0/go.mod h1:QXOKIL83M545uITzoZn4OC1C7nr0WhLh70A38pbzUpk=
|
||||||
|
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||||
|
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||||
|
sigs.k8s.io/metrics-server v0.7.1 h1:LhdCzkaI7VI7/N7pR4hDauTuWyc9Pxr+ihjTDuS9GIo=
|
||||||
|
sigs.k8s.io/metrics-server v0.7.1/go.mod h1:vt+pIEbw5tpmyRR46WJb3pRm1JEzf/HxRN+VClTKuqI=
|
||||||
|
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||||
|
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||||
|
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||||
|
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||||
|
|
@ -1,4 +1,5 @@
|
||||||
Copyright 2011-2016 Canonical Ltd.
|
/*
|
||||||
|
Copyright The Kubernetes Authors.
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
you may not use this file except in compliance with the License.
|
you may not use this file except in compliance with the License.
|
||||||
|
|
@ -11,3 +12,4 @@ distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
See the License for the specific language governing permissions and
|
See the License for the specific language governing permissions and
|
||||||
limitations under the License.
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
@ -1,41 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
# Copyright 2017 The Kubernetes Authors.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
set -o errexit
|
|
||||||
set -o nounset
|
|
||||||
set -o pipefail
|
|
||||||
|
|
||||||
verify=0
|
|
||||||
if [[ ${1:-} = "--verify" || ${1:-} = "-v" ]]; then
|
|
||||||
verify=1
|
|
||||||
fi
|
|
||||||
|
|
||||||
find_files() {
|
|
||||||
find . -not \( \( \
|
|
||||||
-wholename './_output' \
|
|
||||||
-o -wholename './vendor' \
|
|
||||||
\) -prune \) -name '*.go'
|
|
||||||
}
|
|
||||||
|
|
||||||
if [[ $verify -eq 1 ]]; then
|
|
||||||
diff=$(find_files | xargs gofmt -s -d 2>&1)
|
|
||||||
if [[ -n "${diff}" ]]; then
|
|
||||||
echo "gofmt -s -w $(echo "${diff}" | awk '/^diff / { print $2 }' | tr '\n' ' ')"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
find_files | xargs gofmt -s -w
|
|
||||||
fi
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
// Copyright 2016 The etcd Authors
|
// Copyright 2021 The Kubernetes Authors.
|
||||||
//
|
//
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
// you may not use this file except in compliance with the License.
|
// you may not use this file except in compliance with the License.
|
||||||
|
|
@ -12,9 +12,13 @@
|
||||||
// See the License for the specific language governing permissions and
|
// See the License for the specific language governing permissions and
|
||||||
// limitations under the License.
|
// limitations under the License.
|
||||||
|
|
||||||
package rpctypes
|
//go:build tools
|
||||||
|
// +build tools
|
||||||
|
|
||||||
var (
|
// Package tools tracks dependencies for tools that used in the build process.
|
||||||
MetadataRequireLeaderKey = "hasleader"
|
// See https://github.com/golang/go/wiki/Modules
|
||||||
MetadataHasLeader = "true"
|
package tools
|
||||||
|
|
||||||
|
import (
|
||||||
|
_ "k8s.io/kube-openapi/cmd/openapi-gen"
|
||||||
)
|
)
|
||||||
16693
pkg/api/generated/openapi/zz_generated.openapi.go
Normal file
16693
pkg/api/generated/openapi/zz_generated.openapi.go
Normal file
File diff suppressed because it is too large
Load diff
|
|
@ -1,3 +1,6 @@
|
||||||
|
//go:build codegen
|
||||||
|
// +build codegen
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Copyright 2018 The Kubernetes Authors.
|
Copyright 2018 The Kubernetes Authors.
|
||||||
|
|
||||||
|
|
@ -14,6 +17,10 @@ See the License for the specific language governing permissions and
|
||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// Package truncate provides an implementation for the audit.Backend interface
|
// Package is only a stub to ensure k8s.io/kube-openapi/cmd/openapi-gen is vendored
|
||||||
// that truncates audit events and sends them to the delegate audit.Backend.
|
// so the same version of kube-openapi is used to generate and render the openapi spec
|
||||||
package truncate // import "k8s.io/apiserver/plugin/pkg/audit/truncate"
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
_ "k8s.io/kube-openapi/cmd/openapi-gen"
|
||||||
|
)
|
||||||
|
|
@ -21,14 +21,14 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"path"
|
"path"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/glog"
|
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
|
"k8s.io/klog/v2"
|
||||||
)
|
)
|
||||||
|
|
||||||
// APIClient is a raw client to the Prometheus Query API.
|
// APIClient is a raw client to the Prometheus Query API.
|
||||||
|
|
@ -47,17 +47,31 @@ type GenericAPIClient interface {
|
||||||
type httpAPIClient struct {
|
type httpAPIClient struct {
|
||||||
client *http.Client
|
client *http.Client
|
||||||
baseURL *url.URL
|
baseURL *url.URL
|
||||||
|
headers http.Header
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *httpAPIClient) Do(ctx context.Context, verb, endpoint string, query url.Values) (APIResponse, error) {
|
func (c *httpAPIClient) Do(ctx context.Context, verb, endpoint string, query url.Values) (APIResponse, error) {
|
||||||
u := *c.baseURL
|
u := *c.baseURL
|
||||||
u.Path = path.Join(c.baseURL.Path, endpoint)
|
u.Path = path.Join(c.baseURL.Path, endpoint)
|
||||||
|
var reqBody io.Reader
|
||||||
|
if verb == http.MethodGet {
|
||||||
u.RawQuery = query.Encode()
|
u.RawQuery = query.Encode()
|
||||||
req, err := http.NewRequest(verb, u.String(), nil)
|
} else if verb == http.MethodPost {
|
||||||
|
reqBody = strings.NewReader(query.Encode())
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := http.NewRequestWithContext(ctx, verb, u.String(), reqBody)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return APIResponse{}, fmt.Errorf("error constructing HTTP request to Prometheus: %v", err)
|
return APIResponse{}, fmt.Errorf("error constructing HTTP request to Prometheus: %v", err)
|
||||||
}
|
}
|
||||||
req.WithContext(ctx)
|
for key, values := range c.headers {
|
||||||
|
for _, value := range values {
|
||||||
|
req.Header.Add(key, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if verb == http.MethodPost {
|
||||||
|
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
|
||||||
|
}
|
||||||
|
|
||||||
resp, err := c.client.Do(req)
|
resp, err := c.client.Do(req)
|
||||||
defer func() {
|
defer func() {
|
||||||
|
|
@ -70,8 +84,8 @@ func (c *httpAPIClient) Do(ctx context.Context, verb, endpoint string, query url
|
||||||
return APIResponse{}, err
|
return APIResponse{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if glog.V(6) {
|
if klog.V(6).Enabled() {
|
||||||
glog.Infof("%s %s %s", verb, u.String(), resp.Status)
|
klog.Infof("%s %s %s", verb, u.String(), resp.Status)
|
||||||
}
|
}
|
||||||
|
|
||||||
code := resp.StatusCode
|
code := resp.StatusCode
|
||||||
|
|
@ -85,12 +99,12 @@ func (c *httpAPIClient) Do(ctx context.Context, verb, endpoint string, query url
|
||||||
}
|
}
|
||||||
|
|
||||||
var body io.Reader = resp.Body
|
var body io.Reader = resp.Body
|
||||||
if glog.V(8) {
|
if klog.V(8).Enabled() {
|
||||||
data, err := ioutil.ReadAll(body)
|
data, err := io.ReadAll(body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return APIResponse{}, fmt.Errorf("unable to log response body: %v", err)
|
return APIResponse{}, fmt.Errorf("unable to log response body: %v", err)
|
||||||
}
|
}
|
||||||
glog.Infof("Response Body: %s", string(data))
|
klog.Infof("Response Body: %s", string(data))
|
||||||
body = bytes.NewReader(data)
|
body = bytes.NewReader(data)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -113,10 +127,11 @@ func (c *httpAPIClient) Do(ctx context.Context, verb, endpoint string, query url
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewGenericAPIClient builds a new generic Prometheus API client for the given base URL and HTTP Client.
|
// NewGenericAPIClient builds a new generic Prometheus API client for the given base URL and HTTP Client.
|
||||||
func NewGenericAPIClient(client *http.Client, baseURL *url.URL) GenericAPIClient {
|
func NewGenericAPIClient(client *http.Client, baseURL *url.URL, headers http.Header) GenericAPIClient {
|
||||||
return &httpAPIClient{
|
return &httpAPIClient{
|
||||||
client: client,
|
client: client,
|
||||||
baseURL: baseURL,
|
baseURL: baseURL,
|
||||||
|
headers: headers,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -129,19 +144,21 @@ const (
|
||||||
// queryClient is a Client that connects to the Prometheus HTTP API.
|
// queryClient is a Client that connects to the Prometheus HTTP API.
|
||||||
type queryClient struct {
|
type queryClient struct {
|
||||||
api GenericAPIClient
|
api GenericAPIClient
|
||||||
|
verb string
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClientForAPI creates a Client for the given generic Prometheus API client.
|
// NewClientForAPI creates a Client for the given generic Prometheus API client.
|
||||||
func NewClientForAPI(client GenericAPIClient) Client {
|
func NewClientForAPI(client GenericAPIClient, verb string) Client {
|
||||||
return &queryClient{
|
return &queryClient{
|
||||||
api: client,
|
api: client,
|
||||||
|
verb: verb,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClient creates a Client for the given HTTP client and base URL (the location of the Prometheus server).
|
// NewClient creates a Client for the given HTTP client and base URL (the location of the Prometheus server).
|
||||||
func NewClient(client *http.Client, baseURL *url.URL) Client {
|
func NewClient(client *http.Client, baseURL *url.URL, headers http.Header, verb string) Client {
|
||||||
genericClient := NewGenericAPIClient(client, baseURL)
|
genericClient := NewGenericAPIClient(client, baseURL, headers)
|
||||||
return NewClientForAPI(genericClient)
|
return NewClientForAPI(genericClient, verb)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *queryClient) Series(ctx context.Context, interval model.Interval, selectors ...Selector) ([]Series, error) {
|
func (h *queryClient) Series(ctx context.Context, interval model.Interval, selectors ...Selector) ([]Series, error) {
|
||||||
|
|
@ -157,7 +174,7 @@ func (h *queryClient) Series(ctx context.Context, interval model.Interval, selec
|
||||||
vals.Add("match[]", string(selector))
|
vals.Add("match[]", string(selector))
|
||||||
}
|
}
|
||||||
|
|
||||||
res, err := h.api.Do(ctx, "GET", seriesURL, vals)
|
res, err := h.api.Do(ctx, h.verb, seriesURL, vals)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
@ -177,7 +194,7 @@ func (h *queryClient) Query(ctx context.Context, t model.Time, query Selector) (
|
||||||
vals.Set("timeout", model.Duration(timeout).String())
|
vals.Set("timeout", model.Duration(timeout).String())
|
||||||
}
|
}
|
||||||
|
|
||||||
res, err := h.api.Do(ctx, "GET", queryURL, vals)
|
res, err := h.api.Do(ctx, h.verb, queryURL, vals)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return QueryResult{}, err
|
return QueryResult{}, err
|
||||||
}
|
}
|
||||||
|
|
@ -204,7 +221,7 @@ func (h *queryClient) QueryRange(ctx context.Context, r Range, query Selector) (
|
||||||
vals.Set("timeout", model.Duration(timeout).String())
|
vals.Set("timeout", model.Duration(timeout).String())
|
||||||
}
|
}
|
||||||
|
|
||||||
res, err := h.api.Do(ctx, "GET", queryRangeURL, vals)
|
res, err := h.api.Do(ctx, h.verb, queryRangeURL, vals)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return QueryResult{}, err
|
return QueryResult{}, err
|
||||||
}
|
}
|
||||||
|
|
@ -218,7 +235,7 @@ func (h *queryClient) QueryRange(ctx context.Context, r Range, query Selector) (
|
||||||
// when present
|
// when present
|
||||||
func timeoutFromContext(ctx context.Context) (time.Duration, bool) {
|
func timeoutFromContext(ctx context.Context) (time.Duration, bool) {
|
||||||
if deadline, hasDeadline := ctx.Deadline(); hasDeadline {
|
if deadline, hasDeadline := ctx.Deadline(); hasDeadline {
|
||||||
return time.Now().Sub(deadline), true
|
return time.Since(deadline), true
|
||||||
}
|
}
|
||||||
|
|
||||||
return time.Duration(0), false
|
return time.Duration(0), false
|
||||||
|
|
|
||||||
78
pkg/client/fake/client.go
Normal file
78
pkg/client/fake/client.go
Normal file
|
|
@ -0,0 +1,78 @@
|
||||||
|
/*
|
||||||
|
Copyright 2018 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package fake
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
pmodel "github.com/prometheus/common/model"
|
||||||
|
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FakePrometheusClient is a fake instance of prom.Client
|
||||||
|
type FakePrometheusClient struct {
|
||||||
|
// AcceptableInterval is the interval in which to return queries
|
||||||
|
AcceptableInterval pmodel.Interval
|
||||||
|
// ErrQueries are queries that result in an error (whether from Query or Series)
|
||||||
|
ErrQueries map[prom.Selector]error
|
||||||
|
// Series are non-error responses to partial Series calls
|
||||||
|
SeriesResults map[prom.Selector][]prom.Series
|
||||||
|
// QueryResults are non-error responses to Query
|
||||||
|
QueryResults map[prom.Selector]prom.QueryResult
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *FakePrometheusClient) Series(_ context.Context, interval pmodel.Interval, selectors ...prom.Selector) ([]prom.Series, error) {
|
||||||
|
if (interval.Start != 0 && interval.Start < c.AcceptableInterval.Start) || (interval.End != 0 && interval.End > c.AcceptableInterval.End) {
|
||||||
|
return nil, fmt.Errorf("interval [%v, %v] for query is outside range [%v, %v]", interval.Start, interval.End, c.AcceptableInterval.Start, c.AcceptableInterval.End)
|
||||||
|
}
|
||||||
|
res := []prom.Series{}
|
||||||
|
for _, sel := range selectors {
|
||||||
|
if err, found := c.ErrQueries[sel]; found {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if series, found := c.SeriesResults[sel]; found {
|
||||||
|
res = append(res, series...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *FakePrometheusClient) Query(_ context.Context, t pmodel.Time, query prom.Selector) (prom.QueryResult, error) {
|
||||||
|
if t < c.AcceptableInterval.Start || t > c.AcceptableInterval.End {
|
||||||
|
return prom.QueryResult{}, fmt.Errorf("time %v for query is outside range [%v, %v]", t, c.AcceptableInterval.Start, c.AcceptableInterval.End)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err, found := c.ErrQueries[query]; found {
|
||||||
|
return prom.QueryResult{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if res, found := c.QueryResults[query]; found {
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return prom.QueryResult{
|
||||||
|
Type: pmodel.ValVector,
|
||||||
|
Vector: &pmodel.Vector{},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *FakePrometheusClient) QueryRange(_ context.Context, r prom.Range, query prom.Selector) (prom.QueryResult, error) {
|
||||||
|
return prom.QueryResult{}, nil
|
||||||
|
}
|
||||||
|
|
@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
See the License for the specific language governing permissions and
|
See the License for the specific language governing permissions and
|
||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package client
|
package client
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
@ -22,14 +23,14 @@ import (
|
||||||
|
|
||||||
// LabelNeq produces a not-equal label selector expression.
|
// LabelNeq produces a not-equal label selector expression.
|
||||||
// Label is passed verbatim, and value is double-quote escaped
|
// Label is passed verbatim, and value is double-quote escaped
|
||||||
// using Go's escaping is used on value (as per the PromQL rules).
|
// using Go's escaping (as per the PromQL rules).
|
||||||
func LabelNeq(label string, value string) string {
|
func LabelNeq(label string, value string) string {
|
||||||
return fmt.Sprintf("%s!=%q", label, value)
|
return fmt.Sprintf("%s!=%q", label, value)
|
||||||
}
|
}
|
||||||
|
|
||||||
// LabelEq produces a equal label selector expression.
|
// LabelEq produces a equal label selector expression.
|
||||||
// Label is passed verbatim, and value is double-quote escaped
|
// Label is passed verbatim, and value is double-quote escaped
|
||||||
// using Go's escaping is used on value (as per the PromQL rules).
|
// using Go's escaping (as per the PromQL rules).
|
||||||
func LabelEq(label string, value string) string {
|
func LabelEq(label string, value string) string {
|
||||||
return fmt.Sprintf("%s=%q", label, value)
|
return fmt.Sprintf("%s=%q", label, value)
|
||||||
}
|
}
|
||||||
|
|
@ -52,7 +53,7 @@ func NameMatches(expr string) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NameNotMatches produces a label selector expression that checks that the series name doesn't matches the given expression.
|
// NameNotMatches produces a label selector expression that checks that the series name doesn't matches the given expression.
|
||||||
// It's a convinience wrapper around LabelNotMatches.
|
// It's a convenience wrapper around LabelNotMatches.
|
||||||
func NameNotMatches(expr string) string {
|
func NameNotMatches(expr string) string {
|
||||||
return LabelNotMatches("__name__", expr)
|
return LabelNotMatches("__name__", expr)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -13,6 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
See the License for the specific language governing permissions and
|
See the License for the specific language governing permissions and
|
||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package client
|
package client
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
|
||||||
|
|
@ -13,34 +13,51 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
See the License for the specific language governing permissions and
|
See the License for the specific language governing permissions and
|
||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package metrics
|
package metrics
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
|
||||||
"github.com/directxman12/k8s-prometheus-adapter/pkg/client"
|
apimetrics "k8s.io/apiserver/pkg/endpoints/metrics"
|
||||||
|
"k8s.io/component-base/metrics"
|
||||||
|
"k8s.io/component-base/metrics/legacyregistry"
|
||||||
|
|
||||||
|
"sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// queryLatency is the total latency of any query going through the
|
// queryLatency is the total latency of any query going through the
|
||||||
// various endpoints (query, range-query, series). It includes some deserialization
|
// various endpoints (query, range-query, series). It includes some deserialization
|
||||||
// overhead and HTTP overhead.
|
// overhead and HTTP overhead.
|
||||||
queryLatency = prometheus.NewHistogramVec(
|
queryLatency = metrics.NewHistogramVec(
|
||||||
prometheus.HistogramOpts{
|
&metrics.HistogramOpts{
|
||||||
Name: "cmgateway_prometheus_query_latency_seconds",
|
Namespace: "prometheus_adapter",
|
||||||
|
Subsystem: "prometheus_client",
|
||||||
|
Name: "request_duration_seconds",
|
||||||
Help: "Prometheus client query latency in seconds. Broken down by target prometheus endpoint and target server",
|
Help: "Prometheus client query latency in seconds. Broken down by target prometheus endpoint and target server",
|
||||||
Buckets: prometheus.ExponentialBuckets(0.0001, 2, 10),
|
Buckets: prometheus.DefBuckets,
|
||||||
},
|
},
|
||||||
[]string{"endpoint", "server"},
|
[]string{"path", "server"},
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func MetricsHandler() (http.HandlerFunc, error) {
|
||||||
prometheus.MustRegister(queryLatency)
|
registry := metrics.NewKubeRegistry()
|
||||||
|
err := registry.Register(queryLatency)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
apimetrics.Register()
|
||||||
|
return func(w http.ResponseWriter, req *http.Request) {
|
||||||
|
legacyregistry.Handler().ServeHTTP(w, req)
|
||||||
|
metrics.HandlerFor(registry, metrics.HandlerOpts{}).ServeHTTP(w, req)
|
||||||
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// instrumentedClient is a client.GenericAPIClient which instruments calls to Do,
|
// instrumentedClient is a client.GenericAPIClient which instruments calls to Do,
|
||||||
|
|
@ -62,7 +79,7 @@ func (c *instrumentedGenericClient) Do(ctx context.Context, verb, endpoint strin
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
queryLatency.With(prometheus.Labels{"endpoint": endpoint, "server": c.serverName}).Observe(endTime.Sub(startTime).Seconds())
|
queryLatency.With(prometheus.Labels{"path": endpoint, "server": c.serverName}).Observe(endTime.Sub(startTime).Seconds())
|
||||||
}()
|
}()
|
||||||
|
|
||||||
var resp client.APIResponse
|
var resp client.APIResponse
|
||||||
|
|
|
||||||
|
|
@ -25,10 +25,10 @@ type ErrorType string
|
||||||
|
|
||||||
const (
|
const (
|
||||||
ErrBadData ErrorType = "bad_data"
|
ErrBadData ErrorType = "bad_data"
|
||||||
ErrTimeout = "timeout"
|
ErrTimeout ErrorType = "timeout"
|
||||||
ErrCanceled = "canceled"
|
ErrCanceled ErrorType = "canceled"
|
||||||
ErrExec = "execution"
|
ErrExec ErrorType = "execution"
|
||||||
ErrBadResponse = "bad_response"
|
ErrBadResponse ErrorType = "bad_response"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Error is an error returned by the API.
|
// Error is an error returned by the API.
|
||||||
|
|
@ -46,7 +46,7 @@ type ResponseStatus string
|
||||||
|
|
||||||
const (
|
const (
|
||||||
ResponseSucceeded ResponseStatus = "succeeded"
|
ResponseSucceeded ResponseStatus = "succeeded"
|
||||||
ResponseError = "error"
|
ResponseError ResponseStatus = "error"
|
||||||
)
|
)
|
||||||
|
|
||||||
// APIResponse represents the raw response returned by the API.
|
// APIResponse represents the raw response returned by the API.
|
||||||
|
|
|
||||||
|
|
@ -1,44 +1,50 @@
|
||||||
package config
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
pmodel "github.com/prometheus/common/model"
|
||||||
|
)
|
||||||
|
|
||||||
type MetricsDiscoveryConfig struct {
|
type MetricsDiscoveryConfig struct {
|
||||||
// Rules specifies how to discover and map Prometheus metrics to
|
// Rules specifies how to discover and map Prometheus metrics to
|
||||||
// custom metrics API resources. The rules are applied independently,
|
// custom metrics API resources. The rules are applied independently,
|
||||||
// and thus must be mutually exclusive. Rules will the same SeriesQuery
|
// and thus must be mutually exclusive. Rules with the same SeriesQuery
|
||||||
// will make only a single API call.
|
// will make only a single API call.
|
||||||
Rules []DiscoveryRule `yaml:"rules"`
|
Rules []DiscoveryRule `json:"rules" yaml:"rules"`
|
||||||
|
ResourceRules *ResourceRules `json:"resourceRules,omitempty" yaml:"resourceRules,omitempty"`
|
||||||
|
ExternalRules []DiscoveryRule `json:"externalRules,omitempty" yaml:"externalRules,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// DiscoveryRule describes on set of rules for transforming Prometheus metrics to/from
|
// DiscoveryRule describes a set of rules for transforming Prometheus metrics to/from
|
||||||
// custom metrics API resources.
|
// custom metrics API resources.
|
||||||
type DiscoveryRule struct {
|
type DiscoveryRule struct {
|
||||||
// SeriesQuery specifies which metrics this rule should consider via a Prometheus query
|
// SeriesQuery specifies which metrics this rule should consider via a Prometheus query
|
||||||
// series selector query.
|
// series selector query.
|
||||||
SeriesQuery string `yaml:"seriesQuery"`
|
SeriesQuery string `json:"seriesQuery" yaml:"seriesQuery"`
|
||||||
// SeriesFilters specifies additional regular expressions to be applied on
|
// SeriesFilters specifies additional regular expressions to be applied on
|
||||||
// the series names returned from the query. This is useful for constraints
|
// the series names returned from the query. This is useful for constraints
|
||||||
// that can't be represented in the SeriesQuery (e.g. series matching `container_.+`
|
// that can't be represented in the SeriesQuery (e.g. series matching `container_.+`
|
||||||
// not matching `container_.+_total`. A filter will be automatically appended to
|
// not matching `container_.+_total`. A filter will be automatically appended to
|
||||||
// match the form specified in Name.
|
// match the form specified in Name.
|
||||||
SeriesFilters []RegexFilter `yaml:"seriesFilters"`
|
SeriesFilters []RegexFilter `json:"seriesFilters" yaml:"seriesFilters"`
|
||||||
// Resources specifies how associated Kubernetes resources should be discovered for
|
// Resources specifies how associated Kubernetes resources should be discovered for
|
||||||
// the given metrics.
|
// the given metrics.
|
||||||
Resources ResourceMapping `yaml:"resources"`
|
Resources ResourceMapping `json:"resources" yaml:"resources"`
|
||||||
// Name specifies how the metric name should be transformed between custom metric
|
// Name specifies how the metric name should be transformed between custom metric
|
||||||
// API resources, and Prometheus metric names.
|
// API resources, and Prometheus metric names.
|
||||||
Name NameMapping `yaml:"name"`
|
Name NameMapping `json:"name" yaml:"name"`
|
||||||
// MetricsQuery specifies modifications to the metrics query, such as converting
|
// MetricsQuery specifies modifications to the metrics query, such as converting
|
||||||
// cumulative metrics to rate metrics. It is a template where `.LabelMatchers` is
|
// cumulative metrics to rate metrics. It is a template where `.LabelMatchers` is
|
||||||
// a the comma-separated base label matchers and `.Series` is the series name, and
|
// a the comma-separated base label matchers and `.Series` is the series name, and
|
||||||
// `.GroupBy` is the comma-separated expected group-by label names. The delimeters
|
// `.GroupBy` is the comma-separated expected group-by label names. The delimeters
|
||||||
// are `<<` and `>>`.
|
// are `<<` and `>>`.
|
||||||
MetricsQuery string `yaml:"metricsQuery,omitempty"`
|
MetricsQuery string `json:"metricsQuery,omitempty" yaml:"metricsQuery,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// RegexFilter is a filter that matches positively or negatively against a regex.
|
// RegexFilter is a filter that matches positively or negatively against a regex.
|
||||||
// Only one field may be set at a time.
|
// Only one field may be set at a time.
|
||||||
type RegexFilter struct {
|
type RegexFilter struct {
|
||||||
Is string `yaml:"is,omitempty"`
|
Is string `json:"is,omitempty" yaml:"is,omitempty"`
|
||||||
IsNot string `yaml:"isNot,omitempty"`
|
IsNot string `json:"isNot,omitempty" yaml:"isNot,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ResourceMapping specifies how to map Kubernetes resources to Prometheus labels
|
// ResourceMapping specifies how to map Kubernetes resources to Prometheus labels
|
||||||
|
|
@ -48,16 +54,18 @@ type ResourceMapping struct {
|
||||||
// the `.Group` and `.Resource` fields. The `.Group` field will have
|
// the `.Group` and `.Resource` fields. The `.Group` field will have
|
||||||
// dots replaced with underscores, and the `.Resource` field will be
|
// dots replaced with underscores, and the `.Resource` field will be
|
||||||
// singularized. The delimiters are `<<` and `>>`.
|
// singularized. The delimiters are `<<` and `>>`.
|
||||||
Template string `yaml:"template,omitempty"`
|
Template string `json:"template,omitempty" yaml:"template,omitempty"`
|
||||||
// Overrides specifies exceptions to the above template, mapping label names
|
// Overrides specifies exceptions to the above template, mapping label names
|
||||||
// to group-resources
|
// to group-resources
|
||||||
Overrides map[string]GroupResource `yaml:"overrides,omitempty"`
|
Overrides map[string]GroupResource `json:"overrides,omitempty" yaml:"overrides,omitempty"`
|
||||||
|
// Namespaced ignores the source namespace of the requester and requires one in the query
|
||||||
|
Namespaced *bool `json:"namespaced,omitempty" yaml:"namespaced,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// GroupResource represents a Kubernetes group-resource.
|
// GroupResource represents a Kubernetes group-resource.
|
||||||
type GroupResource struct {
|
type GroupResource struct {
|
||||||
Group string `yaml:"group,omitempty"`
|
Group string `json:"group,omitempty" yaml:"group,omitempty"`
|
||||||
Resource string `yaml:"resource"`
|
Resource string `json:"resource" yaml:"resource"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// NameMapping specifies how to convert Prometheus metrics
|
// NameMapping specifies how to convert Prometheus metrics
|
||||||
|
|
@ -66,10 +74,38 @@ type NameMapping struct {
|
||||||
// Matches is a regular expression that is used to match
|
// Matches is a regular expression that is used to match
|
||||||
// Prometheus series names. It may be left blank, in which
|
// Prometheus series names. It may be left blank, in which
|
||||||
// case it is equivalent to `.*`.
|
// case it is equivalent to `.*`.
|
||||||
Matches string `yaml:"matches"`
|
Matches string `json:"matches" yaml:"matches"`
|
||||||
// As is the name used in the API. Captures from Matches
|
// As is the name used in the API. Captures from Matches
|
||||||
// are available for use here. If not specified, it defaults
|
// are available for use here. If not specified, it defaults
|
||||||
// to $0 if no capture groups are present in Matches, or $1
|
// to $0 if no capture groups are present in Matches, or $1
|
||||||
// if only one is present, and will error if multiple are.
|
// if only one is present, and will error if multiple are.
|
||||||
As string `yaml:"as"`
|
As string `json:"as" yaml:"as"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResourceRules describe the rules for querying resource metrics
|
||||||
|
// API results. It's assumed that the same metrics can be used
|
||||||
|
// to aggregate across different resources.
|
||||||
|
type ResourceRules struct {
|
||||||
|
CPU ResourceRule `json:"cpu" yaml:"cpu"`
|
||||||
|
Memory ResourceRule `json:"memory" yaml:"memory"`
|
||||||
|
// Window is the window size reported by the resource metrics API. It should match the value used
|
||||||
|
// in your containerQuery and nodeQuery if you use a `rate` function.
|
||||||
|
Window pmodel.Duration `json:"window" yaml:"window"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResourceRule describes how to query metrics for some particular
|
||||||
|
// system resource metric.
|
||||||
|
type ResourceRule struct {
|
||||||
|
// Container is the query used to fetch the metrics for containers.
|
||||||
|
ContainerQuery string `json:"containerQuery" yaml:"containerQuery"`
|
||||||
|
// NodeQuery is the query used to fetch the metrics for nodes
|
||||||
|
// (for instance, simply aggregating by node label is insufficient for
|
||||||
|
// cadvisor metrics -- you need to select the `/` container).
|
||||||
|
NodeQuery string `json:"nodeQuery" yaml:"nodeQuery"`
|
||||||
|
// Resources specifies how associated Kubernetes resources should be discovered for
|
||||||
|
// the given metrics.
|
||||||
|
Resources ResourceMapping `json:"resources" yaml:"resources"`
|
||||||
|
// ContainerLabel indicates the name of the Prometheus label containing the container name
|
||||||
|
// (since "container" is not a resource, this can't go in the `resources` block, but is similar).
|
||||||
|
ContainerLabel string `json:"containerLabel" yaml:"containerLabel"`
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,7 @@ package config
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
yaml "gopkg.in/yaml.v2"
|
yaml "gopkg.in/yaml.v2"
|
||||||
|
|
@ -11,11 +11,11 @@ import (
|
||||||
// FromFile loads the configuration from a particular file.
|
// FromFile loads the configuration from a particular file.
|
||||||
func FromFile(filename string) (*MetricsDiscoveryConfig, error) {
|
func FromFile(filename string) (*MetricsDiscoveryConfig, error) {
|
||||||
file, err := os.Open(filename)
|
file, err := os.Open(filename)
|
||||||
defer file.Close()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("unable to load metrics discovery config file: %v", err)
|
return nil, fmt.Errorf("unable to load metrics discovery config file: %v", err)
|
||||||
}
|
}
|
||||||
contents, err := ioutil.ReadAll(file)
|
defer file.Close()
|
||||||
|
contents, err := io.ReadAll(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("unable to load metrics discovery config file: %v", err)
|
return nil, fmt.Errorf("unable to load metrics discovery config file: %v", err)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,475 +0,0 @@
|
||||||
package provider
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"text/template"
|
|
||||||
|
|
||||||
"github.com/golang/glog"
|
|
||||||
"github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/provider"
|
|
||||||
apimeta "k8s.io/apimachinery/pkg/api/meta"
|
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
|
||||||
|
|
||||||
prom "github.com/directxman12/k8s-prometheus-adapter/pkg/client"
|
|
||||||
"github.com/directxman12/k8s-prometheus-adapter/pkg/config"
|
|
||||||
pmodel "github.com/prometheus/common/model"
|
|
||||||
)
|
|
||||||
|
|
||||||
var nsGroupResource = schema.GroupResource{Resource: "namespaces"}
|
|
||||||
var groupNameSanitizer = strings.NewReplacer(".", "_", "-", "_")
|
|
||||||
|
|
||||||
// MetricNamer knows how to convert Prometheus series names and label names to
|
|
||||||
// metrics API resources, and vice-versa. MetricNamers should be safe to access
|
|
||||||
// concurrently. Returned group-resources are "normalized" as per the
|
|
||||||
// MetricInfo#Normalized method. Group-resources passed as arguments must
|
|
||||||
// themselves be normalized.
|
|
||||||
type MetricNamer interface {
|
|
||||||
// Selector produces the appropriate Prometheus series selector to match all
|
|
||||||
// series handlable by this namer.
|
|
||||||
Selector() prom.Selector
|
|
||||||
// FilterSeries checks to see which of the given series match any additional
|
|
||||||
// constrains beyond the series query. It's assumed that the series given
|
|
||||||
// already matche the series query.
|
|
||||||
FilterSeries(series []prom.Series) []prom.Series
|
|
||||||
// ResourcesForSeries returns the group-resources associated with the given series,
|
|
||||||
// as well as whether or not the given series has the "namespace" resource).
|
|
||||||
ResourcesForSeries(series prom.Series) (res []schema.GroupResource, namespaced bool)
|
|
||||||
// LabelForResource returns the appropriate label for the given resource.
|
|
||||||
LabelForResource(resource schema.GroupResource) (pmodel.LabelName, error)
|
|
||||||
// MetricNameForSeries returns the name (as presented in the API) for a given series.
|
|
||||||
MetricNameForSeries(series prom.Series) (string, error)
|
|
||||||
// QueryForSeries returns the query for a given series (not API metric name), with
|
|
||||||
// the given namespace name (if relevant), resource, and resource names.
|
|
||||||
QueryForSeries(series string, resource schema.GroupResource, namespace string, names ...string) (prom.Selector, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// labelGroupResExtractor extracts schema.GroupResources from series labels.
|
|
||||||
type labelGroupResExtractor struct {
|
|
||||||
regex *regexp.Regexp
|
|
||||||
|
|
||||||
resourceInd int
|
|
||||||
groupInd *int
|
|
||||||
mapper apimeta.RESTMapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// newLabelGroupResExtractor creates a new labelGroupResExtractor for labels whose form
|
|
||||||
// matches the given template. It does so by creating a regular expression from the template,
|
|
||||||
// so anything in the template which limits resource or group name length will cause issues.
|
|
||||||
func newLabelGroupResExtractor(labelTemplate *template.Template) (*labelGroupResExtractor, error) {
|
|
||||||
labelRegexBuff := new(bytes.Buffer)
|
|
||||||
if err := labelTemplate.Execute(labelRegexBuff, schema.GroupResource{"(?P<group>.+?)", "(?P<resource>.+?)"}); err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to convert label template to matcher: %v", err)
|
|
||||||
}
|
|
||||||
if labelRegexBuff.Len() == 0 {
|
|
||||||
return nil, fmt.Errorf("unable to convert label template to matcher: empty template")
|
|
||||||
}
|
|
||||||
labelRegexRaw := "^" + labelRegexBuff.String() + "$"
|
|
||||||
labelRegex, err := regexp.Compile(labelRegexRaw)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to convert label template to matcher: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var groupInd *int
|
|
||||||
var resInd *int
|
|
||||||
|
|
||||||
for i, name := range labelRegex.SubexpNames() {
|
|
||||||
switch name {
|
|
||||||
case "group":
|
|
||||||
ind := i // copy to avoid iteration variable reference
|
|
||||||
groupInd = &ind
|
|
||||||
case "resource":
|
|
||||||
ind := i // copy to avoid iteration variable reference
|
|
||||||
resInd = &ind
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if resInd == nil {
|
|
||||||
return nil, fmt.Errorf("must include at least `{{.Resource}}` in the label template")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &labelGroupResExtractor{
|
|
||||||
regex: labelRegex,
|
|
||||||
resourceInd: *resInd,
|
|
||||||
groupInd: groupInd,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GroupResourceForLabel extracts a schema.GroupResource from the given label, if possible.
|
|
||||||
// The second argument indicates whether or not a potential group-resource was found in this label.
|
|
||||||
func (e *labelGroupResExtractor) GroupResourceForLabel(lbl pmodel.LabelName) (schema.GroupResource, bool) {
|
|
||||||
matchGroups := e.regex.FindStringSubmatch(string(lbl))
|
|
||||||
if matchGroups != nil {
|
|
||||||
group := ""
|
|
||||||
if e.groupInd != nil {
|
|
||||||
group = matchGroups[*e.groupInd]
|
|
||||||
}
|
|
||||||
|
|
||||||
return schema.GroupResource{
|
|
||||||
Group: group,
|
|
||||||
Resource: matchGroups[e.resourceInd],
|
|
||||||
}, true
|
|
||||||
}
|
|
||||||
|
|
||||||
return schema.GroupResource{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *metricNamer) Selector() prom.Selector {
|
|
||||||
return r.seriesQuery
|
|
||||||
}
|
|
||||||
|
|
||||||
// reMatcher either positively or negatively matches a regex
|
|
||||||
type reMatcher struct {
|
|
||||||
regex *regexp.Regexp
|
|
||||||
positive bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func newReMatcher(cfg config.RegexFilter) (*reMatcher, error) {
|
|
||||||
if cfg.Is != "" && cfg.IsNot != "" {
|
|
||||||
return nil, fmt.Errorf("cannot have both an `is` (%q) and `isNot` (%q) expression in a single filter", cfg.Is, cfg.IsNot)
|
|
||||||
}
|
|
||||||
if cfg.Is == "" && cfg.IsNot == "" {
|
|
||||||
return nil, fmt.Errorf("must have either an `is` or `isNot` expression in a filter")
|
|
||||||
}
|
|
||||||
|
|
||||||
var positive bool
|
|
||||||
var regexRaw string
|
|
||||||
if cfg.Is != "" {
|
|
||||||
positive = true
|
|
||||||
regexRaw = cfg.Is
|
|
||||||
} else {
|
|
||||||
positive = false
|
|
||||||
regexRaw = cfg.IsNot
|
|
||||||
}
|
|
||||||
|
|
||||||
regex, err := regexp.Compile(regexRaw)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to compile series filter %q: %v", regexRaw, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &reMatcher{
|
|
||||||
regex: regex,
|
|
||||||
positive: positive,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *reMatcher) Matches(val string) bool {
|
|
||||||
return m.regex.MatchString(val) == m.positive
|
|
||||||
}
|
|
||||||
|
|
||||||
type metricNamer struct {
|
|
||||||
seriesQuery prom.Selector
|
|
||||||
labelTemplate *template.Template
|
|
||||||
labelResExtractor *labelGroupResExtractor
|
|
||||||
metricsQueryTemplate *template.Template
|
|
||||||
nameMatches *regexp.Regexp
|
|
||||||
nameAs string
|
|
||||||
seriesMatchers []*reMatcher
|
|
||||||
|
|
||||||
labelResourceMu sync.RWMutex
|
|
||||||
labelToResource map[pmodel.LabelName]schema.GroupResource
|
|
||||||
resourceToLabel map[schema.GroupResource]pmodel.LabelName
|
|
||||||
mapper apimeta.RESTMapper
|
|
||||||
}
|
|
||||||
|
|
||||||
// queryTemplateArgs are the arguments for the metrics query template.
|
|
||||||
type queryTemplateArgs struct {
|
|
||||||
Series string
|
|
||||||
LabelMatchers string
|
|
||||||
LabelValuesByName map[string][]string
|
|
||||||
GroupBy string
|
|
||||||
GroupBySlice []string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *metricNamer) FilterSeries(initialSeries []prom.Series) []prom.Series {
|
|
||||||
if len(n.seriesMatchers) == 0 {
|
|
||||||
return initialSeries
|
|
||||||
}
|
|
||||||
|
|
||||||
finalSeries := make([]prom.Series, 0, len(initialSeries))
|
|
||||||
SeriesLoop:
|
|
||||||
for _, series := range initialSeries {
|
|
||||||
for _, matcher := range n.seriesMatchers {
|
|
||||||
if !matcher.Matches(series.Name) {
|
|
||||||
continue SeriesLoop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
finalSeries = append(finalSeries, series)
|
|
||||||
}
|
|
||||||
|
|
||||||
return finalSeries
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *metricNamer) QueryForSeries(series string, resource schema.GroupResource, namespace string, names ...string) (prom.Selector, error) {
|
|
||||||
var exprs []string
|
|
||||||
valuesByName := map[string][]string{}
|
|
||||||
|
|
||||||
if namespace != "" {
|
|
||||||
namespaceLbl, err := n.LabelForResource(nsGroupResource)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
exprs = append(exprs, prom.LabelEq(string(namespaceLbl), namespace))
|
|
||||||
valuesByName[string(namespaceLbl)] = []string{namespace}
|
|
||||||
}
|
|
||||||
|
|
||||||
resourceLbl, err := n.LabelForResource(resource)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
matcher := prom.LabelEq
|
|
||||||
targetValue := names[0]
|
|
||||||
if len(names) > 1 {
|
|
||||||
matcher = prom.LabelMatches
|
|
||||||
targetValue = strings.Join(names, "|")
|
|
||||||
}
|
|
||||||
exprs = append(exprs, matcher(string(resourceLbl), targetValue))
|
|
||||||
valuesByName[string(resourceLbl)] = names
|
|
||||||
|
|
||||||
args := queryTemplateArgs{
|
|
||||||
Series: series,
|
|
||||||
LabelMatchers: strings.Join(exprs, ","),
|
|
||||||
LabelValuesByName: valuesByName,
|
|
||||||
GroupBy: string(resourceLbl),
|
|
||||||
GroupBySlice: []string{string(resourceLbl)},
|
|
||||||
}
|
|
||||||
queryBuff := new(bytes.Buffer)
|
|
||||||
if err := n.metricsQueryTemplate.Execute(queryBuff, args); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if queryBuff.Len() == 0 {
|
|
||||||
return "", fmt.Errorf("empty query produced by metrics query template")
|
|
||||||
}
|
|
||||||
|
|
||||||
return prom.Selector(queryBuff.String()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *metricNamer) ResourcesForSeries(series prom.Series) ([]schema.GroupResource, bool) {
|
|
||||||
// use an updates map to avoid having to drop the read lock to update the cache
|
|
||||||
// until the end. Since we'll probably have few updates after the first run,
|
|
||||||
// this should mean that we rarely have to hold the write lock.
|
|
||||||
var resources []schema.GroupResource
|
|
||||||
updates := make(map[pmodel.LabelName]schema.GroupResource)
|
|
||||||
namespaced := false
|
|
||||||
|
|
||||||
// use an anon func to get the right defer behavior
|
|
||||||
func() {
|
|
||||||
n.labelResourceMu.RLock()
|
|
||||||
defer n.labelResourceMu.RUnlock()
|
|
||||||
|
|
||||||
for lbl := range series.Labels {
|
|
||||||
var groupRes schema.GroupResource
|
|
||||||
var ok bool
|
|
||||||
|
|
||||||
// check if we have an override
|
|
||||||
if groupRes, ok = n.labelToResource[lbl]; ok {
|
|
||||||
resources = append(resources, groupRes)
|
|
||||||
} else if groupRes, ok = updates[lbl]; ok {
|
|
||||||
resources = append(resources, groupRes)
|
|
||||||
} else if n.labelResExtractor != nil {
|
|
||||||
// if not, check if it matches the form we expect, and if so,
|
|
||||||
// convert to a group-resource.
|
|
||||||
if groupRes, ok = n.labelResExtractor.GroupResourceForLabel(lbl); ok {
|
|
||||||
info, _, err := provider.CustomMetricInfo{GroupResource: groupRes}.Normalized(n.mapper)
|
|
||||||
if err != nil {
|
|
||||||
// this is likely to show up for a lot of labels, so make it a verbose info log
|
|
||||||
glog.V(9).Infof("unable to normalize group-resource %s from label %q, skipping: %v", groupRes.String(), lbl, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
groupRes = info.GroupResource
|
|
||||||
resources = append(resources, groupRes)
|
|
||||||
updates[lbl] = groupRes
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if groupRes == nsGroupResource {
|
|
||||||
namespaced = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// update the cache for next time. This should only be called by discovery,
|
|
||||||
// so we don't really have to worry about the grap between read and write locks
|
|
||||||
// (plus, we don't care if someone else updates the cache first, since the results
|
|
||||||
// are necessarily the same, so at most we've done extra work).
|
|
||||||
if len(updates) > 0 {
|
|
||||||
n.labelResourceMu.Lock()
|
|
||||||
defer n.labelResourceMu.Unlock()
|
|
||||||
|
|
||||||
for lbl, groupRes := range updates {
|
|
||||||
n.labelToResource[lbl] = groupRes
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return resources, namespaced
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *metricNamer) LabelForResource(resource schema.GroupResource) (pmodel.LabelName, error) {
|
|
||||||
n.labelResourceMu.RLock()
|
|
||||||
// check if we have a cached copy or override
|
|
||||||
lbl, ok := n.resourceToLabel[resource]
|
|
||||||
n.labelResourceMu.RUnlock() // release before we call makeLabelForResource
|
|
||||||
if ok {
|
|
||||||
return lbl, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NB: we don't actually care about the gap between releasing read lock
|
|
||||||
// and acquiring the write lock -- if we do duplicate work sometimes, so be
|
|
||||||
// it, as long as we're correct.
|
|
||||||
|
|
||||||
// otherwise, use the template and save the result
|
|
||||||
lbl, err := n.makeLabelForResource(resource)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("unable to convert resource %s into label: %v", resource.String(), err)
|
|
||||||
}
|
|
||||||
return lbl, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeLabelForResource constructs a label name for the given resource, and saves the result.
|
|
||||||
// It must *not* be called under an existing lock.
|
|
||||||
func (n *metricNamer) makeLabelForResource(resource schema.GroupResource) (pmodel.LabelName, error) {
|
|
||||||
if n.labelTemplate == nil {
|
|
||||||
return "", fmt.Errorf("no generic resource label form specified for this metric")
|
|
||||||
}
|
|
||||||
buff := new(bytes.Buffer)
|
|
||||||
|
|
||||||
singularRes, err := n.mapper.ResourceSingularizer(resource.Resource)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("unable to singularize resource %s: %v", resource.String(), err)
|
|
||||||
}
|
|
||||||
convResource := schema.GroupResource{
|
|
||||||
Group: groupNameSanitizer.Replace(resource.Group),
|
|
||||||
Resource: singularRes,
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := n.labelTemplate.Execute(buff, convResource); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if buff.Len() == 0 {
|
|
||||||
return "", fmt.Errorf("empty label produced by label template")
|
|
||||||
}
|
|
||||||
lbl := pmodel.LabelName(buff.String())
|
|
||||||
|
|
||||||
n.labelResourceMu.Lock()
|
|
||||||
defer n.labelResourceMu.Unlock()
|
|
||||||
|
|
||||||
n.resourceToLabel[resource] = lbl
|
|
||||||
n.labelToResource[lbl] = resource
|
|
||||||
return lbl, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *metricNamer) MetricNameForSeries(series prom.Series) (string, error) {
|
|
||||||
matches := n.nameMatches.FindStringSubmatchIndex(series.Name)
|
|
||||||
if matches == nil {
|
|
||||||
return "", fmt.Errorf("series name %q did not match expected pattern %q", series.Name, n.nameMatches.String())
|
|
||||||
}
|
|
||||||
outNameBytes := n.nameMatches.ExpandString(nil, n.nameAs, series.Name, matches)
|
|
||||||
return string(outNameBytes), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NamersFromConfig produces a MetricNamer for each rule in the given config.
|
|
||||||
func NamersFromConfig(cfg *config.MetricsDiscoveryConfig, mapper apimeta.RESTMapper) ([]MetricNamer, error) {
|
|
||||||
namers := make([]MetricNamer, len(cfg.Rules))
|
|
||||||
|
|
||||||
for i, rule := range cfg.Rules {
|
|
||||||
var labelTemplate *template.Template
|
|
||||||
var labelResExtractor *labelGroupResExtractor
|
|
||||||
var err error
|
|
||||||
if rule.Resources.Template != "" {
|
|
||||||
labelTemplate, err = template.New("resource-label").Delims("<<", ">>").Parse(rule.Resources.Template)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to parse label template %q associated with series query %q: %v", rule.Resources.Template, rule.SeriesQuery, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
labelResExtractor, err = newLabelGroupResExtractor(labelTemplate)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to generate label format from template %q associated with series query %q: %v", rule.Resources.Template, rule.SeriesQuery, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
metricsQueryTemplate, err := template.New("metrics-query").Delims("<<", ">>").Parse(rule.MetricsQuery)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to parse metrics query template %q associated with series query %q: %v", rule.MetricsQuery, rule.SeriesQuery, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
seriesMatchers := make([]*reMatcher, len(rule.SeriesFilters))
|
|
||||||
for i, filterRaw := range rule.SeriesFilters {
|
|
||||||
matcher, err := newReMatcher(filterRaw)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to generate series name filter associated with series query %q: %v", rule.SeriesQuery, err)
|
|
||||||
}
|
|
||||||
seriesMatchers[i] = matcher
|
|
||||||
}
|
|
||||||
if rule.Name.Matches != "" {
|
|
||||||
matcher, err := newReMatcher(config.RegexFilter{Is: rule.Name.Matches})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to generate series name filter from name rules associated with series query %q: %v", rule.SeriesQuery, err)
|
|
||||||
}
|
|
||||||
seriesMatchers = append(seriesMatchers, matcher)
|
|
||||||
}
|
|
||||||
|
|
||||||
var nameMatches *regexp.Regexp
|
|
||||||
if rule.Name.Matches != "" {
|
|
||||||
nameMatches, err = regexp.Compile(rule.Name.Matches)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to compile series name match expression %q associated with series query %q: %v", rule.Name.Matches, rule.SeriesQuery, err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// this will always succeed
|
|
||||||
nameMatches = regexp.MustCompile(".*")
|
|
||||||
}
|
|
||||||
nameAs := rule.Name.As
|
|
||||||
if nameAs == "" {
|
|
||||||
// check if we have an obvious default
|
|
||||||
subexpNames := nameMatches.SubexpNames()
|
|
||||||
if len(subexpNames) == 1 {
|
|
||||||
// no capture groups, use the whole thing
|
|
||||||
nameAs = "$0"
|
|
||||||
} else if len(subexpNames) == 2 {
|
|
||||||
// one capture group, use that
|
|
||||||
nameAs = "$1"
|
|
||||||
} else {
|
|
||||||
return nil, fmt.Errorf("must specify an 'as' value for name matcher %q associated with series query %q", rule.Name.Matches, rule.SeriesQuery)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
namer := &metricNamer{
|
|
||||||
seriesQuery: prom.Selector(rule.SeriesQuery),
|
|
||||||
labelTemplate: labelTemplate,
|
|
||||||
labelResExtractor: labelResExtractor,
|
|
||||||
metricsQueryTemplate: metricsQueryTemplate,
|
|
||||||
mapper: mapper,
|
|
||||||
nameMatches: nameMatches,
|
|
||||||
nameAs: nameAs,
|
|
||||||
seriesMatchers: seriesMatchers,
|
|
||||||
|
|
||||||
labelToResource: make(map[pmodel.LabelName]schema.GroupResource),
|
|
||||||
resourceToLabel: make(map[schema.GroupResource]pmodel.LabelName),
|
|
||||||
}
|
|
||||||
|
|
||||||
// invert the structure for consistency with the template
|
|
||||||
for lbl, groupRes := range rule.Resources.Overrides {
|
|
||||||
infoRaw := provider.CustomMetricInfo{
|
|
||||||
GroupResource: schema.GroupResource{
|
|
||||||
Group: groupRes.Group,
|
|
||||||
Resource: groupRes.Resource,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
info, _, err := infoRaw.Normalized(mapper)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to normalize group-resource %v: %v", groupRes, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
namer.labelToResource[pmodel.LabelName(lbl)] = info.GroupResource
|
|
||||||
namer.resourceToLabel[info.GroupResource] = pmodel.LabelName(lbl)
|
|
||||||
}
|
|
||||||
|
|
||||||
namers[i] = namer
|
|
||||||
}
|
|
||||||
|
|
||||||
return namers, nil
|
|
||||||
}
|
|
||||||
|
|
@ -19,25 +19,28 @@ package provider
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"math"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/glog"
|
|
||||||
"github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/provider"
|
|
||||||
pmodel "github.com/prometheus/common/model"
|
pmodel "github.com/prometheus/common/model"
|
||||||
|
|
||||||
apierr "k8s.io/apimachinery/pkg/api/errors"
|
apierr "k8s.io/apimachinery/pkg/api/errors"
|
||||||
apimeta "k8s.io/apimachinery/pkg/api/meta"
|
apimeta "k8s.io/apimachinery/pkg/api/meta"
|
||||||
"k8s.io/apimachinery/pkg/api/resource"
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
|
||||||
"k8s.io/apimachinery/pkg/labels"
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/types"
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
|
||||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||||
"k8s.io/apimachinery/pkg/util/wait"
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
"k8s.io/client-go/dynamic"
|
"k8s.io/client-go/dynamic"
|
||||||
|
"k8s.io/klog/v2"
|
||||||
"k8s.io/metrics/pkg/apis/custom_metrics"
|
"k8s.io/metrics/pkg/apis/custom_metrics"
|
||||||
|
|
||||||
prom "github.com/directxman12/k8s-prometheus-adapter/pkg/client"
|
"sigs.k8s.io/custom-metrics-apiserver/pkg/provider"
|
||||||
|
"sigs.k8s.io/custom-metrics-apiserver/pkg/provider/helpers"
|
||||||
|
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
"sigs.k8s.io/prometheus-adapter/pkg/naming"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Runnable represents something that can be run until told to stop.
|
// Runnable represents something that can be run until told to stop.
|
||||||
|
|
@ -56,9 +59,10 @@ type prometheusProvider struct {
|
||||||
SeriesRegistry
|
SeriesRegistry
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewPrometheusProvider(mapper apimeta.RESTMapper, kubeClient dynamic.Interface, promClient prom.Client, namers []MetricNamer, updateInterval time.Duration) (provider.CustomMetricsProvider, Runnable) {
|
func NewPrometheusProvider(mapper apimeta.RESTMapper, kubeClient dynamic.Interface, promClient prom.Client, namers []naming.MetricNamer, updateInterval time.Duration, maxAge time.Duration) (provider.CustomMetricsProvider, Runnable) {
|
||||||
lister := &cachingMetricsLister{
|
lister := &cachingMetricsLister{
|
||||||
updateInterval: updateInterval,
|
updateInterval: updateInterval,
|
||||||
|
maxAge: maxAge,
|
||||||
promClient: promClient,
|
promClient: promClient,
|
||||||
namers: namers,
|
namers: namers,
|
||||||
|
|
||||||
|
|
@ -76,89 +80,96 @@ func NewPrometheusProvider(mapper apimeta.RESTMapper, kubeClient dynamic.Interfa
|
||||||
}, lister
|
}, lister
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *prometheusProvider) metricFor(value pmodel.SampleValue, groupResource schema.GroupResource, namespace string, name string, metricName string) (*custom_metrics.MetricValue, error) {
|
func (p *prometheusProvider) metricFor(value pmodel.SampleValue, name types.NamespacedName, info provider.CustomMetricInfo, metricSelector labels.Selector) (*custom_metrics.MetricValue, error) {
|
||||||
kind, err := p.mapper.KindFor(groupResource.WithVersion(""))
|
ref, err := helpers.ReferenceFor(p.mapper, name, info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return &custom_metrics.MetricValue{
|
var q *resource.Quantity
|
||||||
DescribedObject: custom_metrics.ObjectReference{
|
if math.IsNaN(float64(value)) {
|
||||||
APIVersion: groupResource.Group + "/" + runtime.APIVersionInternal,
|
q = resource.NewQuantity(0, resource.DecimalSI)
|
||||||
Kind: kind.Kind,
|
} else {
|
||||||
Name: name,
|
q = resource.NewMilliQuantity(int64(value*1000.0), resource.DecimalSI)
|
||||||
Namespace: namespace,
|
}
|
||||||
|
|
||||||
|
metric := &custom_metrics.MetricValue{
|
||||||
|
DescribedObject: ref,
|
||||||
|
Metric: custom_metrics.MetricIdentifier{
|
||||||
|
Name: info.Metric,
|
||||||
},
|
},
|
||||||
MetricName: metricName,
|
// TODO(directxman12): use the right timestamp
|
||||||
Timestamp: metav1.Time{time.Now()},
|
Timestamp: metav1.Time{Time: time.Now()},
|
||||||
Value: *resource.NewMilliQuantity(int64(value*1000.0), resource.DecimalSI),
|
Value: *q,
|
||||||
}, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *prometheusProvider) metricsFor(valueSet pmodel.Vector, info provider.CustomMetricInfo, list runtime.Object) (*custom_metrics.MetricValueList, error) {
|
if !metricSelector.Empty() {
|
||||||
if !apimeta.IsListType(list) {
|
sel, err := metav1.ParseToLabelSelector(metricSelector.String())
|
||||||
return nil, apierr.NewInternalError(fmt.Errorf("result of label selector list operation was not a list"))
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
metric.Metric.Selector = sel
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return metric, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *prometheusProvider) metricsFor(valueSet pmodel.Vector, namespace string, names []string, info provider.CustomMetricInfo, metricSelector labels.Selector) (*custom_metrics.MetricValueList, error) {
|
||||||
values, found := p.MatchValuesToNames(info, valueSet)
|
values, found := p.MatchValuesToNames(info, valueSet)
|
||||||
if !found {
|
if !found {
|
||||||
return nil, provider.NewMetricNotFoundError(info.GroupResource, info.Metric)
|
return nil, provider.NewMetricNotFoundError(info.GroupResource, info.Metric)
|
||||||
}
|
}
|
||||||
res := []custom_metrics.MetricValue{}
|
res := []custom_metrics.MetricValue{}
|
||||||
|
|
||||||
err := apimeta.EachListItem(list, func(item runtime.Object) error {
|
for _, name := range names {
|
||||||
objUnstructured := item.(*unstructured.Unstructured)
|
if _, found := values[name]; !found {
|
||||||
objName := objUnstructured.GetName()
|
continue
|
||||||
if _, found := values[objName]; !found {
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
value, err := p.metricFor(values[objName], info.GroupResource, objUnstructured.GetNamespace(), objName, info.Metric)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
res = append(res, *value)
|
|
||||||
|
|
||||||
return nil
|
value, err := p.metricFor(values[name], types.NamespacedName{Namespace: namespace, Name: name}, info, metricSelector)
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
res = append(res, *value)
|
||||||
|
}
|
||||||
|
|
||||||
return &custom_metrics.MetricValueList{
|
return &custom_metrics.MetricValueList{
|
||||||
Items: res,
|
Items: res,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *prometheusProvider) buildQuery(info provider.CustomMetricInfo, namespace string, names ...string) (pmodel.Vector, error) {
|
func (p *prometheusProvider) buildQuery(ctx context.Context, info provider.CustomMetricInfo, namespace string, metricSelector labels.Selector, names ...string) (pmodel.Vector, error) {
|
||||||
query, found := p.QueryForMetric(info, namespace, names...)
|
query, found := p.QueryForMetric(info, namespace, metricSelector, names...)
|
||||||
if !found {
|
if !found {
|
||||||
return nil, provider.NewMetricNotFoundError(info.GroupResource, info.Metric)
|
return nil, provider.NewMetricNotFoundError(info.GroupResource, info.Metric)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: use an actual context
|
// TODO: use an actual context
|
||||||
queryResults, err := p.promClient.Query(context.TODO(), pmodel.Now(), query)
|
queryResults, err := p.promClient.Query(ctx, pmodel.Now(), query)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Errorf("unable to fetch metrics from prometheus: %v", err)
|
klog.Errorf("unable to fetch metrics from prometheus: %v", err)
|
||||||
// don't leak implementation details to the user
|
// don't leak implementation details to the user
|
||||||
return nil, apierr.NewInternalError(fmt.Errorf("unable to fetch metrics"))
|
return nil, apierr.NewInternalError(fmt.Errorf("unable to fetch metrics"))
|
||||||
}
|
}
|
||||||
|
|
||||||
if queryResults.Type != pmodel.ValVector {
|
if queryResults.Type != pmodel.ValVector {
|
||||||
glog.Errorf("unexpected results from prometheus: expected %s, got %s on results %v", pmodel.ValVector, queryResults.Type, queryResults)
|
klog.Errorf("unexpected results from prometheus: expected %s, got %s on results %v", pmodel.ValVector, queryResults.Type, queryResults)
|
||||||
return nil, apierr.NewInternalError(fmt.Errorf("unable to fetch metrics"))
|
return nil, apierr.NewInternalError(fmt.Errorf("unable to fetch metrics"))
|
||||||
}
|
}
|
||||||
|
|
||||||
return *queryResults.Vector, nil
|
return *queryResults.Vector, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *prometheusProvider) getSingle(info provider.CustomMetricInfo, namespace, name string) (*custom_metrics.MetricValue, error) {
|
func (p *prometheusProvider) GetMetricByName(ctx context.Context, name types.NamespacedName, info provider.CustomMetricInfo, metricSelector labels.Selector) (*custom_metrics.MetricValue, error) {
|
||||||
queryResults, err := p.buildQuery(info, namespace, name)
|
// construct a query
|
||||||
|
queryResults, err := p.buildQuery(ctx, info, name.Namespace, metricSelector, name.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// associate the metrics
|
||||||
if len(queryResults) < 1 {
|
if len(queryResults) < 1 {
|
||||||
return nil, provider.NewMetricNotFoundForError(info.GroupResource, info.Metric, name)
|
return nil, provider.NewMetricNotFoundForError(info.GroupResource, info.Metric, name.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
namedValues, found := p.MatchValuesToNames(info, queryResults)
|
namedValues, found := p.MatchValuesToNames(info, queryResults)
|
||||||
|
|
@ -167,100 +178,36 @@ func (p *prometheusProvider) getSingle(info provider.CustomMetricInfo, namespace
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(namedValues) > 1 {
|
if len(namedValues) > 1 {
|
||||||
glog.V(2).Infof("Got more than one result (%v results) when fetching metric %s for %q, using the first one with a matching name...", len(queryResults), info.String(), name)
|
klog.V(2).Infof("Got more than one result (%v results) when fetching metric %s for %q, using the first one with a matching name...", len(queryResults), info.String(), name)
|
||||||
}
|
}
|
||||||
|
|
||||||
resultValue, nameFound := namedValues[name]
|
resultValue, nameFound := namedValues[name.Name]
|
||||||
if !nameFound {
|
if !nameFound {
|
||||||
glog.Errorf("None of the results returned by when fetching metric %s for %q matched the resource name", info.String(), name)
|
klog.Errorf("None of the results returned by when fetching metric %s for %q matched the resource name", info.String(), name)
|
||||||
return nil, provider.NewMetricNotFoundForError(info.GroupResource, info.Metric, name)
|
return nil, provider.NewMetricNotFoundForError(info.GroupResource, info.Metric, name.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
return p.metricFor(resultValue, info.GroupResource, "", name, info.Metric)
|
// return the resulting metric
|
||||||
|
return p.metricFor(resultValue, name, info, metricSelector)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *prometheusProvider) getMultiple(info provider.CustomMetricInfo, namespace string, selector labels.Selector) (*custom_metrics.MetricValueList, error) {
|
func (p *prometheusProvider) GetMetricBySelector(ctx context.Context, namespace string, selector labels.Selector, info provider.CustomMetricInfo, metricSelector labels.Selector) (*custom_metrics.MetricValueList, error) {
|
||||||
fullResources, err := p.mapper.ResourcesFor(info.GroupResource.WithVersion(""))
|
// fetch a list of relevant resource names
|
||||||
if err == nil && len(fullResources) == 0 {
|
resourceNames, err := helpers.ListObjectNames(p.mapper, p.kubeClient, namespace, selector, info)
|
||||||
err = fmt.Errorf("no fully versioned resources known for group-resource %v", info.GroupResource)
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Errorf("unable to find preferred version to list matching resource names: %v", err)
|
klog.Errorf("unable to list matching resource names: %v", err)
|
||||||
// don't leak implementation details to the user
|
// don't leak implementation details to the user
|
||||||
return nil, apierr.NewInternalError(fmt.Errorf("unable to list matching resources"))
|
return nil, apierr.NewInternalError(fmt.Errorf("unable to list matching resources"))
|
||||||
}
|
}
|
||||||
var client dynamic.ResourceInterface
|
|
||||||
if namespace != "" {
|
|
||||||
client = p.kubeClient.Resource(fullResources[0]).Namespace(namespace)
|
|
||||||
} else {
|
|
||||||
client = p.kubeClient.Resource(fullResources[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
// actually list the objects matching the label selector
|
|
||||||
matchingObjectsRaw, err := client.List(metav1.ListOptions{LabelSelector: selector.String()})
|
|
||||||
if err != nil {
|
|
||||||
glog.Errorf("unable to list matching resource names: %v", err)
|
|
||||||
// don't leak implementation details to the user
|
|
||||||
return nil, apierr.NewInternalError(fmt.Errorf("unable to list matching resources"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// make sure we have a list
|
|
||||||
if !apimeta.IsListType(matchingObjectsRaw) {
|
|
||||||
return nil, apierr.NewInternalError(fmt.Errorf("result of label selector list operation was not a list"))
|
|
||||||
}
|
|
||||||
|
|
||||||
// convert a list of objects into the corresponding list of names
|
|
||||||
resourceNames := []string{}
|
|
||||||
err = apimeta.EachListItem(matchingObjectsRaw, func(item runtime.Object) error {
|
|
||||||
objName := item.(*unstructured.Unstructured).GetName()
|
|
||||||
resourceNames = append(resourceNames, objName)
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
|
|
||||||
// construct the actual query
|
// construct the actual query
|
||||||
queryResults, err := p.buildQuery(info, namespace, resourceNames...)
|
queryResults, err := p.buildQuery(ctx, info, namespace, metricSelector, resourceNames...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return p.metricsFor(queryResults, info, matchingObjectsRaw)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *prometheusProvider) GetRootScopedMetricByName(groupResource schema.GroupResource, name string, metricName string) (*custom_metrics.MetricValue, error) {
|
// return the resulting metrics
|
||||||
info := provider.CustomMetricInfo{
|
return p.metricsFor(queryResults, namespace, resourceNames, info, metricSelector)
|
||||||
GroupResource: groupResource,
|
|
||||||
Metric: metricName,
|
|
||||||
Namespaced: false,
|
|
||||||
}
|
|
||||||
|
|
||||||
return p.getSingle(info, "", name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *prometheusProvider) GetRootScopedMetricBySelector(groupResource schema.GroupResource, selector labels.Selector, metricName string) (*custom_metrics.MetricValueList, error) {
|
|
||||||
info := provider.CustomMetricInfo{
|
|
||||||
GroupResource: groupResource,
|
|
||||||
Metric: metricName,
|
|
||||||
Namespaced: false,
|
|
||||||
}
|
|
||||||
return p.getMultiple(info, "", selector)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *prometheusProvider) GetNamespacedMetricByName(groupResource schema.GroupResource, namespace string, name string, metricName string) (*custom_metrics.MetricValue, error) {
|
|
||||||
info := provider.CustomMetricInfo{
|
|
||||||
GroupResource: groupResource,
|
|
||||||
Metric: metricName,
|
|
||||||
Namespaced: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
return p.getSingle(info, namespace, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *prometheusProvider) GetNamespacedMetricBySelector(groupResource schema.GroupResource, namespace string, selector labels.Selector, metricName string) (*custom_metrics.MetricValueList, error) {
|
|
||||||
info := provider.CustomMetricInfo{
|
|
||||||
GroupResource: groupResource,
|
|
||||||
Metric: metricName,
|
|
||||||
Namespaced: true,
|
|
||||||
}
|
|
||||||
return p.getMultiple(info, namespace, selector)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type cachingMetricsLister struct {
|
type cachingMetricsLister struct {
|
||||||
|
|
@ -268,7 +215,8 @@ type cachingMetricsLister struct {
|
||||||
|
|
||||||
promClient prom.Client
|
promClient prom.Client
|
||||||
updateInterval time.Duration
|
updateInterval time.Duration
|
||||||
namers []MetricNamer
|
maxAge time.Duration
|
||||||
|
namers []naming.MetricNamer
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *cachingMetricsLister) Run() {
|
func (l *cachingMetricsLister) Run() {
|
||||||
|
|
@ -289,7 +237,7 @@ type selectorSeries struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *cachingMetricsLister) updateMetrics() error {
|
func (l *cachingMetricsLister) updateMetrics() error {
|
||||||
startTime := pmodel.Now().Add(-1 * l.updateInterval)
|
startTime := pmodel.Now().Add(-1 * l.maxAge)
|
||||||
|
|
||||||
// don't do duplicate queries when it's just the matchers that change
|
// don't do duplicate queries when it's just the matchers that change
|
||||||
seriesCacheByQuery := make(map[prom.Selector][]prom.Series)
|
seriesCacheByQuery := make(map[prom.Selector][]prom.Series)
|
||||||
|
|
@ -308,7 +256,7 @@ func (l *cachingMetricsLister) updateMetrics() error {
|
||||||
}
|
}
|
||||||
selectors[sel] = struct{}{}
|
selectors[sel] = struct{}{}
|
||||||
go func() {
|
go func() {
|
||||||
series, err := l.promClient.Series(context.TODO(), pmodel.Interval{startTime, 0}, sel)
|
series, err := l.promClient.Series(context.TODO(), pmodel.Interval{Start: startTime, End: 0}, sel)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errs <- fmt.Errorf("unable to fetch metrics for query %q: %v", sel, err)
|
errs <- fmt.Errorf("unable to fetch metrics for query %q: %v", sel, err)
|
||||||
return
|
return
|
||||||
|
|
@ -341,7 +289,7 @@ func (l *cachingMetricsLister) updateMetrics() error {
|
||||||
newSeries[i] = namer.FilterSeries(series)
|
newSeries[i] = namer.FilterSeries(series)
|
||||||
}
|
}
|
||||||
|
|
||||||
glog.V(10).Infof("Set available metric list from Prometheus to: %v", newSeries)
|
klog.V(10).Infof("Set available metric list from Prometheus to: %v", newSeries)
|
||||||
|
|
||||||
return l.SetSeries(newSeries, l.namers)
|
return l.SetSeries(newSeries, l.namers)
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -14,8 +14,16 @@ See the License for the specific language governing permissions and
|
||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// +k8s:deepcopy-gen=package
|
package provider_test
|
||||||
// +k8s:openapi-gen=true
|
|
||||||
|
|
||||||
// +groupName=scheduling.k8s.io
|
import (
|
||||||
package v1beta1 // import "k8s.io/api/scheduling/v1beta1"
|
"testing"
|
||||||
|
|
||||||
|
. "github.com/onsi/ginkgo"
|
||||||
|
. "github.com/onsi/gomega"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestProvider(t *testing.T) {
|
||||||
|
RegisterFailHandler(Fail)
|
||||||
|
RunSpecs(t, "Custom Metrics Provider Suite")
|
||||||
|
}
|
||||||
|
|
@ -17,93 +17,43 @@ limitations under the License.
|
||||||
package provider
|
package provider
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"sort"
|
|
||||||
"testing"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/provider"
|
. "github.com/onsi/ginkgo"
|
||||||
"github.com/stretchr/testify/assert"
|
. "github.com/onsi/gomega"
|
||||||
"github.com/stretchr/testify/require"
|
pmodel "github.com/prometheus/common/model"
|
||||||
|
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
fakedyn "k8s.io/client-go/dynamic/fake"
|
fakedyn "k8s.io/client-go/dynamic/fake"
|
||||||
|
|
||||||
config "github.com/directxman12/k8s-prometheus-adapter/cmd/config-gen/utils"
|
"sigs.k8s.io/custom-metrics-apiserver/pkg/provider"
|
||||||
prom "github.com/directxman12/k8s-prometheus-adapter/pkg/client"
|
|
||||||
pmodel "github.com/prometheus/common/model"
|
config "sigs.k8s.io/prometheus-adapter/cmd/config-gen/utils"
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
fakeprom "sigs.k8s.io/prometheus-adapter/pkg/client/fake"
|
||||||
|
"sigs.k8s.io/prometheus-adapter/pkg/naming"
|
||||||
)
|
)
|
||||||
|
|
||||||
const fakeProviderUpdateInterval = 2 * time.Second
|
const fakeProviderUpdateInterval = 2 * time.Second
|
||||||
|
const fakeProviderStartDuration = 2 * time.Second
|
||||||
|
|
||||||
// fakePromClient is a fake instance of prom.Client
|
func setupPrometheusProvider() (provider.CustomMetricsProvider, *fakeprom.FakePrometheusClient) {
|
||||||
type fakePromClient struct {
|
fakeProm := &fakeprom.FakePrometheusClient{}
|
||||||
// acceptibleInterval is the interval in which to return queries
|
|
||||||
acceptibleInterval pmodel.Interval
|
|
||||||
// errQueries are queries that result in an error (whether from Query or Series)
|
|
||||||
errQueries map[prom.Selector]error
|
|
||||||
// series are non-error responses to partial Series calls
|
|
||||||
series map[prom.Selector][]prom.Series
|
|
||||||
// queryResults are non-error responses to Query
|
|
||||||
queryResults map[prom.Selector]prom.QueryResult
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakePromClient) Series(_ context.Context, interval pmodel.Interval, selectors ...prom.Selector) ([]prom.Series, error) {
|
|
||||||
if (interval.Start != 0 && interval.Start < c.acceptibleInterval.Start) || (interval.End != 0 && interval.End > c.acceptibleInterval.End) {
|
|
||||||
return nil, fmt.Errorf("interval [%v, %v] for query is outside range [%v, %v]", interval.Start, interval.End, c.acceptibleInterval.Start, c.acceptibleInterval.End)
|
|
||||||
}
|
|
||||||
res := []prom.Series{}
|
|
||||||
for _, sel := range selectors {
|
|
||||||
if err, found := c.errQueries[sel]; found {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if series, found := c.series[sel]; found {
|
|
||||||
res = append(res, series...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *fakePromClient) Query(_ context.Context, t pmodel.Time, query prom.Selector) (prom.QueryResult, error) {
|
|
||||||
if t < c.acceptibleInterval.Start || t > c.acceptibleInterval.End {
|
|
||||||
return prom.QueryResult{}, fmt.Errorf("time %v for query is outside range [%v, %v]", t, c.acceptibleInterval.Start, c.acceptibleInterval.End)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err, found := c.errQueries[query]; found {
|
|
||||||
return prom.QueryResult{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if res, found := c.queryResults[query]; found {
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return prom.QueryResult{
|
|
||||||
Type: pmodel.ValVector,
|
|
||||||
Vector: &pmodel.Vector{},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
func (c *fakePromClient) QueryRange(_ context.Context, r prom.Range, query prom.Selector) (prom.QueryResult, error) {
|
|
||||||
return prom.QueryResult{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func setupPrometheusProvider(t *testing.T) (provider.CustomMetricsProvider, *fakePromClient) {
|
|
||||||
fakeProm := &fakePromClient{}
|
|
||||||
fakeKubeClient := &fakedyn.FakeDynamicClient{}
|
fakeKubeClient := &fakedyn.FakeDynamicClient{}
|
||||||
|
|
||||||
cfg := config.DefaultConfig(1*time.Minute, "")
|
cfg := config.DefaultConfig(1*time.Minute, "")
|
||||||
namers, err := NamersFromConfig(cfg, restMapper())
|
namers, err := naming.NamersFromConfig(cfg.Rules, restMapper())
|
||||||
require.NoError(t, err)
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
|
||||||
prov, _ := NewPrometheusProvider(restMapper(), fakeKubeClient, fakeProm, namers, fakeProviderUpdateInterval)
|
prov, _ := NewPrometheusProvider(restMapper(), fakeKubeClient, fakeProm, namers, fakeProviderUpdateInterval, fakeProviderStartDuration)
|
||||||
|
|
||||||
containerSel := prom.MatchSeries("", prom.NameMatches("^container_.*"), prom.LabelNeq("container_name", "POD"), prom.LabelNeq("namespace", ""), prom.LabelNeq("pod_name", ""))
|
containerSel := prom.MatchSeries("", prom.NameMatches("^container_.*"), prom.LabelNeq("container", "POD"), prom.LabelNeq("namespace", ""), prom.LabelNeq("pod", ""))
|
||||||
namespacedSel := prom.MatchSeries("", prom.LabelNeq("namespace", ""), prom.NameNotMatches("^container_.*"))
|
namespacedSel := prom.MatchSeries("", prom.LabelNeq("namespace", ""), prom.NameNotMatches("^container_.*"))
|
||||||
fakeProm.series = map[prom.Selector][]prom.Series{
|
fakeProm.SeriesResults = map[prom.Selector][]prom.Series{
|
||||||
containerSel: {
|
containerSel: {
|
||||||
{
|
{
|
||||||
Name: "container_some_usage",
|
Name: "container_some_usage",
|
||||||
Labels: pmodel.LabelSet{"pod_name": "somepod", "namespace": "somens", "container_name": "somecont"},
|
Labels: pmodel.LabelSet{"pod": "somepod", "namespace": "somens", "container": "somecont"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
namespacedSel: {
|
namespacedSel: {
|
||||||
|
|
@ -129,39 +79,35 @@ func setupPrometheusProvider(t *testing.T) (provider.CustomMetricsProvider, *fak
|
||||||
return prov, fakeProm
|
return prov, fakeProm
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestListAllMetrics(t *testing.T) {
|
var _ = Describe("Custom Metrics Provider", func() {
|
||||||
// setup
|
It("should be able to list all metrics", func() {
|
||||||
prov, fakeProm := setupPrometheusProvider(t)
|
By("setting up the provider")
|
||||||
|
prov, fakeProm := setupPrometheusProvider()
|
||||||
|
|
||||||
// assume we have no updates
|
By("ensuring that no metrics are present before we start listing")
|
||||||
require.Len(t, prov.ListAllMetrics(), 0, "assume: should have no metrics updates at the start")
|
Expect(prov.ListAllMetrics()).To(BeEmpty())
|
||||||
|
|
||||||
// set the acceptible interval (now until the next update, with a bit of wiggle room)
|
By("setting the acceptable interval to now until the next update, with a bit of wiggle room")
|
||||||
startTime := pmodel.Now().Add(-1*fakeProviderUpdateInterval - fakeProviderUpdateInterval/10)
|
startTime := pmodel.Now().Add(-1*fakeProviderUpdateInterval - fakeProviderUpdateInterval/10)
|
||||||
fakeProm.acceptibleInterval = pmodel.Interval{Start: startTime, End: 0}
|
fakeProm.AcceptableInterval = pmodel.Interval{Start: startTime, End: 0}
|
||||||
|
|
||||||
// update the metrics (without actually calling RunUntil, so we can avoid timing issues)
|
By("updating the list of available metrics")
|
||||||
|
// don't call RunUntil to avoid timing issue
|
||||||
lister := prov.(*prometheusProvider).SeriesRegistry.(*cachingMetricsLister)
|
lister := prov.(*prometheusProvider).SeriesRegistry.(*cachingMetricsLister)
|
||||||
require.NoError(t, lister.updateMetrics())
|
Expect(lister.updateMetrics()).To(Succeed())
|
||||||
|
|
||||||
// list/sort the metrics
|
By("listing all metrics, and checking that they contain the expected results")
|
||||||
actualMetrics := prov.ListAllMetrics()
|
Expect(prov.ListAllMetrics()).To(ConsistOf(
|
||||||
sort.Sort(metricInfoSorter(actualMetrics))
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "services"}, Namespaced: true, Metric: "ingress_hits"},
|
||||||
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Group: "extensions", Resource: "ingresses"}, Namespaced: true, Metric: "ingress_hits"},
|
||||||
expectedMetrics := []provider.CustomMetricInfo{
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "pods"}, Namespaced: true, Metric: "ingress_hits"},
|
||||||
{schema.GroupResource{Resource: "services"}, true, "ingress_hits"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "namespaces"}, Namespaced: false, Metric: "ingress_hits"},
|
||||||
{schema.GroupResource{Group: "extensions", Resource: "ingresses"}, true, "ingress_hits"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "services"}, Namespaced: true, Metric: "service_proxy_packets"},
|
||||||
{schema.GroupResource{Resource: "pods"}, true, "ingress_hits"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "namespaces"}, Namespaced: false, Metric: "service_proxy_packets"},
|
||||||
{schema.GroupResource{Resource: "namespaces"}, false, "ingress_hits"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Group: "extensions", Resource: "deployments"}, Namespaced: true, Metric: "work_queue_wait"},
|
||||||
{schema.GroupResource{Resource: "services"}, true, "service_proxy_packets"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "namespaces"}, Namespaced: false, Metric: "work_queue_wait"},
|
||||||
{schema.GroupResource{Resource: "namespaces"}, false, "service_proxy_packets"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "namespaces"}, Namespaced: false, Metric: "some_usage"},
|
||||||
{schema.GroupResource{Group: "extensions", Resource: "deployments"}, true, "work_queue_wait"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "pods"}, Namespaced: true, Metric: "some_usage"},
|
||||||
{schema.GroupResource{Resource: "namespaces"}, false, "work_queue_wait"},
|
))
|
||||||
{schema.GroupResource{Resource: "namespaces"}, false, "some_usage"},
|
})
|
||||||
{schema.GroupResource{Resource: "pods"}, true, "some_usage"},
|
})
|
||||||
}
|
|
||||||
sort.Sort(metricInfoSorter(expectedMetrics))
|
|
||||||
|
|
||||||
// assert that we got what we expected
|
|
||||||
assert.Equal(t, expectedMetrics, actualMetrics)
|
|
||||||
}
|
|
||||||
|
|
|
||||||
|
|
@ -20,12 +20,16 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/provider"
|
|
||||||
apimeta "k8s.io/apimachinery/pkg/api/meta"
|
|
||||||
|
|
||||||
prom "github.com/directxman12/k8s-prometheus-adapter/pkg/client"
|
|
||||||
"github.com/golang/glog"
|
|
||||||
pmodel "github.com/prometheus/common/model"
|
pmodel "github.com/prometheus/common/model"
|
||||||
|
|
||||||
|
apimeta "k8s.io/apimachinery/pkg/api/meta"
|
||||||
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
|
||||||
|
"sigs.k8s.io/custom-metrics-apiserver/pkg/provider"
|
||||||
|
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
"sigs.k8s.io/prometheus-adapter/pkg/naming"
|
||||||
)
|
)
|
||||||
|
|
||||||
// NB: container metrics sourced from cAdvisor don't consistently follow naming conventions,
|
// NB: container metrics sourced from cAdvisor don't consistently follow naming conventions,
|
||||||
|
|
@ -45,12 +49,12 @@ const (
|
||||||
type SeriesRegistry interface {
|
type SeriesRegistry interface {
|
||||||
// SetSeries replaces the known series in this registry.
|
// SetSeries replaces the known series in this registry.
|
||||||
// Each slice in series should correspond to a MetricNamer in namers.
|
// Each slice in series should correspond to a MetricNamer in namers.
|
||||||
SetSeries(series [][]prom.Series, namers []MetricNamer) error
|
SetSeries(series [][]prom.Series, namers []naming.MetricNamer) error
|
||||||
// ListAllMetrics lists all metrics known to this registry
|
// ListAllMetrics lists all metrics known to this registry
|
||||||
ListAllMetrics() []provider.CustomMetricInfo
|
ListAllMetrics() []provider.CustomMetricInfo
|
||||||
// SeriesForMetric looks up the minimum required series information to make a query for the given metric
|
// SeriesForMetric looks up the minimum required series information to make a query for the given metric
|
||||||
// against the given resource (namespace may be empty for non-namespaced resources)
|
// against the given resource (namespace may be empty for non-namespaced resources)
|
||||||
QueryForMetric(info provider.CustomMetricInfo, namespace string, resourceNames ...string) (query prom.Selector, found bool)
|
QueryForMetric(info provider.CustomMetricInfo, namespace string, metricSelector labels.Selector, resourceNames ...string) (query prom.Selector, found bool)
|
||||||
// MatchValuesToNames matches result values to resource names for the given metric and value set
|
// MatchValuesToNames matches result values to resource names for the given metric and value set
|
||||||
MatchValuesToNames(metricInfo provider.CustomMetricInfo, values pmodel.Vector) (matchedValues map[string]pmodel.SampleValue, found bool)
|
MatchValuesToNames(metricInfo provider.CustomMetricInfo, values pmodel.Vector) (matchedValues map[string]pmodel.SampleValue, found bool)
|
||||||
}
|
}
|
||||||
|
|
@ -60,7 +64,7 @@ type seriesInfo struct {
|
||||||
seriesName string
|
seriesName string
|
||||||
|
|
||||||
// namer is the MetricNamer used to name this series
|
// namer is the MetricNamer used to name this series
|
||||||
namer MetricNamer
|
namer naming.MetricNamer
|
||||||
}
|
}
|
||||||
|
|
||||||
// overridableSeriesRegistry is a basic SeriesRegistry
|
// overridableSeriesRegistry is a basic SeriesRegistry
|
||||||
|
|
@ -75,7 +79,7 @@ type basicSeriesRegistry struct {
|
||||||
mapper apimeta.RESTMapper
|
mapper apimeta.RESTMapper
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *basicSeriesRegistry) SetSeries(newSeriesSlices [][]prom.Series, namers []MetricNamer) error {
|
func (r *basicSeriesRegistry) SetSeries(newSeriesSlices [][]prom.Series, namers []naming.MetricNamer) error {
|
||||||
if len(newSeriesSlices) != len(namers) {
|
if len(newSeriesSlices) != len(namers) {
|
||||||
return fmt.Errorf("need one set of series per namer")
|
return fmt.Errorf("need one set of series per namer")
|
||||||
}
|
}
|
||||||
|
|
@ -88,7 +92,7 @@ func (r *basicSeriesRegistry) SetSeries(newSeriesSlices [][]prom.Series, namers
|
||||||
resources, namespaced := namer.ResourcesForSeries(series)
|
resources, namespaced := namer.ResourcesForSeries(series)
|
||||||
name, err := namer.MetricNameForSeries(series)
|
name, err := namer.MetricNameForSeries(series)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Errorf("unable to name series %q, skipping: %v", series.String(), err)
|
klog.Errorf("unable to name series %q, skipping: %v", series.String(), err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
for _, resource := range resources {
|
for _, resource := range resources {
|
||||||
|
|
@ -98,8 +102,8 @@ func (r *basicSeriesRegistry) SetSeries(newSeriesSlices [][]prom.Series, namers
|
||||||
Metric: name,
|
Metric: name,
|
||||||
}
|
}
|
||||||
|
|
||||||
// namespace metrics aren't counted as namespaced
|
// some metrics aren't counted as namespaced
|
||||||
if resource == nsGroupResource {
|
if resource == naming.NsGroupResource || resource == naming.NodeGroupResource || resource == naming.PVGroupResource {
|
||||||
info.Namespaced = false
|
info.Namespaced = false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -134,30 +138,30 @@ func (r *basicSeriesRegistry) ListAllMetrics() []provider.CustomMetricInfo {
|
||||||
return r.metrics
|
return r.metrics
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *basicSeriesRegistry) QueryForMetric(metricInfo provider.CustomMetricInfo, namespace string, resourceNames ...string) (prom.Selector, bool) {
|
func (r *basicSeriesRegistry) QueryForMetric(metricInfo provider.CustomMetricInfo, namespace string, metricSelector labels.Selector, resourceNames ...string) (prom.Selector, bool) {
|
||||||
r.mu.RLock()
|
r.mu.RLock()
|
||||||
defer r.mu.RUnlock()
|
defer r.mu.RUnlock()
|
||||||
|
|
||||||
if len(resourceNames) == 0 {
|
if len(resourceNames) == 0 {
|
||||||
glog.Errorf("no resource names requested while producing a query for metric %s", metricInfo.String())
|
klog.Errorf("no resource names requested while producing a query for metric %s", metricInfo.String())
|
||||||
return "", false
|
return "", false
|
||||||
}
|
}
|
||||||
|
|
||||||
metricInfo, _, err := metricInfo.Normalized(r.mapper)
|
metricInfo, _, err := metricInfo.Normalized(r.mapper)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Errorf("unable to normalize group resource while producing a query: %v", err)
|
klog.Errorf("unable to normalize group resource while producing a query: %v", err)
|
||||||
return "", false
|
return "", false
|
||||||
}
|
}
|
||||||
|
|
||||||
info, infoFound := r.info[metricInfo]
|
info, infoFound := r.info[metricInfo]
|
||||||
if !infoFound {
|
if !infoFound {
|
||||||
glog.V(10).Infof("metric %v not registered", metricInfo)
|
klog.V(10).Infof("metric %v not registered", metricInfo)
|
||||||
return "", false
|
return "", false
|
||||||
}
|
}
|
||||||
|
|
||||||
query, err := info.namer.QueryForSeries(info.seriesName, metricInfo.GroupResource, namespace, resourceNames...)
|
query, err := info.namer.QueryForSeries(info.seriesName, metricInfo.GroupResource, namespace, metricSelector, resourceNames...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Errorf("unable to construct query for metric %s: %v", metricInfo.String(), err)
|
klog.Errorf("unable to construct query for metric %s: %v", metricInfo.String(), err)
|
||||||
return "", false
|
return "", false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -170,7 +174,7 @@ func (r *basicSeriesRegistry) MatchValuesToNames(metricInfo provider.CustomMetri
|
||||||
|
|
||||||
metricInfo, _, err := metricInfo.Normalized(r.mapper)
|
metricInfo, _, err := metricInfo.Normalized(r.mapper)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Errorf("unable to normalize group resource while matching values to names: %v", err)
|
klog.Errorf("unable to normalize group resource while matching values to names: %v", err)
|
||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -181,7 +185,7 @@ func (r *basicSeriesRegistry) MatchValuesToNames(metricInfo provider.CustomMetri
|
||||||
|
|
||||||
resourceLbl, err := info.namer.LabelForResource(metricInfo.GroupResource)
|
resourceLbl, err := info.namer.LabelForResource(metricInfo.GroupResource)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
glog.Errorf("unable to construct resource label for metric %s: %v", metricInfo.String(), err)
|
klog.Errorf("unable to construct resource label for metric %s: %v", metricInfo.String(), err)
|
||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -17,21 +17,26 @@ limitations under the License.
|
||||||
package provider
|
package provider
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"sort"
|
"fmt"
|
||||||
"testing"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/provider"
|
. "github.com/onsi/ginkgo"
|
||||||
|
. "github.com/onsi/gomega"
|
||||||
|
|
||||||
pmodel "github.com/prometheus/common/model"
|
pmodel "github.com/prometheus/common/model"
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
coreapi "k8s.io/api/core/v1"
|
coreapi "k8s.io/api/core/v1"
|
||||||
extapi "k8s.io/api/extensions/v1beta1"
|
extapi "k8s.io/api/extensions/v1beta1"
|
||||||
apimeta "k8s.io/apimachinery/pkg/api/meta"
|
apimeta "k8s.io/apimachinery/pkg/api/meta"
|
||||||
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/apimachinery/pkg/selection"
|
||||||
|
|
||||||
config "github.com/directxman12/k8s-prometheus-adapter/cmd/config-gen/utils"
|
"sigs.k8s.io/custom-metrics-apiserver/pkg/provider"
|
||||||
prom "github.com/directxman12/k8s-prometheus-adapter/pkg/client"
|
|
||||||
|
config "sigs.k8s.io/prometheus-adapter/cmd/config-gen/utils"
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
"sigs.k8s.io/prometheus-adapter/pkg/naming"
|
||||||
)
|
)
|
||||||
|
|
||||||
// restMapper creates a RESTMapper with just the types we need for
|
// restMapper creates a RESTMapper with just the types we need for
|
||||||
|
|
@ -51,10 +56,10 @@ func restMapper() apimeta.RESTMapper {
|
||||||
return mapper
|
return mapper
|
||||||
}
|
}
|
||||||
|
|
||||||
func setupMetricNamer(t testing.TB) []MetricNamer {
|
func setupMetricNamer() []naming.MetricNamer {
|
||||||
cfg := config.DefaultConfig(1*time.Minute, "kube_")
|
cfg := config.DefaultConfig(1*time.Minute, "kube_")
|
||||||
namers, err := NamersFromConfig(cfg, restMapper())
|
namers, err := naming.NamersFromConfig(cfg.Rules, restMapper())
|
||||||
require.NoError(t, err)
|
Expect(err).NotTo(HaveOccurred())
|
||||||
return namers
|
return namers
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -63,23 +68,23 @@ var seriesRegistryTestSeries = [][]prom.Series{
|
||||||
{
|
{
|
||||||
{
|
{
|
||||||
Name: "container_some_time_seconds_total",
|
Name: "container_some_time_seconds_total",
|
||||||
Labels: pmodel.LabelSet{"pod_name": "somepod", "namespace": "somens", "container_name": "somecont"},
|
Labels: pmodel.LabelSet{"pod": "somepod", "namespace": "somens", "container": "somecont"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
{
|
{
|
||||||
Name: "container_some_count_total",
|
Name: "container_some_count_total",
|
||||||
Labels: pmodel.LabelSet{"pod_name": "somepod", "namespace": "somens", "container_name": "somecont"},
|
Labels: pmodel.LabelSet{"pod": "somepod", "namespace": "somens", "container": "somecont"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
{
|
{
|
||||||
Name: "container_some_usage",
|
Name: "container_some_usage",
|
||||||
Labels: pmodel.LabelSet{"pod_name": "somepod", "namespace": "somens", "container_name": "somecont"},
|
Labels: pmodel.LabelSet{"pod": "somepod", "namespace": "somens", "container": "somecont"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
// guage metrics
|
// gauge metrics
|
||||||
{
|
{
|
||||||
Name: "node_gigawatts",
|
Name: "node_gigawatts",
|
||||||
Labels: pmodel.LabelSet{"kube_node": "somenode"},
|
Labels: pmodel.LabelSet{"kube_node": "somenode"},
|
||||||
|
|
@ -117,204 +122,183 @@ var seriesRegistryTestSeries = [][]prom.Series{
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSeriesRegistry(t *testing.T) {
|
type regTestCase struct {
|
||||||
assert := assert.New(t)
|
|
||||||
require := require.New(t)
|
|
||||||
|
|
||||||
namers := setupMetricNamer(t)
|
|
||||||
registry := &basicSeriesRegistry{
|
|
||||||
mapper: restMapper(),
|
|
||||||
}
|
|
||||||
|
|
||||||
// set up the registry
|
|
||||||
require.NoError(registry.SetSeries(seriesRegistryTestSeries, namers))
|
|
||||||
|
|
||||||
// make sure each metric got registered and can form queries
|
|
||||||
testCases := []struct {
|
|
||||||
title string
|
title string
|
||||||
info provider.CustomMetricInfo
|
info provider.CustomMetricInfo
|
||||||
namespace string
|
namespace string
|
||||||
resourceNames []string
|
resourceNames []string
|
||||||
|
metricSelector labels.Selector
|
||||||
|
|
||||||
expectedQuery string
|
expectedQuery string
|
||||||
}{
|
}
|
||||||
|
|
||||||
|
func mustNewLabelRequirement(key string, op selection.Operator, vals []string) *labels.Requirement {
|
||||||
|
req, err := labels.NewRequirement(key, op, vals)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return req
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ = Describe("Series Registry", func() {
|
||||||
|
var (
|
||||||
|
registry *basicSeriesRegistry
|
||||||
|
)
|
||||||
|
|
||||||
|
BeforeEach(func() {
|
||||||
|
namers := setupMetricNamer()
|
||||||
|
registry = &basicSeriesRegistry{
|
||||||
|
mapper: restMapper(),
|
||||||
|
}
|
||||||
|
Expect(registry.SetSeries(seriesRegistryTestSeries, namers)).To(Succeed())
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("with the default configuration rules", func() {
|
||||||
|
// make sure each metric got registered and can form queries
|
||||||
|
testCases := []regTestCase{
|
||||||
// container metrics
|
// container metrics
|
||||||
{
|
{
|
||||||
title: "container metrics gauge / multiple resource names",
|
title: "container metrics gauge / multiple resource names",
|
||||||
info: provider.CustomMetricInfo{schema.GroupResource{Resource: "pods"}, true, "some_usage"},
|
info: provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "pods"}, Namespaced: true, Metric: "some_usage"},
|
||||||
namespace: "somens",
|
namespace: "somens",
|
||||||
resourceNames: []string{"somepod1", "somepod2"},
|
resourceNames: []string{"somepod1", "somepod2"},
|
||||||
|
metricSelector: labels.Everything(),
|
||||||
|
|
||||||
expectedQuery: "sum(container_some_usage{namespace=\"somens\",pod_name=~\"somepod1|somepod2\",container_name!=\"POD\"}) by (pod_name)",
|
expectedQuery: "sum(container_some_usage{namespace=\"somens\",pod=~\"somepod1|somepod2\",container!=\"POD\"}) by (pod)",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
title: "container metrics counter",
|
title: "container metrics counter",
|
||||||
info: provider.CustomMetricInfo{schema.GroupResource{Resource: "pods"}, true, "some_count"},
|
info: provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "pods"}, Namespaced: true, Metric: "some_count"},
|
||||||
namespace: "somens",
|
namespace: "somens",
|
||||||
resourceNames: []string{"somepod1", "somepod2"},
|
resourceNames: []string{"somepod1", "somepod2"},
|
||||||
|
metricSelector: labels.Everything(),
|
||||||
|
|
||||||
expectedQuery: "sum(rate(container_some_count_total{namespace=\"somens\",pod_name=~\"somepod1|somepod2\",container_name!=\"POD\"}[1m])) by (pod_name)",
|
expectedQuery: "sum(rate(container_some_count_total{namespace=\"somens\",pod=~\"somepod1|somepod2\",container!=\"POD\"}[1m])) by (pod)",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
title: "container metrics seconds counter",
|
title: "container metrics seconds counter",
|
||||||
info: provider.CustomMetricInfo{schema.GroupResource{Resource: "pods"}, true, "some_time"},
|
info: provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "pods"}, Namespaced: true, Metric: "some_time"},
|
||||||
namespace: "somens",
|
namespace: "somens",
|
||||||
resourceNames: []string{"somepod1", "somepod2"},
|
resourceNames: []string{"somepod1", "somepod2"},
|
||||||
|
metricSelector: labels.Everything(),
|
||||||
|
|
||||||
expectedQuery: "sum(rate(container_some_time_seconds_total{namespace=\"somens\",pod_name=~\"somepod1|somepod2\",container_name!=\"POD\"}[1m])) by (pod_name)",
|
expectedQuery: "sum(rate(container_some_time_seconds_total{namespace=\"somens\",pod=~\"somepod1|somepod2\",container!=\"POD\"}[1m])) by (pod)",
|
||||||
},
|
},
|
||||||
// namespaced metrics
|
// namespaced metrics
|
||||||
{
|
{
|
||||||
title: "namespaced metrics counter / multidimensional (service)",
|
title: "namespaced metrics counter / multidimensional (service)",
|
||||||
info: provider.CustomMetricInfo{schema.GroupResource{Resource: "service"}, true, "ingress_hits"},
|
info: provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "service"}, Namespaced: true, Metric: "ingress_hits"},
|
||||||
namespace: "somens",
|
namespace: "somens",
|
||||||
resourceNames: []string{"somesvc"},
|
resourceNames: []string{"somesvc"},
|
||||||
|
metricSelector: labels.Everything(),
|
||||||
|
|
||||||
expectedQuery: "sum(rate(ingress_hits_total{kube_namespace=\"somens\",kube_service=\"somesvc\"}[1m])) by (kube_service)",
|
expectedQuery: "sum(rate(ingress_hits_total{kube_namespace=\"somens\",kube_service=\"somesvc\"}[1m])) by (kube_service)",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
title: "namespaced metrics counter / multidimensional (service) / selection using labels",
|
||||||
|
info: provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "service"}, Namespaced: true, Metric: "ingress_hits"},
|
||||||
|
namespace: "somens",
|
||||||
|
resourceNames: []string{"somesvc"},
|
||||||
|
metricSelector: labels.NewSelector().Add(
|
||||||
|
*mustNewLabelRequirement("param1", selection.Equals, []string{"value1"}),
|
||||||
|
),
|
||||||
|
expectedQuery: "sum(rate(ingress_hits_total{param1=\"value1\",kube_namespace=\"somens\",kube_service=\"somesvc\"}[1m])) by (kube_service)",
|
||||||
|
},
|
||||||
{
|
{
|
||||||
title: "namespaced metrics counter / multidimensional (ingress)",
|
title: "namespaced metrics counter / multidimensional (ingress)",
|
||||||
info: provider.CustomMetricInfo{schema.GroupResource{Group: "extensions", Resource: "ingress"}, true, "ingress_hits"},
|
info: provider.CustomMetricInfo{GroupResource: schema.GroupResource{Group: "extensions", Resource: "ingress"}, Namespaced: true, Metric: "ingress_hits"},
|
||||||
namespace: "somens",
|
namespace: "somens",
|
||||||
resourceNames: []string{"someingress"},
|
resourceNames: []string{"someingress"},
|
||||||
|
metricSelector: labels.Everything(),
|
||||||
|
|
||||||
expectedQuery: "sum(rate(ingress_hits_total{kube_namespace=\"somens\",kube_ingress=\"someingress\"}[1m])) by (kube_ingress)",
|
expectedQuery: "sum(rate(ingress_hits_total{kube_namespace=\"somens\",kube_ingress=\"someingress\"}[1m])) by (kube_ingress)",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
title: "namespaced metrics counter / multidimensional (pod)",
|
title: "namespaced metrics counter / multidimensional (pod)",
|
||||||
info: provider.CustomMetricInfo{schema.GroupResource{Resource: "pod"}, true, "ingress_hits"},
|
info: provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "pod"}, Namespaced: true, Metric: "ingress_hits"},
|
||||||
namespace: "somens",
|
namespace: "somens",
|
||||||
resourceNames: []string{"somepod"},
|
resourceNames: []string{"somepod"},
|
||||||
|
metricSelector: labels.Everything(),
|
||||||
|
|
||||||
expectedQuery: "sum(rate(ingress_hits_total{kube_namespace=\"somens\",kube_pod=\"somepod\"}[1m])) by (kube_pod)",
|
expectedQuery: "sum(rate(ingress_hits_total{kube_namespace=\"somens\",kube_pod=\"somepod\"}[1m])) by (kube_pod)",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
title: "namespaced metrics gauge",
|
title: "namespaced metrics gauge",
|
||||||
info: provider.CustomMetricInfo{schema.GroupResource{Resource: "service"}, true, "service_proxy_packets"},
|
info: provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "service"}, Namespaced: true, Metric: "service_proxy_packets"},
|
||||||
namespace: "somens",
|
namespace: "somens",
|
||||||
resourceNames: []string{"somesvc"},
|
resourceNames: []string{"somesvc"},
|
||||||
|
metricSelector: labels.Everything(),
|
||||||
|
|
||||||
expectedQuery: "sum(service_proxy_packets{kube_namespace=\"somens\",kube_service=\"somesvc\"}) by (kube_service)",
|
expectedQuery: "sum(service_proxy_packets{kube_namespace=\"somens\",kube_service=\"somesvc\"}) by (kube_service)",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
title: "namespaced metrics seconds counter",
|
title: "namespaced metrics seconds counter",
|
||||||
info: provider.CustomMetricInfo{schema.GroupResource{Group: "extensions", Resource: "deployment"}, true, "work_queue_wait"},
|
info: provider.CustomMetricInfo{GroupResource: schema.GroupResource{Group: "extensions", Resource: "deployment"}, Namespaced: true, Metric: "work_queue_wait"},
|
||||||
namespace: "somens",
|
namespace: "somens",
|
||||||
resourceNames: []string{"somedep"},
|
resourceNames: []string{"somedep"},
|
||||||
|
metricSelector: labels.Everything(),
|
||||||
|
|
||||||
expectedQuery: "sum(rate(work_queue_wait_seconds_total{kube_namespace=\"somens\",kube_deployment=\"somedep\"}[1m])) by (kube_deployment)",
|
expectedQuery: "sum(rate(work_queue_wait_seconds_total{kube_namespace=\"somens\",kube_deployment=\"somedep\"}[1m])) by (kube_deployment)",
|
||||||
},
|
},
|
||||||
// non-namespaced series
|
// non-namespaced series
|
||||||
{
|
{
|
||||||
title: "root scoped metrics gauge",
|
title: "root scoped metrics gauge",
|
||||||
info: provider.CustomMetricInfo{schema.GroupResource{Resource: "node"}, false, "node_gigawatts"},
|
info: provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "node"}, Namespaced: false, Metric: "node_gigawatts"},
|
||||||
resourceNames: []string{"somenode"},
|
resourceNames: []string{"somenode"},
|
||||||
|
metricSelector: labels.Everything(),
|
||||||
|
|
||||||
expectedQuery: "sum(node_gigawatts{kube_node=\"somenode\"}) by (kube_node)",
|
expectedQuery: "sum(node_gigawatts{kube_node=\"somenode\"}) by (kube_node)",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
title: "root scoped metrics counter",
|
title: "root scoped metrics counter",
|
||||||
info: provider.CustomMetricInfo{schema.GroupResource{Resource: "persistentvolume"}, false, "volume_claims"},
|
info: provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "persistentvolume"}, Namespaced: false, Metric: "volume_claims"},
|
||||||
resourceNames: []string{"somepv"},
|
resourceNames: []string{"somepv"},
|
||||||
|
metricSelector: labels.Everything(),
|
||||||
|
|
||||||
expectedQuery: "sum(rate(volume_claims_total{kube_persistentvolume=\"somepv\"}[1m])) by (kube_persistentvolume)",
|
expectedQuery: "sum(rate(volume_claims_total{kube_persistentvolume=\"somepv\"}[1m])) by (kube_persistentvolume)",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
title: "root scoped metrics seconds counter",
|
title: "root scoped metrics seconds counter",
|
||||||
info: provider.CustomMetricInfo{schema.GroupResource{Resource: "node"}, false, "node_fan"},
|
info: provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "node"}, Namespaced: false, Metric: "node_fan"},
|
||||||
resourceNames: []string{"somenode"},
|
resourceNames: []string{"somenode"},
|
||||||
|
metricSelector: labels.Everything(),
|
||||||
|
|
||||||
expectedQuery: "sum(rate(node_fan_seconds_total{kube_node=\"somenode\"}[1m])) by (kube_node)",
|
expectedQuery: "sum(rate(node_fan_seconds_total{kube_node=\"somenode\"}[1m])) by (kube_node)",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, testCase := range testCases {
|
for _, tc := range testCases {
|
||||||
outputQuery, found := registry.QueryForMetric(testCase.info, testCase.namespace, testCase.resourceNames...)
|
tc := tc // copy to avoid iteration variable issues
|
||||||
if !assert.True(found, "%s: metric %v should available", testCase.title, testCase.info) {
|
It(fmt.Sprintf("should build a query for %s", tc.title), func() {
|
||||||
continue
|
By(fmt.Sprintf("composing the query for the %s metric on %v in namespace %s", tc.info, tc.resourceNames, tc.namespace))
|
||||||
|
outputQuery, found := registry.QueryForMetric(tc.info, tc.namespace, tc.metricSelector, tc.resourceNames...)
|
||||||
|
Expect(found).To(BeTrue(), "metric %s should be available", tc.info)
|
||||||
|
|
||||||
|
By("verifying that the query is as expected")
|
||||||
|
Expect(outputQuery).To(Equal(prom.Selector(tc.expectedQuery)))
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
assert.Equal(prom.Selector(testCase.expectedQuery), outputQuery, "%s: metric %v should have produced the correct query for %v in namespace %s", testCase.title, testCase.info, testCase.resourceNames, testCase.namespace)
|
It("should list all metrics", func() {
|
||||||
}
|
Expect(registry.ListAllMetrics()).To(ConsistOf(
|
||||||
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "pods"}, Namespaced: true, Metric: "some_count"},
|
||||||
allMetrics := registry.ListAllMetrics()
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "namespaces"}, Namespaced: false, Metric: "some_count"},
|
||||||
expectedMetrics := []provider.CustomMetricInfo{
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "pods"}, Namespaced: true, Metric: "some_time"},
|
||||||
{schema.GroupResource{Resource: "pods"}, true, "some_count"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "namespaces"}, Namespaced: false, Metric: "some_time"},
|
||||||
{schema.GroupResource{Resource: "namespaces"}, false, "some_count"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "pods"}, Namespaced: true, Metric: "some_usage"},
|
||||||
{schema.GroupResource{Resource: "pods"}, true, "some_time"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "namespaces"}, Namespaced: false, Metric: "some_usage"},
|
||||||
{schema.GroupResource{Resource: "namespaces"}, false, "some_time"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "services"}, Namespaced: true, Metric: "ingress_hits"},
|
||||||
{schema.GroupResource{Resource: "pods"}, true, "some_usage"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Group: "extensions", Resource: "ingresses"}, Namespaced: true, Metric: "ingress_hits"},
|
||||||
{schema.GroupResource{Resource: "namespaces"}, false, "some_usage"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "pods"}, Namespaced: true, Metric: "ingress_hits"},
|
||||||
{schema.GroupResource{Resource: "services"}, true, "ingress_hits"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "namespaces"}, Namespaced: false, Metric: "ingress_hits"},
|
||||||
{schema.GroupResource{Group: "extensions", Resource: "ingresses"}, true, "ingress_hits"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "services"}, Namespaced: true, Metric: "service_proxy_packets"},
|
||||||
{schema.GroupResource{Resource: "pods"}, true, "ingress_hits"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "namespaces"}, Namespaced: false, Metric: "service_proxy_packets"},
|
||||||
{schema.GroupResource{Resource: "namespaces"}, false, "ingress_hits"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Group: "extensions", Resource: "deployments"}, Namespaced: true, Metric: "work_queue_wait"},
|
||||||
{schema.GroupResource{Resource: "services"}, true, "service_proxy_packets"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "namespaces"}, Namespaced: false, Metric: "work_queue_wait"},
|
||||||
{schema.GroupResource{Resource: "namespaces"}, false, "service_proxy_packets"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "nodes"}, Namespaced: false, Metric: "node_gigawatts"},
|
||||||
{schema.GroupResource{Group: "extensions", Resource: "deployments"}, true, "work_queue_wait"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "persistentvolumes"}, Namespaced: false, Metric: "volume_claims"},
|
||||||
{schema.GroupResource{Resource: "namespaces"}, false, "work_queue_wait"},
|
provider.CustomMetricInfo{GroupResource: schema.GroupResource{Resource: "nodes"}, Namespaced: false, Metric: "node_fan"},
|
||||||
{schema.GroupResource{Resource: "nodes"}, false, "node_gigawatts"},
|
))
|
||||||
{schema.GroupResource{Resource: "persistentvolumes"}, false, "volume_claims"},
|
})
|
||||||
{schema.GroupResource{Resource: "nodes"}, false, "node_fan"},
|
})
|
||||||
}
|
})
|
||||||
|
|
||||||
// sort both for easy comparison
|
|
||||||
sort.Sort(metricInfoSorter(allMetrics))
|
|
||||||
sort.Sort(metricInfoSorter(expectedMetrics))
|
|
||||||
|
|
||||||
assert.Equal(expectedMetrics, allMetrics, "should have listed all expected metrics")
|
|
||||||
}
|
|
||||||
|
|
||||||
func BenchmarkSetSeries(b *testing.B) {
|
|
||||||
namers := setupMetricNamer(b)
|
|
||||||
registry := &basicSeriesRegistry{
|
|
||||||
mapper: restMapper(),
|
|
||||||
}
|
|
||||||
|
|
||||||
numDuplicates := 10000
|
|
||||||
newSeriesSlices := make([][]prom.Series, len(seriesRegistryTestSeries))
|
|
||||||
for i, seriesSlice := range seriesRegistryTestSeries {
|
|
||||||
newSlice := make([]prom.Series, len(seriesSlice)*numDuplicates)
|
|
||||||
for j, series := range seriesSlice {
|
|
||||||
for k := 0; k < numDuplicates; k++ {
|
|
||||||
newSlice[j*numDuplicates+k] = series
|
|
||||||
}
|
|
||||||
}
|
|
||||||
newSeriesSlices[i] = newSlice
|
|
||||||
}
|
|
||||||
|
|
||||||
b.ResetTimer()
|
|
||||||
for i := 0; i < b.N; i++ {
|
|
||||||
registry.SetSeries(newSeriesSlices, namers)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// metricInfoSorter is a sort.Interface for sorting provider.CustomMetricInfos
|
|
||||||
type metricInfoSorter []provider.CustomMetricInfo
|
|
||||||
|
|
||||||
func (s metricInfoSorter) Len() int {
|
|
||||||
return len(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s metricInfoSorter) Less(i, j int) bool {
|
|
||||||
infoI := s[i]
|
|
||||||
infoJ := s[j]
|
|
||||||
|
|
||||||
if infoI.Metric == infoJ.Metric {
|
|
||||||
if infoI.GroupResource == infoJ.GroupResource {
|
|
||||||
return infoI.Namespaced
|
|
||||||
}
|
|
||||||
|
|
||||||
if infoI.GroupResource.Group == infoJ.GroupResource.Group {
|
|
||||||
return infoI.GroupResource.Resource < infoJ.GroupResource.Resource
|
|
||||||
}
|
|
||||||
|
|
||||||
return infoI.GroupResource.Group < infoJ.GroupResource.Group
|
|
||||||
}
|
|
||||||
|
|
||||||
return infoI.Metric < infoJ.Metric
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s metricInfoSorter) Swap(i, j int) {
|
|
||||||
s[i], s[j] = s[j], s[i]
|
|
||||||
}
|
|
||||||
|
|
|
||||||
167
pkg/external-provider/basic_metric_lister.go
Normal file
167
pkg/external-provider/basic_metric_lister.go
Normal file
|
|
@ -0,0 +1,167 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package provider
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
"sigs.k8s.io/prometheus-adapter/pkg/naming"
|
||||||
|
|
||||||
|
pmodel "github.com/prometheus/common/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Runnable represents something that can be run until told to stop.
type Runnable interface {
	// Run runs the runnable forever.
	Run()
	// RunUntil runs the runnable until the given channel is closed.
	RunUntil(stopChan <-chan struct{})
}

// A MetricLister provides a window into all of the metrics that are available within a given
// Prometheus instance, classified as either Custom or External metrics, but presented generically
// so that it can manage both types simultaneously.
type MetricLister interface {
	// ListAllMetrics returns the currently-known series together with the
	// namers that discovered them.
	ListAllMetrics() (MetricUpdateResult, error)
}

// A MetricListerWithNotification is a MetricLister that has the ability to notify listeners
// when new metric data is available.
type MetricListerWithNotification interface {
	MetricLister
	Runnable

	// AddNotificationReceiver registers a callback to be invoked when new metric data is available.
	AddNotificationReceiver(MetricUpdateCallback)
	// UpdateNow forces an immediate refresh from the source data. Primarily for test purposes.
	UpdateNow()
}
|
||||||
|
|
||||||
|
// basicMetricLister is a MetricLister that queries Prometheus directly on
// every call to ListAllMetrics.
type basicMetricLister struct {
	// promClient is the Prometheus client used to run series queries.
	promClient prom.Client
	// namers convert raw Prometheus series into named metrics; one series
	// query is issued per distinct namer selector.
	namers []naming.MetricNamer
	// lookback bounds how far back in time series are still considered present.
	lookback time.Duration
}

// NewBasicMetricLister creates a MetricLister that is capable of interacting directly with Prometheus to list metrics.
func NewBasicMetricLister(promClient prom.Client, namers []naming.MetricNamer, lookback time.Duration) MetricLister {
	lister := basicMetricLister{
		promClient: promClient,
		namers:     namers,
		lookback:   lookback,
	}

	return &lister
}

// selectorSeries pairs a Prometheus selector with the series it produced, for
// handing query results back over a channel.
type selectorSeries struct {
	selector prom.Selector
	series   []prom.Series
}
|
||||||
|
|
||||||
|
// ListAllMetrics runs each namer's series selector against Prometheus (in
// parallel, deduplicating identical selectors), post-filters the resulting
// series through each namer, and returns one series slice per namer.
// The first query error aborts the whole update.
func (l *basicMetricLister) ListAllMetrics() (MetricUpdateResult, error) {
	result := MetricUpdateResult{
		series: make([][]prom.Series, 0),
		namers: make([]naming.MetricNamer, 0),
	}

	// Only consider series that were present within the lookback window.
	startTime := pmodel.Now().Add(-1 * l.lookback)

	// these can take a while on large clusters, so launch in parallel
	// and don't duplicate
	selectors := make(map[prom.Selector]struct{})
	// Both channels are buffered to len(l.namers) so that every goroutine (and
	// the duplicate-selector placeholder sends below) can complete without
	// blocking, even if we bail out early on error.
	selectorSeriesChan := make(chan selectorSeries, len(l.namers))
	errs := make(chan error, len(l.namers))
	for _, converter := range l.namers {
		sel := converter.Selector()
		if _, ok := selectors[sel]; ok {
			// Duplicate selector: push placeholder items so the receive loop
			// below still sees exactly one (err, series) pair per namer.
			errs <- nil
			selectorSeriesChan <- selectorSeries{}
			continue
		}
		selectors[sel] = struct{}{}
		go func() {
			series, err := l.promClient.Series(context.TODO(), pmodel.Interval{Start: startTime, End: 0}, sel)
			if err != nil {
				// On failure only errs receives a value; the buffered
				// channels keep the receive loop from deadlocking.
				errs <- fmt.Errorf("unable to fetch metrics for query %q: %v", sel, err)
				return
			}
			errs <- nil
			// Push into the channel: "this selector produced these series"
			selectorSeriesChan <- selectorSeries{
				selector: sel,
				series:   series,
			}
		}()
	}

	// don't do duplicate queries when it's just the matchers that change
	seriesCacheByQuery := make(map[prom.Selector][]prom.Series)

	// iterate through, blocking until we've got all results
	// We know that, from above, we should have pushed one item into the channel
	// for each converter. So here, we'll assume that we should receive one item per converter.
	for range l.namers {
		if err := <-errs; err != nil {
			return result, fmt.Errorf("unable to update list of all metrics: %v", err)
		}
		// Receive from the channel: "this selector produced these series"
		// We stuff that into this map so that we can collect the data as it arrives
		// and then, once we've received it all, we can process it below.
		if ss := <-selectorSeriesChan; ss.series != nil {
			seriesCacheByQuery[ss.selector] = ss.series
		}
	}
	close(errs)

	// Now that we've collected all of the results into `seriesCacheByQuery`
	// we can start processing them.
	newSeries := make([][]prom.Series, len(l.namers))
	for i, namer := range l.namers {
		series, cached := seriesCacheByQuery[namer.Selector()]
		if !cached {
			return result, fmt.Errorf("unable to update list of all metrics: no metrics retrieved for query %q", namer.Selector())
		}
		// Because converters provide a "post-filtering" option, it's not enough to
		// simply take all the series that were produced. We need to further filter them.
		newSeries[i] = namer.FilterSeries(series)
	}

	klog.V(10).Infof("Set available metric list from Prometheus to: %v", newSeries)

	result.series = newSeries
	result.namers = l.namers
	return result, nil
}
|
||||||
|
|
||||||
|
// MetricUpdateResult represents the output of a periodic inspection of metrics found to be
// available in Prometheus.
// It includes both the series data the Prometheus exposed, as well as the configurational
// object that led to their discovery.
type MetricUpdateResult struct {
	// series holds one slice of series per namer, in namer order.
	series [][]prom.Series
	// namers are the MetricNamers whose selectors produced the series above.
	namers []naming.MetricNamer
}

// MetricUpdateCallback is a function signature for receiving periodic updates about
// available metrics.
type MetricUpdateCallback func(MetricUpdateResult)
|
||||||
128
pkg/external-provider/external_series_registry.go
Normal file
128
pkg/external-provider/external_series_registry.go
Normal file
|
|
@ -0,0 +1,128 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package provider
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
|
||||||
|
"sigs.k8s.io/custom-metrics-apiserver/pkg/provider"
|
||||||
|
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
"sigs.k8s.io/prometheus-adapter/pkg/naming"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ExternalSeriesRegistry acts as the top-level converter for transforming Kubernetes requests
// for external metrics into Prometheus queries.
type ExternalSeriesRegistry interface {
	// ListAllMetrics lists all metrics known to this registry
	ListAllMetrics() []provider.ExternalMetricInfo
	// QueryForMetric builds the Prometheus selector for the named external
	// metric, reporting whether the metric is known to the registry.
	QueryForMetric(namespace string, metricName string, metricSelector labels.Selector) (prom.Selector, bool, error)
}

// externalSeriesRegistry is a basic ExternalSeriesRegistry whose contents are
// replaced wholesale on every metric-list update notification.
type externalSeriesRegistry struct {
	// We lock when reading/writing metrics, and metricsInfo to prevent inconsistencies.
	mu sync.RWMutex

	// metrics is the list of all known metrics, ready to return from the API
	metrics []provider.ExternalMetricInfo
	// metricsInfo is a lookup from a metric to SeriesConverter for the sake of generating queries
	metricsInfo map[string]seriesInfo
}

// seriesInfo ties a discovered Prometheus series to the namer that named it.
type seriesInfo struct {
	// seriesName is the name of the corresponding Prometheus series
	seriesName string

	// namer is the MetricNamer used to name this series
	namer naming.MetricNamer
}
|
||||||
|
|
||||||
|
// NewExternalSeriesRegistry creates an ExternalSeriesRegistry driven by the data from the provided MetricLister.
|
||||||
|
func NewExternalSeriesRegistry(lister MetricListerWithNotification) ExternalSeriesRegistry {
|
||||||
|
var registry = externalSeriesRegistry{
|
||||||
|
metrics: make([]provider.ExternalMetricInfo, 0),
|
||||||
|
metricsInfo: map[string]seriesInfo{},
|
||||||
|
}
|
||||||
|
|
||||||
|
lister.AddNotificationReceiver(registry.filterAndStoreMetrics)
|
||||||
|
|
||||||
|
return ®istry
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *externalSeriesRegistry) filterAndStoreMetrics(result MetricUpdateResult) {
|
||||||
|
newSeriesSlices := result.series
|
||||||
|
namers := result.namers
|
||||||
|
|
||||||
|
if len(newSeriesSlices) != len(namers) {
|
||||||
|
klog.Fatal("need one set of series per converter")
|
||||||
|
}
|
||||||
|
apiMetricsCache := make([]provider.ExternalMetricInfo, 0)
|
||||||
|
rawMetricsCache := make(map[string]seriesInfo)
|
||||||
|
|
||||||
|
for i, newSeries := range newSeriesSlices {
|
||||||
|
namer := namers[i]
|
||||||
|
for _, series := range newSeries {
|
||||||
|
identity, err := namer.MetricNameForSeries(series)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
klog.Errorf("unable to name series %q, skipping: %v", series.String(), err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
name := identity
|
||||||
|
rawMetricsCache[name] = seriesInfo{
|
||||||
|
seriesName: series.Name,
|
||||||
|
namer: namer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for metricName := range rawMetricsCache {
|
||||||
|
apiMetricsCache = append(apiMetricsCache, provider.ExternalMetricInfo{
|
||||||
|
Metric: metricName,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
r.mu.Lock()
|
||||||
|
defer r.mu.Unlock()
|
||||||
|
|
||||||
|
r.metrics = apiMetricsCache
|
||||||
|
r.metricsInfo = rawMetricsCache
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListAllMetrics returns the cached list of known external metrics.
// NOTE(review): the internal slice is returned without copying; callers are
// presumably expected not to mutate it -- confirm.
func (r *externalSeriesRegistry) ListAllMetrics() []provider.ExternalMetricInfo {
	r.mu.RLock()
	defer r.mu.RUnlock()

	return r.metrics
}

// QueryForMetric builds the Prometheus selector for the given external metric
// and label selector. It returns found == false with a nil error when the
// metric is not known to the registry.
func (r *externalSeriesRegistry) QueryForMetric(namespace string, metricName string, metricSelector labels.Selector) (prom.Selector, bool, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()

	info, found := r.metricsInfo[metricName]

	if !found {
		klog.V(10).Infof("external metric %q not found", metricName)
		return "", false, nil
	}
	query, err := info.namer.QueryForExternalSeries(info.seriesName, namespace, metricSelector)

	return query, found, err
}
|
||||||
143
pkg/external-provider/metric_converter.go
Normal file
143
pkg/external-provider/metric_converter.go
Normal file
|
|
@ -0,0 +1,143 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package provider
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/metrics/pkg/apis/external_metrics"
|
||||||
|
|
||||||
|
"sigs.k8s.io/custom-metrics-apiserver/pkg/provider"
|
||||||
|
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MetricConverter provides a unified interface for converting the results of
// Prometheus queries into external metric types.
type MetricConverter interface {
	// Convert translates a Prometheus query result into an external metric
	// value list.
	Convert(info provider.ExternalMetricInfo, queryResult prom.QueryResult) (*external_metrics.ExternalMetricValueList, error)
}

// metricConverter is the stateless default MetricConverter implementation.
type metricConverter struct {
}

// NewMetricConverter creates a MetricConverter, capable of converting any of the three metric types
// returned by the Prometheus client into external metrics types.
func NewMetricConverter() MetricConverter {
	return &metricConverter{}
}
|
||||||
|
|
||||||
|
func (c *metricConverter) Convert(info provider.ExternalMetricInfo, queryResult prom.QueryResult) (*external_metrics.ExternalMetricValueList, error) {
|
||||||
|
if queryResult.Type == model.ValScalar {
|
||||||
|
return c.convertScalar(info, queryResult)
|
||||||
|
}
|
||||||
|
|
||||||
|
if queryResult.Type == model.ValVector {
|
||||||
|
return c.convertVector(info, queryResult)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, errors.New("encountered an unexpected query result type")
|
||||||
|
}
|
||||||
|
|
||||||
|
// convertSample converts a single Prometheus sample into an external metric
// value, carrying over the sample's labels and timestamp. The float value is
// stored as a milli-quantity, preserving three decimal places.
func (c *metricConverter) convertSample(info provider.ExternalMetricInfo, sample *model.Sample) (*external_metrics.ExternalMetricValue, error) {
	labels := c.convertLabels(sample.Metric)

	singleMetric := external_metrics.ExternalMetricValue{
		MetricName: info.Metric,
		Timestamp: metav1.Time{
			Time: sample.Timestamp.Time(),
		},
		Value:        *resource.NewMilliQuantity(int64(sample.Value*1000.0), resource.DecimalSI),
		MetricLabels: labels,
	}

	return &singleMetric, nil
}

// convertLabels copies a Prometheus label set into a plain string map.
func (c *metricConverter) convertLabels(inLabels model.Metric) map[string]string {
	numLabels := len(inLabels)
	outLabels := make(map[string]string, numLabels)
	for labelName, labelVal := range inLabels {
		outLabels[string(labelName)] = string(labelVal)
	}

	return outLabels
}
|
||||||
|
|
||||||
|
func (c *metricConverter) convertVector(info provider.ExternalMetricInfo, queryResult prom.QueryResult) (*external_metrics.ExternalMetricValueList, error) {
|
||||||
|
if queryResult.Type != model.ValVector {
|
||||||
|
return nil, errors.New("incorrect query result type")
|
||||||
|
}
|
||||||
|
|
||||||
|
toConvert := *queryResult.Vector
|
||||||
|
|
||||||
|
if toConvert == nil {
|
||||||
|
return nil, errors.New("the provided input did not contain vector query results")
|
||||||
|
}
|
||||||
|
|
||||||
|
items := []external_metrics.ExternalMetricValue{}
|
||||||
|
metricValueList := external_metrics.ExternalMetricValueList{
|
||||||
|
Items: items,
|
||||||
|
}
|
||||||
|
|
||||||
|
numSamples := toConvert.Len()
|
||||||
|
if numSamples == 0 {
|
||||||
|
return &metricValueList, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, val := range toConvert {
|
||||||
|
singleMetric, err := c.convertSample(info, val)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to convert vector: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
items = append(items, *singleMetric)
|
||||||
|
}
|
||||||
|
|
||||||
|
metricValueList = external_metrics.ExternalMetricValueList{
|
||||||
|
Items: items,
|
||||||
|
}
|
||||||
|
return &metricValueList, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *metricConverter) convertScalar(info provider.ExternalMetricInfo, queryResult prom.QueryResult) (*external_metrics.ExternalMetricValueList, error) {
|
||||||
|
if queryResult.Type != model.ValScalar {
|
||||||
|
return nil, errors.New("scalarConverter can only convert scalar query results")
|
||||||
|
}
|
||||||
|
|
||||||
|
toConvert := queryResult.Scalar
|
||||||
|
|
||||||
|
if toConvert == nil {
|
||||||
|
return nil, errors.New("the provided input did not contain scalar query results")
|
||||||
|
}
|
||||||
|
|
||||||
|
result := external_metrics.ExternalMetricValueList{
|
||||||
|
Items: []external_metrics.ExternalMetricValue{
|
||||||
|
{
|
||||||
|
MetricName: info.Metric,
|
||||||
|
Timestamp: metav1.Time{
|
||||||
|
Time: toConvert.Timestamp.Time(),
|
||||||
|
},
|
||||||
|
Value: *resource.NewMilliQuantity(int64(toConvert.Value*1000.0), resource.DecimalSI),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
return &result, nil
|
||||||
|
}
|
||||||
91
pkg/external-provider/periodic_metric_lister.go
Normal file
91
pkg/external-provider/periodic_metric_lister.go
Normal file
|
|
@ -0,0 +1,91 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package provider
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||||
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
|
)
|
||||||
|
|
||||||
|
// periodicMetricLister wraps a MetricLister and refreshes its result on a
// fixed interval, serving the cached result from ListAllMetrics.
type periodicMetricLister struct {
	// realLister performs the actual (potentially expensive) listing.
	realLister MetricLister
	// updateInterval is how often the cached result is refreshed.
	updateInterval time.Duration
	// mostRecentResult caches the result of the last successful update.
	// NOTE(review): read and written without synchronization; if RunUntil's
	// goroutine updates this while another goroutine calls ListAllMetrics,
	// that is a data race -- confirm callers, or guard with a mutex.
	mostRecentResult MetricUpdateResult
	// callbacks are notified after every successful update.
	callbacks []MetricUpdateCallback
}

// NewPeriodicMetricLister creates a MetricLister that periodically pulls the list of available metrics
// at the provided interval, but defers the actual act of retrieving the metrics to the supplied MetricLister.
// Both return values refer to the same lister; the Runnable is what drives the
// periodic refresh.
func NewPeriodicMetricLister(realLister MetricLister, updateInterval time.Duration) (MetricListerWithNotification, Runnable) {
	lister := periodicMetricLister{
		updateInterval: updateInterval,
		realLister:     realLister,
		callbacks:      make([]MetricUpdateCallback, 0),
	}

	return &lister, &lister
}

// AddNotificationReceiver registers a callback to run after each successful update.
// NOTE(review): appends without locking -- not safe to call concurrently with
// a running lister; confirm callers register before Run/RunUntil.
func (l *periodicMetricLister) AddNotificationReceiver(callback MetricUpdateCallback) {
	l.callbacks = append(l.callbacks, callback)
}

// ListAllMetrics returns the cached result of the most recent update; it
// never queries the underlying lister itself.
func (l *periodicMetricLister) ListAllMetrics() (MetricUpdateResult, error) {
	return l.mostRecentResult, nil
}

// Run refreshes the metric list on the configured interval, forever.
func (l *periodicMetricLister) Run() {
	l.RunUntil(wait.NeverStop)
}

// RunUntil refreshes the metric list on the configured interval (starting
// immediately, per wait.Until semantics) until stopChan is closed.
// It does not block the caller.
func (l *periodicMetricLister) RunUntil(stopChan <-chan struct{}) {
	go wait.Until(func() {
		if err := l.updateMetrics(); err != nil {
			utilruntime.HandleError(err)
		}
	}, l.updateInterval, stopChan)
}

// updateMetrics pulls a fresh result from the real lister, caches it, and
// notifies all registered callbacks.
func (l *periodicMetricLister) updateMetrics() error {
	result, err := l.realLister.ListAllMetrics()

	if err != nil {
		return err
	}

	// Cache the result.
	l.mostRecentResult = result
	// Let our listeners know we've got new data ready for them.
	l.notifyListeners()
	return nil
}

// notifyListeners invokes every registered non-nil callback with the cached result.
func (l *periodicMetricLister) notifyListeners() {
	for _, listener := range l.callbacks {
		if listener != nil {
			listener(l.mostRecentResult)
		}
	}
}

// UpdateNow forces an immediate refresh, reporting any failure to the runtime
// error handler rather than to the caller.
func (l *periodicMetricLister) UpdateNow() {
	if err := l.updateMetrics(); err != nil {
		utilruntime.HandleError(err)
	}
}
|
||||||
83
pkg/external-provider/periodic_metric_lister_test.go
Normal file
83
pkg/external-provider/periodic_metric_lister_test.go
Normal file
|
|
@ -0,0 +1,83 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package provider
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// fakeLister is a MetricLister stub that counts invocations and always
// returns a single canned series.
type fakeLister struct {
	// callCount records how many times ListAllMetrics has been invoked.
	callCount int
}

// ListAllMetrics returns one fixed series and bumps the call counter.
func (f *fakeLister) ListAllMetrics() (MetricUpdateResult, error) {
	f.callCount++

	return MetricUpdateResult{
		series: [][]prom.Series{
			{
				{
					Name: "a_series",
				},
			},
		},
	}, nil
}

// TestWhenNewMetricsAvailableCallbackIsInvoked verifies that a registered
// notification callback fires after a successful update.
func TestWhenNewMetricsAvailableCallbackIsInvoked(t *testing.T) {
	fakeLister := &fakeLister{}
	targetLister, _ := NewPeriodicMetricLister(fakeLister, time.Duration(1000))
	periodicLister := targetLister.(*periodicMetricLister)

	callbackInvoked := false
	callback := func(r MetricUpdateResult) {
		callbackInvoked = true
	}

	periodicLister.AddNotificationReceiver(callback)
	err := periodicLister.updateMetrics()
	require.NoError(t, err)
	require.True(t, callbackInvoked)
}

// TestWhenListingMetricsReturnsCachedValues verifies that ListAllMetrics only
// serves the cached result and never triggers the inner lister itself.
func TestWhenListingMetricsReturnsCachedValues(t *testing.T) {
	fakeLister := &fakeLister{}
	targetLister, _ := NewPeriodicMetricLister(fakeLister, time.Duration(1000))
	periodicLister := targetLister.(*periodicMetricLister)

	// We haven't invoked the inner lister yet, so we should have no results.
	resultBeforeUpdate, err := periodicLister.ListAllMetrics()
	require.NoError(t, err)
	require.Equal(t, 0, len(resultBeforeUpdate.series))
	require.Equal(t, 0, fakeLister.callCount)

	// We can simulate waiting for the update interval to pass...
	// which should result in calling the inner lister to get the metrics.
	err = periodicLister.updateMetrics()
	require.NoError(t, err)
	require.Equal(t, 1, fakeLister.callCount)

	// If we list now, we should return the cached values.
	// Make sure we got some results this time
	// as well as that we didn't unnecessarily invoke the inner lister.
	resultAfterUpdate, err := periodicLister.ListAllMetrics()
	require.NoError(t, err)
	require.NotEqual(t, 0, len(resultAfterUpdate.series))
	require.Equal(t, 1, fakeLister.callCount)
}
|
||||||
90
pkg/external-provider/provider.go
Normal file
90
pkg/external-provider/provider.go
Normal file
|
|
@ -0,0 +1,90 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package provider
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
pmodel "github.com/prometheus/common/model"
|
||||||
|
|
||||||
|
apierr "k8s.io/apimachinery/pkg/api/errors"
|
||||||
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
"k8s.io/metrics/pkg/apis/external_metrics"
|
||||||
|
|
||||||
|
"sigs.k8s.io/custom-metrics-apiserver/pkg/provider"
|
||||||
|
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
"sigs.k8s.io/prometheus-adapter/pkg/naming"
|
||||||
|
)
|
||||||
|
|
||||||
|
// externalPrometheusProvider serves the external metrics API by translating
// metric requests into Prometheus queries via its series registry.
type externalPrometheusProvider struct {
	// promClient executes the generated Prometheus queries.
	promClient prom.Client
	// metricConverter converts Prometheus query results into external metric values.
	metricConverter MetricConverter

	// seriesRegistry knows which external metrics exist and how to query them.
	seriesRegistry ExternalSeriesRegistry
}

// GetExternalMetric builds and runs the Prometheus query for the requested
// external metric and converts the result. Unknown metrics yield a
// metric-not-found error; query and conversion failures surface as opaque
// internal errors so Prometheus details are not leaked to API clients.
func (p *externalPrometheusProvider) GetExternalMetric(ctx context.Context, namespace string, metricSelector labels.Selector, info provider.ExternalMetricInfo) (*external_metrics.ExternalMetricValueList, error) {
	selector, found, err := p.seriesRegistry.QueryForMetric(namespace, info.Metric, metricSelector)

	if err != nil {
		klog.Errorf("unable to generate a query for the metric: %v", err)
		return nil, apierr.NewInternalError(fmt.Errorf("unable to fetch metrics"))
	}

	if !found {
		return nil, provider.NewMetricNotFoundError(p.selectGroupResource(namespace), info.Metric)
	}
	// Execute the generated query against Prometheus at the current instant.
	queryResults, err := p.promClient.Query(ctx, pmodel.Now(), selector)

	if err != nil {
		klog.Errorf("unable to fetch metrics from prometheus: %v", err)
		// don't leak implementation details to the user
		return nil, apierr.NewInternalError(fmt.Errorf("unable to fetch metrics"))
	}
	return p.metricConverter.Convert(info, queryResults)
}

// ListAllExternalMetrics lists every external metric known to the registry.
func (p *externalPrometheusProvider) ListAllExternalMetrics() []provider.ExternalMetricInfo {
	return p.seriesRegistry.ListAllMetrics()
}

// selectGroupResource picks the GroupResource reported in not-found errors.
// NOTE(review): only the "default" namespace maps to the namespaces group
// resource; every other namespace yields an empty GroupResource. Confirm this
// is intentional and not an inverted condition.
func (p *externalPrometheusProvider) selectGroupResource(namespace string) schema.GroupResource {
	if namespace == "default" {
		return naming.NsGroupResource
	}

	return schema.GroupResource{
		Group:    "",
		Resource: "",
	}
}

// NewExternalPrometheusProvider creates an ExternalMetricsProvider capable of responding to Kubernetes requests for external metric data.
// The returned Runnable must be started for the metric list to stay current;
// maxAge bounds how far back series are considered live, and updateInterval
// controls how often the list is refreshed.
func NewExternalPrometheusProvider(promClient prom.Client, namers []naming.MetricNamer, updateInterval time.Duration, maxAge time.Duration) (provider.ExternalMetricsProvider, Runnable) {
	metricConverter := NewMetricConverter()
	basicLister := NewBasicMetricLister(promClient, namers, maxAge)
	periodicLister, _ := NewPeriodicMetricLister(basicLister, updateInterval)
	seriesRegistry := NewExternalSeriesRegistry(periodicLister)
	return &externalPrometheusProvider{
		promClient:      promClient,
		seriesRegistry:  seriesRegistry,
		metricConverter: metricConverter,
	}, periodicLister
}
|
||||||
34
pkg/naming/errors.go
Normal file
34
pkg/naming/errors.go
Normal file
|
|
@ -0,0 +1,34 @@
|
||||||
|
/*
|
||||||
|
Copyright 2017 The Kubernetes Authors.
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package naming
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
|
var (
	// ErrUnsupportedOperator indicates that we were requested to service a
	// query that Prometheus would be unable to support.
	ErrUnsupportedOperator = errors.New("operator not supported by prometheus")

	// ErrMalformedQuery indicates that we were requested to service a query
	// that was malformed in its operator/value combination (the operator
	// requires values but none were supplied).
	ErrMalformedQuery = errors.New("operator requires values")

	// ErrQueryUnsupportedValues indicates that values were supplied to an
	// operator that does not accept any.
	ErrQueryUnsupportedValues = errors.New("operator does not support values")

	// ErrLabelNotSpecified indicates that we were requested to service a
	// query that was malformed in its label specification (an empty label
	// name).
	ErrLabelNotSpecified = errors.New("label not specified")
)
|
||||||
100
pkg/naming/lbl_res.go
Normal file
100
pkg/naming/lbl_res.go
Normal file
|
|
@ -0,0 +1,100 @@
|
||||||
|
/*
|
||||||
|
Copyright 2019 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package naming
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"text/template"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
|
||||||
|
pmodel "github.com/prometheus/common/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
// labelGroupResExtractor extracts schema.GroupResources from series labels.
type labelGroupResExtractor struct {
	// regex matches a whole label name, with named capture groups for the
	// resource (and optionally the group) portions.
	regex *regexp.Regexp

	// resourceInd is the submatch index of the "resource" capture group.
	resourceInd int
	// groupInd is the submatch index of the "group" capture group, or nil
	// when the label template did not reference a group.
	groupInd *int
}
|
||||||
|
|
||||||
|
// newLabelGroupResExtractor creates a new labelGroupResExtractor for labels whose form
|
||||||
|
// matches the given template. It does so by creating a regular expression from the template,
|
||||||
|
// so anything in the template which limits resource or group name length will cause issues.
|
||||||
|
func newLabelGroupResExtractor(labelTemplate *template.Template) (*labelGroupResExtractor, error) {
|
||||||
|
labelRegexBuff := new(bytes.Buffer)
|
||||||
|
if err := labelTemplate.Execute(labelRegexBuff, schema.GroupResource{
|
||||||
|
Group: "(?P<group>.+?)",
|
||||||
|
Resource: "(?P<resource>.+?)"},
|
||||||
|
); err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to convert label template to matcher: %v", err)
|
||||||
|
}
|
||||||
|
if labelRegexBuff.Len() == 0 {
|
||||||
|
return nil, fmt.Errorf("unable to convert label template to matcher: empty template")
|
||||||
|
}
|
||||||
|
labelRegexRaw := "^" + labelRegexBuff.String() + "$"
|
||||||
|
labelRegex, err := regexp.Compile(labelRegexRaw)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to convert label template to matcher: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var groupInd *int
|
||||||
|
var resInd *int
|
||||||
|
|
||||||
|
for i, name := range labelRegex.SubexpNames() {
|
||||||
|
switch name {
|
||||||
|
case "group":
|
||||||
|
ind := i // copy to avoid iteration variable reference
|
||||||
|
groupInd = &ind
|
||||||
|
case "resource":
|
||||||
|
ind := i // copy to avoid iteration variable reference
|
||||||
|
resInd = &ind
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if resInd == nil {
|
||||||
|
return nil, fmt.Errorf("must include at least `{{.Resource}}` in the label template")
|
||||||
|
}
|
||||||
|
|
||||||
|
return &labelGroupResExtractor{
|
||||||
|
regex: labelRegex,
|
||||||
|
resourceInd: *resInd,
|
||||||
|
groupInd: groupInd,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupResourceForLabel extracts a schema.GroupResource from the given label, if possible.
|
||||||
|
// The second return value indicates whether or not a potential group-resource was found in this label.
|
||||||
|
func (e *labelGroupResExtractor) GroupResourceForLabel(lbl pmodel.LabelName) (schema.GroupResource, bool) {
|
||||||
|
matchGroups := e.regex.FindStringSubmatch(string(lbl))
|
||||||
|
if matchGroups != nil {
|
||||||
|
group := ""
|
||||||
|
if e.groupInd != nil {
|
||||||
|
group = matchGroups[*e.groupInd]
|
||||||
|
}
|
||||||
|
|
||||||
|
return schema.GroupResource{
|
||||||
|
Group: group,
|
||||||
|
Resource: matchGroups[e.resourceInd],
|
||||||
|
}, true
|
||||||
|
}
|
||||||
|
|
||||||
|
return schema.GroupResource{}, false
|
||||||
|
}
|
||||||
222
pkg/naming/metric_namer.go
Normal file
222
pkg/naming/metric_namer.go
Normal file
|
|
@ -0,0 +1,222 @@
|
||||||
|
/*
|
||||||
|
Copyright 2019 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package naming
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
|
||||||
|
apimeta "k8s.io/apimachinery/pkg/api/meta"
|
||||||
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
"sigs.k8s.io/prometheus-adapter/pkg/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MetricNamer knows how to convert Prometheus series names and label names to
// metrics API resources, and vice-versa. MetricNamers should be safe to access
// concurrently. Returned group-resources are "normalized" as per the
// MetricInfo#Normalized method. Group-resources passed as arguments must
// themselves be normalized.
type MetricNamer interface {
	// Selector produces the appropriate Prometheus series selector to match all
	// series handable by this namer.
	Selector() prom.Selector
	// FilterSeries checks to see which of the given series match any additional
	// constraints beyond the series query. It's assumed that the series given
	// already match the series query.
	FilterSeries(series []prom.Series) []prom.Series
	// MetricNameForSeries returns the name (as presented in the API) for a given series.
	MetricNameForSeries(series prom.Series) (string, error)
	// QueryForSeries returns the query for a given series (not API metric name), with
	// the given namespace name (if relevant), resource, and resource names.
	QueryForSeries(series string, resource schema.GroupResource, namespace string, metricSelector labels.Selector, names ...string) (prom.Selector, error)
	// QueryForExternalSeries returns the query for a given series (not API metric name), with
	// the given namespace name (if relevant), resource, and resource names.
	QueryForExternalSeries(series string, namespace string, targetLabels labels.Selector) (prom.Selector, error)

	// ResourceConverter is embedded to expose label <-> group-resource
	// conversion alongside the naming methods above.
	ResourceConverter
}
|
||||||
|
|
||||||
|
// Selector returns the series query that selects all series handled by this
// namer.
func (n *metricNamer) Selector() prom.Selector {
	return n.seriesQuery
}
|
||||||
|
|
||||||
|
// ReMatcher either positively or negatively matches a regex
type ReMatcher struct {
	regex    *regexp.Regexp // compiled expression to test values against
	positive bool           // true: value must match; false: value must not match
}
|
||||||
|
|
||||||
|
func NewReMatcher(cfg config.RegexFilter) (*ReMatcher, error) {
|
||||||
|
if cfg.Is != "" && cfg.IsNot != "" {
|
||||||
|
return nil, fmt.Errorf("cannot have both an `is` (%q) and `isNot` (%q) expression in a single filter", cfg.Is, cfg.IsNot)
|
||||||
|
}
|
||||||
|
if cfg.Is == "" && cfg.IsNot == "" {
|
||||||
|
return nil, fmt.Errorf("must have either an `is` or `isNot` expression in a filter")
|
||||||
|
}
|
||||||
|
|
||||||
|
var positive bool
|
||||||
|
var regexRaw string
|
||||||
|
if cfg.Is != "" {
|
||||||
|
positive = true
|
||||||
|
regexRaw = cfg.Is
|
||||||
|
} else {
|
||||||
|
positive = false
|
||||||
|
regexRaw = cfg.IsNot
|
||||||
|
}
|
||||||
|
|
||||||
|
regex, err := regexp.Compile(regexRaw)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to compile series filter %q: %v", regexRaw, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &ReMatcher{
|
||||||
|
regex: regex,
|
||||||
|
positive: positive,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Matches reports whether val satisfies this filter: a regex match for a
// positive (`is`) filter, or a non-match for a negative (`isNot`) one.
func (m *ReMatcher) Matches(val string) bool {
	return m.regex.MatchString(val) == m.positive
}
|
||||||
|
|
||||||
|
// metricNamer is the standard MetricNamer implementation, compiled from a
// single discovery rule.
type metricNamer struct {
	seriesQuery    prom.Selector  // Prometheus selector matching candidate series
	metricsQuery   MetricsQuery   // compiled query template used to fetch values
	nameMatches    *regexp.Regexp // pattern applied to series names
	nameAs         string         // expansion template producing the API metric name
	seriesMatchers []*ReMatcher   // extra per-series-name filters

	ResourceConverter
}
|
||||||
|
|
||||||
|
// FilterSeries keeps only the series whose names pass every configured
// series matcher; with no matchers configured, the input is returned as-is.
// (The previous comment here described queryTemplateArgs and was misplaced.)
func (n *metricNamer) FilterSeries(initialSeries []prom.Series) []prom.Series {
	if len(n.seriesMatchers) == 0 {
		return initialSeries
	}

	finalSeries := make([]prom.Series, 0, len(initialSeries))
SeriesLoop:
	for _, series := range initialSeries {
		for _, matcher := range n.seriesMatchers {
			if !matcher.Matches(series.Name) {
				// One failed matcher disqualifies the series entirely.
				continue SeriesLoop
			}
		}
		finalSeries = append(finalSeries, series)
	}

	return finalSeries
}
|
||||||
|
|
||||||
|
// QueryForSeries builds the Prometheus query for the given series over the
// named resource objects, delegating to the compiled metrics query template
// (with no extra group-by labels).
func (n *metricNamer) QueryForSeries(series string, resource schema.GroupResource, namespace string, metricSelector labels.Selector, names ...string) (prom.Selector, error) {
	return n.metricsQuery.Build(series, resource, namespace, nil, metricSelector, names...)
}
|
||||||
|
|
||||||
|
// QueryForExternalSeries builds the Prometheus query for the given external
// series in the given namespace, with an empty group-by clause.
func (n *metricNamer) QueryForExternalSeries(series string, namespace string, metricSelector labels.Selector) (prom.Selector, error) {
	return n.metricsQuery.BuildExternal(series, namespace, "", []string{}, metricSelector)
}
|
||||||
|
|
||||||
|
func (n *metricNamer) MetricNameForSeries(series prom.Series) (string, error) {
|
||||||
|
matches := n.nameMatches.FindStringSubmatchIndex(series.Name)
|
||||||
|
if matches == nil {
|
||||||
|
return "", fmt.Errorf("series name %q did not match expected pattern %q", series.Name, n.nameMatches.String())
|
||||||
|
}
|
||||||
|
outNameBytes := n.nameMatches.ExpandString(nil, n.nameAs, series.Name, matches)
|
||||||
|
return string(outNameBytes), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NamersFromConfig produces a MetricNamer for each rule in the given config.
|
||||||
|
func NamersFromConfig(cfg []config.DiscoveryRule, mapper apimeta.RESTMapper) ([]MetricNamer, error) {
|
||||||
|
namers := make([]MetricNamer, len(cfg))
|
||||||
|
|
||||||
|
for i, rule := range cfg {
|
||||||
|
resConv, err := NewResourceConverter(rule.Resources.Template, rule.Resources.Overrides, mapper)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// queries are namespaced by default unless the rule specifically disables it
|
||||||
|
namespaced := true
|
||||||
|
if rule.Resources.Namespaced != nil {
|
||||||
|
namespaced = *rule.Resources.Namespaced
|
||||||
|
}
|
||||||
|
|
||||||
|
metricsQuery, err := NewExternalMetricsQuery(rule.MetricsQuery, resConv, namespaced)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to construct metrics query associated with series query %q: %v", rule.SeriesQuery, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
seriesMatchers := make([]*ReMatcher, len(rule.SeriesFilters))
|
||||||
|
for i, filterRaw := range rule.SeriesFilters {
|
||||||
|
matcher, err := NewReMatcher(filterRaw)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to generate series name filter associated with series query %q: %v", rule.SeriesQuery, err)
|
||||||
|
}
|
||||||
|
seriesMatchers[i] = matcher
|
||||||
|
}
|
||||||
|
if rule.Name.Matches != "" {
|
||||||
|
matcher, err := NewReMatcher(config.RegexFilter{Is: rule.Name.Matches})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to generate series name filter from name rules associated with series query %q: %v", rule.SeriesQuery, err)
|
||||||
|
}
|
||||||
|
seriesMatchers = append(seriesMatchers, matcher)
|
||||||
|
}
|
||||||
|
|
||||||
|
var nameMatches *regexp.Regexp
|
||||||
|
if rule.Name.Matches != "" {
|
||||||
|
nameMatches, err = regexp.Compile(rule.Name.Matches)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to compile series name match expression %q associated with series query %q: %v", rule.Name.Matches, rule.SeriesQuery, err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// this will always succeed
|
||||||
|
nameMatches = regexp.MustCompile(".*")
|
||||||
|
}
|
||||||
|
nameAs := rule.Name.As
|
||||||
|
if nameAs == "" {
|
||||||
|
// check if we have an obvious default
|
||||||
|
subexpNames := nameMatches.SubexpNames()
|
||||||
|
switch len(subexpNames) {
|
||||||
|
case 1:
|
||||||
|
// no capture groups, use the whole thing
|
||||||
|
nameAs = "$0"
|
||||||
|
case 2:
|
||||||
|
// one capture group, use that
|
||||||
|
nameAs = "$1"
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("must specify an 'as' value for name matcher %q associated with series query %q", rule.Name.Matches, rule.SeriesQuery)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
namer := &metricNamer{
|
||||||
|
seriesQuery: prom.Selector(rule.SeriesQuery),
|
||||||
|
metricsQuery: metricsQuery,
|
||||||
|
nameMatches: nameMatches,
|
||||||
|
nameAs: nameAs,
|
||||||
|
seriesMatchers: seriesMatchers,
|
||||||
|
ResourceConverter: resConv,
|
||||||
|
}
|
||||||
|
|
||||||
|
namers[i] = namer
|
||||||
|
}
|
||||||
|
|
||||||
|
return namers, nil
|
||||||
|
}
|
||||||
369
pkg/naming/metrics_query.go
Normal file
369
pkg/naming/metrics_query.go
Normal file
|
|
@ -0,0 +1,369 @@
|
||||||
|
/*
|
||||||
|
Copyright 2019 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package naming
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"text/template"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/apimachinery/pkg/selection"
|
||||||
|
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MetricsQuery represents a compiled metrics query for some set of
// series that can be converted into an series of Prometheus expressions to
// be passed to a client.
type MetricsQuery interface {
	// Build constructs Prometheus expressions to represent this query
	// over the given group-resource. If namespace is empty, the resource
	// is considered to be root-scoped. extraGroupBy may be used for cases
	// where we need to scope down more specifically than just the group-resource
	// (e.g. container metrics).
	Build(series string, groupRes schema.GroupResource, namespace string, extraGroupBy []string, metricSelector labels.Selector, resourceNames ...string) (prom.Selector, error)
	// BuildExternal constructs a Prometheus expression for an external
	// metric series in the given namespace, with the given group-by
	// clause (both stringified and slice forms) and label selector.
	BuildExternal(seriesName string, namespace string, groupBy string, groupBySlice []string, metricSelector labels.Selector) (prom.Selector, error)
}
|
||||||
|
|
||||||
|
// NewMetricsQuery constructs a new MetricsQuery by compiling the given Go template.
|
||||||
|
// The delimiters on the template are `<<` and `>>`, and it may use the following fields:
|
||||||
|
// - Series: the series in question
|
||||||
|
// - LabelMatchers: a pre-stringified form of the label matchers for the resources in the query
|
||||||
|
// - LabelMatchersByName: the raw map-form of the above matchers
|
||||||
|
// - GroupBy: the group-by clause to use for the resources in the query (stringified)
|
||||||
|
// - GroupBySlice: the raw slice form of the above group-by clause
|
||||||
|
func NewMetricsQuery(queryTemplate string, resourceConverter ResourceConverter) (MetricsQuery, error) {
|
||||||
|
templ, err := template.New("metrics-query").Delims("<<", ">>").Parse(queryTemplate)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to parse metrics query template %q: %v", queryTemplate, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &metricsQuery{
|
||||||
|
resConverter: resourceConverter,
|
||||||
|
template: templ,
|
||||||
|
namespaced: true,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewExternalMetricsQuery constructs a new MetricsQuery by compiling the given Go template.
|
||||||
|
// The delimiters on the template are `<<` and `>>`, and it may use the following fields:
|
||||||
|
// - Series: the series in question
|
||||||
|
// - LabelMatchers: a pre-stringified form of the label matchers for the resources in the query
|
||||||
|
// - LabelMatchersByName: the raw map-form of the above matchers
|
||||||
|
// - GroupBy: the group-by clause to use for the resources in the query (stringified)
|
||||||
|
// - GroupBySlice: the raw slice form of the above group-by clause
|
||||||
|
func NewExternalMetricsQuery(queryTemplate string, resourceConverter ResourceConverter, namespaced bool) (MetricsQuery, error) {
|
||||||
|
templ, err := template.New("metrics-query").Delims("<<", ">>").Parse(queryTemplate)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to parse metrics query template %q: %v", queryTemplate, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &metricsQuery{
|
||||||
|
resConverter: resourceConverter,
|
||||||
|
template: templ,
|
||||||
|
namespaced: namespaced,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// metricsQuery is a MetricsQuery based on a compiled Go text template.
// with the delimiters as `<<` and `>>`, and the arguments found in
// queryTemplateArgs.
type metricsQuery struct {
	resConverter ResourceConverter  // maps group-resources to label names
	template     *template.Template // compiled query template
	namespaced   bool               // whether BuildExternal adds a namespace matcher
}
|
||||||
|
|
||||||
|
// queryTemplateArgs contains the arguments for the template used in metricsQuery.
type queryTemplateArgs struct {
	Series            string            // the series being queried
	LabelMatchers     string            // comma-joined, pre-stringified label matchers
	LabelValuesByName map[string]string // targeted label values ("|"-joined) keyed by label name
	GroupBy           string            // stringified group-by clause
	GroupBySlice      []string          // raw slice form of the group-by clause
}
|
||||||
|
|
||||||
|
// queryPart is one label constraint destined for the query template: a label
// name, the values to compare against, and the selector operator to apply.
type queryPart struct {
	labelName string
	values    []string
	operator  selection.Operator
}
|
||||||
|
|
||||||
|
func (q *metricsQuery) Build(series string, resource schema.GroupResource, namespace string, extraGroupBy []string, metricSelector labels.Selector, names ...string) (prom.Selector, error) {
|
||||||
|
queryParts := q.createQueryPartsFromSelector(metricSelector)
|
||||||
|
|
||||||
|
if namespace != "" {
|
||||||
|
namespaceLbl, err := q.resConverter.LabelForResource(NsGroupResource)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
queryParts = append(queryParts, queryPart{
|
||||||
|
labelName: string(namespaceLbl),
|
||||||
|
values: []string{namespace},
|
||||||
|
operator: selection.Equals,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
exprs, valuesByName, err := q.processQueryParts(queryParts)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
resourceLbl, err := q.resConverter.LabelForResource(resource)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
matcher := prom.LabelEq
|
||||||
|
targetValue := strings.Join(names, "|")
|
||||||
|
|
||||||
|
if len(names) > 1 {
|
||||||
|
matcher = prom.LabelMatches
|
||||||
|
}
|
||||||
|
|
||||||
|
exprs = append(exprs, matcher(string(resourceLbl), targetValue))
|
||||||
|
valuesByName[string(resourceLbl)] = targetValue
|
||||||
|
|
||||||
|
groupBy := make([]string, 0, len(extraGroupBy)+1)
|
||||||
|
groupBy = append(groupBy, string(resourceLbl))
|
||||||
|
groupBy = append(groupBy, extraGroupBy...)
|
||||||
|
|
||||||
|
args := queryTemplateArgs{
|
||||||
|
Series: series,
|
||||||
|
LabelMatchers: strings.Join(exprs, ","),
|
||||||
|
LabelValuesByName: valuesByName,
|
||||||
|
GroupBy: strings.Join(groupBy, ","),
|
||||||
|
GroupBySlice: groupBy,
|
||||||
|
}
|
||||||
|
queryBuff := new(bytes.Buffer)
|
||||||
|
if err := q.template.Execute(queryBuff, args); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if queryBuff.Len() == 0 {
|
||||||
|
return "", fmt.Errorf("empty query produced by metrics query template")
|
||||||
|
}
|
||||||
|
|
||||||
|
return prom.Selector(queryBuff.String()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *metricsQuery) BuildExternal(seriesName string, namespace string, groupBy string, groupBySlice []string, metricSelector labels.Selector) (prom.Selector, error) {
|
||||||
|
queryParts := []queryPart{}
|
||||||
|
|
||||||
|
// Build up the query parts from the selector.
|
||||||
|
queryParts = append(queryParts, q.createQueryPartsFromSelector(metricSelector)...)
|
||||||
|
|
||||||
|
if q.namespaced && namespace != "" {
|
||||||
|
namespaceLbl, err := q.resConverter.LabelForResource(NsGroupResource)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
queryParts = append(queryParts, queryPart{
|
||||||
|
labelName: string(namespaceLbl),
|
||||||
|
values: []string{namespace},
|
||||||
|
operator: selection.Equals,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert our query parts into the types we need for our template.
|
||||||
|
exprs, valuesByName, err := q.processQueryParts(queryParts)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
args := queryTemplateArgs{
|
||||||
|
Series: seriesName,
|
||||||
|
LabelMatchers: strings.Join(exprs, ","),
|
||||||
|
LabelValuesByName: valuesByName,
|
||||||
|
GroupBy: groupBy,
|
||||||
|
GroupBySlice: groupBySlice,
|
||||||
|
}
|
||||||
|
|
||||||
|
queryBuff := new(bytes.Buffer)
|
||||||
|
if err := q.template.Execute(queryBuff, args); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if queryBuff.Len() == 0 {
|
||||||
|
return "", fmt.Errorf("empty query produced by metrics query template")
|
||||||
|
}
|
||||||
|
|
||||||
|
return prom.Selector(queryBuff.String()), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *metricsQuery) createQueryPartsFromSelector(metricSelector labels.Selector) []queryPart {
|
||||||
|
requirements, _ := metricSelector.Requirements()
|
||||||
|
|
||||||
|
selectors := []queryPart{}
|
||||||
|
for i := 0; i < len(requirements); i++ {
|
||||||
|
selector := q.convertRequirement(requirements[i])
|
||||||
|
|
||||||
|
selectors = append(selectors, selector)
|
||||||
|
}
|
||||||
|
|
||||||
|
return selectors
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *metricsQuery) convertRequirement(requirement labels.Requirement) queryPart {
|
||||||
|
labelName := requirement.Key()
|
||||||
|
values := requirement.Values().List()
|
||||||
|
|
||||||
|
return queryPart{
|
||||||
|
labelName: labelName,
|
||||||
|
values: values,
|
||||||
|
operator: requirement.Operator(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// processQueryParts converts queryParts into the two template inputs: the
// rendered per-label matcher expressions, and a map from label name to the
// "|"-joined list of targeted values. Any part that cannot be mapped exactly
// onto a Prometheus matcher fails the whole conversion.
func (q *metricsQuery) processQueryParts(queryParts []queryPart) ([]string, map[string]string, error) {
	// We've taken the approach here that if we can't perfectly map their query into a Prometheus
	// query that we should abandon the effort completely.
	// The concern is that if we don't get a perfect match on their query parameters, the query result
	// might contain unexpected data that would cause them to take an erroneous action based on the result.

	// Contains the expressions that we want to include as part of the query to Prometheus.
	// e.g. "namespace=my-namespace"
	// e.g. "some_label=some-value"
	var exprs []string

	// Contains the list of label values we're targeting, by label name.
	// e.g. "some_label" => "value-one|value-two"
	valuesByName := map[string]string{}

	// Convert our query parts into template arguments.
	for _, qPart := range queryParts {
		// Be resilient against bad inputs.
		// We obviously can't generate label filters for these cases.
		if qPart.labelName == "" {
			return nil, nil, ErrLabelNotSpecified
		}

		if !q.operatorIsSupported(qPart.operator) {
			return nil, nil, ErrUnsupportedOperator
		}

		matcher, err := q.selectMatcher(qPart.operator, qPart.values)

		if err != nil {
			return nil, nil, err
		}

		targetValue, err := q.selectTargetValue(qPart.operator, qPart.values)
		if err != nil {
			return nil, nil, err
		}

		expression := matcher(qPart.labelName, targetValue)
		exprs = append(exprs, expression)
		valuesByName[qPart.labelName] = strings.Join(qPart.values, "|")
	}

	return exprs, valuesByName, nil
}
|
||||||
|
|
||||||
|
func (q *metricsQuery) selectMatcher(operator selection.Operator, values []string) (func(string, string) string, error) {
|
||||||
|
switch len(values) {
|
||||||
|
case 0:
|
||||||
|
switch operator {
|
||||||
|
case selection.Exists:
|
||||||
|
return prom.LabelNeq, nil
|
||||||
|
case selection.DoesNotExist:
|
||||||
|
return prom.LabelEq, nil
|
||||||
|
case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.In, selection.NotIn:
|
||||||
|
return nil, ErrMalformedQuery
|
||||||
|
}
|
||||||
|
case 1:
|
||||||
|
switch operator {
|
||||||
|
case selection.Equals, selection.DoubleEquals:
|
||||||
|
return prom.LabelEq, nil
|
||||||
|
case selection.NotEquals:
|
||||||
|
return prom.LabelNeq, nil
|
||||||
|
case selection.In, selection.Exists:
|
||||||
|
return prom.LabelMatches, nil
|
||||||
|
case selection.DoesNotExist, selection.NotIn:
|
||||||
|
return prom.LabelNotMatches, nil
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
// Since labels can only have one value, providing multiple
|
||||||
|
// values results in a regex match, even if that's not what the user
|
||||||
|
// asked for.
|
||||||
|
switch operator {
|
||||||
|
case selection.Equals, selection.DoubleEquals, selection.In, selection.Exists:
|
||||||
|
return prom.LabelMatches, nil
|
||||||
|
case selection.NotEquals, selection.DoesNotExist, selection.NotIn:
|
||||||
|
return prom.LabelNotMatches, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, errors.New("operator not supported by query builder")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *metricsQuery) selectTargetValue(operator selection.Operator, values []string) (string, error) {
|
||||||
|
switch len(values) {
|
||||||
|
case 0:
|
||||||
|
switch operator {
|
||||||
|
case selection.Exists, selection.DoesNotExist:
|
||||||
|
// Return an empty string when values are equal to 0
|
||||||
|
// When the operator is LabelNotMatches this will select series without the label
|
||||||
|
// or with the label but a value of "".
|
||||||
|
// When the operator is LabelMatches this will select series with the label
|
||||||
|
// whose value is NOT "".
|
||||||
|
return "", nil
|
||||||
|
case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.In, selection.NotIn:
|
||||||
|
return "", ErrMalformedQuery
|
||||||
|
}
|
||||||
|
case 1:
|
||||||
|
switch operator {
|
||||||
|
case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.In, selection.NotIn:
|
||||||
|
// Pass the value through as-is.
|
||||||
|
// It's somewhat strange to do this for both the regex and equality
|
||||||
|
// operators, but if we do it this way it gives the user a little more control.
|
||||||
|
// They might choose to send an "IN" request and give a list of static values
|
||||||
|
// or they could send a single value that's a regex, giving them a passthrough
|
||||||
|
// for their label selector.
|
||||||
|
return values[0], nil
|
||||||
|
case selection.Exists, selection.DoesNotExist:
|
||||||
|
return "", ErrQueryUnsupportedValues
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
switch operator {
|
||||||
|
case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.In, selection.NotIn:
|
||||||
|
// Pass the value through as-is.
|
||||||
|
// It's somewhat strange to do this for both the regex and equality
|
||||||
|
// operators, but if we do it this way it gives the user a little more control.
|
||||||
|
// They might choose to send an "IN" request and give a list of static values
|
||||||
|
// or they could send a single value that's a regex, giving them a passthrough
|
||||||
|
// for their label selector.
|
||||||
|
return strings.Join(values, "|"), nil
|
||||||
|
case selection.Exists, selection.DoesNotExist:
|
||||||
|
return "", ErrQueryUnsupportedValues
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", errors.New("operator not supported by query builder")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *metricsQuery) operatorIsSupported(operator selection.Operator) bool {
|
||||||
|
return operator != selection.GreaterThan && operator != selection.LessThan
|
||||||
|
}
|
||||||
453
pkg/naming/metrics_query_test.go
Normal file
453
pkg/naming/metrics_query_test.go
Normal file
|
|
@ -0,0 +1,453 @@
|
||||||
|
/*
|
||||||
|
Copyright 2019 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package naming
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
labels "k8s.io/apimachinery/pkg/labels"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/apimachinery/pkg/selection"
|
||||||
|
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
|
||||||
|
pmodel "github.com/prometheus/common/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
// resourceConverterMock is a minimal ResourceConverter stand-in for tests;
// namespaced controls the value ResourcesForSeries reports.
type resourceConverterMock struct {
	namespaced bool
}

// ResourcesForSeries is a mock that returns a single group resource,
// namely the series as a resource itself.
func (rcm *resourceConverterMock) ResourcesForSeries(series prom.Series) (res []schema.GroupResource, namespaced bool) {
	return []schema.GroupResource{{Resource: series.Name}}, rcm.namespaced
}

// LabelForResource is a mock that returns the label name,
// simply by taking the given resource.
func (rcm *resourceConverterMock) LabelForResource(gr schema.GroupResource) (pmodel.LabelName, error) {
	return pmodel.LabelName(gr.Resource), nil
}
|
||||||
|
|
||||||
|
type checkFunc func(prom.Selector, error) error
|
||||||
|
|
||||||
|
func hasError(want error) checkFunc {
|
||||||
|
return func(_ prom.Selector, got error) error {
|
||||||
|
if want != got {
|
||||||
|
return fmt.Errorf("got error %v, want %v", got, want)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasSelector(want string) checkFunc {
|
||||||
|
return func(got prom.Selector, _ error) error {
|
||||||
|
if prom.Selector(want) != got {
|
||||||
|
return fmt.Errorf("got selector %q, want %q", got, want)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func checks(cs ...checkFunc) checkFunc {
|
||||||
|
return func(s prom.Selector, e error) error {
|
||||||
|
for _, c := range cs {
|
||||||
|
if err := c(s, e); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestBuildSelector exercises MetricsQuery.Build against a table of query
// templates, checking both the returned error and the rendered selector.
func TestBuildSelector(t *testing.T) {
	// mustNewQuery parses a query template, failing the test on error.
	mustNewQuery := func(queryTemplate string, namespaced bool) MetricsQuery {
		mq, err := NewMetricsQuery(queryTemplate, &resourceConverterMock{namespaced})
		if err != nil {
			t.Fatal(err)
		}
		return mq
	}

	// mustNewLabelRequirement builds a label requirement, failing the test on error.
	mustNewLabelRequirement := func(key string, op selection.Operator, vals []string) *labels.Requirement {
		req, err := labels.NewRequirement(key, op, vals)
		if err != nil {
			t.Fatal(err)
		}
		return req
	}

	tests := []struct {
		name string
		mq   MetricsQuery

		// inputs forwarded to Build
		series         string
		resource       schema.GroupResource
		namespace      string
		extraGroupBy   []string
		metricSelector labels.Selector
		names          []string

		check checkFunc
	}{
		{
			name: "series",

			mq:             mustNewQuery(`series <<.Series>>`, false),
			metricSelector: labels.NewSelector(),
			series:         "foo",

			check: checks(
				hasError(nil),
				hasSelector("series foo"),
			),
		},

		{
			name: "multiple LabelMatchers values",

			mq:             mustNewQuery(`<<.LabelMatchers>>`, false),
			metricSelector: labels.NewSelector(),
			resource:       schema.GroupResource{Group: "group", Resource: "resource"},
			names:          []string{"bar", "baz"},

			check: checks(
				hasError(nil),
				hasSelector(`resource=~"bar|baz"`),
			),
		},

		{
			name: "single LabelMatchers value",

			mq:             mustNewQuery(`<<.LabelMatchers>>`, false),
			metricSelector: labels.NewSelector(),
			resource:       schema.GroupResource{Group: "group", Resource: "resource"},
			names:          []string{"bar"},

			check: checks(
				hasError(nil),
				hasSelector(`resource="bar"`),
			),
		},

		{
			name: "LabelMatchers with additional metrics filter",

			mq: mustNewQuery(`<<.LabelMatchers>>`, false),
			metricSelector: labels.NewSelector().Add(
				*mustNewLabelRequirement("metric1", selection.Equals, []string{"value1"}),
			),
			resource: schema.GroupResource{Group: "group", Resource: "resource"},
			names:    []string{"bar"},

			check: checks(
				hasError(nil),
				hasSelector(`metric1="value1",resource="bar"`),
			),
		},

		{
			name: "single LabelValuesByName value",

			mq:             mustNewQuery(`<<index .LabelValuesByName "resource">>`, false),
			metricSelector: labels.NewSelector(),
			resource:       schema.GroupResource{Group: "group", Resource: "resource"},
			names:          []string{"bar"},

			check: checks(
				hasError(nil),
				hasSelector("bar"),
			),
		},

		{
			name: "multiple LabelValuesByName values",

			mq:             mustNewQuery(`<<index .LabelValuesByName "resource">>`, false),
			metricSelector: labels.NewSelector(),
			resource:       schema.GroupResource{Group: "group", Resource: "resource"},
			names:          []string{"bar", "baz"},

			check: checks(
				hasError(nil),
				hasSelector("bar|baz"),
			),
		},

		{
			name: "multiple LabelValuesByName values with namespace",

			mq:             mustNewQuery(`<<index .LabelValuesByName "namespaces">> <<index .LabelValuesByName "resource">>`, true),
			metricSelector: labels.NewSelector(),
			resource:       schema.GroupResource{Group: "group", Resource: "resource"},
			namespace:      "default",
			names:          []string{"bar", "baz"},

			check: checks(
				hasError(nil),
				hasSelector("default bar|baz"),
			),
		},

		{
			name: "single GroupBy value",

			mq:             mustNewQuery(`<<.GroupBy>>`, false),
			metricSelector: labels.NewSelector(),
			resource:       schema.GroupResource{Group: "group", Resource: "resource"},

			check: checks(
				hasError(nil),
				hasSelector("resource"),
			),
		},

		{
			name: "multiple GroupBy values",

			mq:             mustNewQuery(`<<.GroupBy>>`, false),
			metricSelector: labels.NewSelector(),
			resource:       schema.GroupResource{Group: "group", Resource: "resource"},
			extraGroupBy:   []string{"extra", "groups"},

			check: checks(
				hasError(nil),
				hasSelector("resource,extra,groups"),
			),
		},

		{
			name: "single GroupBySlice value",

			mq:             mustNewQuery(`<<.GroupBySlice>>`, false),
			metricSelector: labels.NewSelector(),
			resource:       schema.GroupResource{Group: "group", Resource: "resource"},

			check: checks(
				hasError(nil),
				hasSelector("[resource]"),
			),
		},

		{
			name: "multiple GroupBySlice values",

			mq:             mustNewQuery(`<<.GroupBySlice>>`, false),
			metricSelector: labels.NewSelector(),
			resource:       schema.GroupResource{Group: "group", Resource: "resource"},
			extraGroupBy:   []string{"extra", "groups"},

			check: checks(
				hasError(nil),
				hasSelector("[resource extra groups]"),
			),
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			selector, err := tc.mq.Build(tc.series, tc.resource, tc.namespace, tc.extraGroupBy, tc.metricSelector, tc.names...)

			if err := tc.check(selector, err); err != nil {
				t.Error(err)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestBuildExternalSelector exercises MetricsQuery.BuildExternal against a
// table of query templates, with and without namespace scoping.
func TestBuildExternalSelector(t *testing.T) {
	// mustNewQuery builds a namespaced external metrics query, failing the test on error.
	mustNewQuery := func(queryTemplate string) MetricsQuery {
		mq, err := NewExternalMetricsQuery(queryTemplate, &resourceConverterMock{true}, true)
		if err != nil {
			t.Fatal(err)
		}
		return mq
	}

	// mustNewNonNamespacedQuery builds an external metrics query with namespacing disabled.
	mustNewNonNamespacedQuery := func(queryTemplate string) MetricsQuery {
		mq, err := NewExternalMetricsQuery(queryTemplate, &resourceConverterMock{true}, false)
		if err != nil {
			t.Fatal(err)
		}
		return mq
	}

	// mustNewLabelRequirement builds a label requirement, failing the test on error.
	mustNewLabelRequirement := func(key string, op selection.Operator, vals []string) *labels.Requirement {
		req, err := labels.NewRequirement(key, op, vals)
		if err != nil {
			t.Fatal(err)
		}
		return req
	}

	tests := []struct {
		name string
		mq   MetricsQuery

		// inputs forwarded to BuildExternal
		series         string
		namespace      string
		groupBy        string
		groupBySlice   []string
		metricSelector labels.Selector

		check checkFunc
	}{
		{
			name: "series",

			mq:             mustNewQuery(`series <<.Series>>`),
			series:         "foo",
			metricSelector: labels.NewSelector(),

			check: checks(
				hasError(nil),
				hasSelector("series foo"),
			),
		},
		{
			name: "single GroupBy value",

			mq:             mustNewQuery(`<<.GroupBy>>`),
			groupBy:        "foo",
			metricSelector: labels.NewSelector(),

			check: checks(
				hasError(nil),
				hasSelector("foo"),
			),
		},
		{
			name: "multiple GroupBySlice values",

			mq:             mustNewQuery(`<<.GroupBySlice>>`),
			groupBySlice:   []string{"foo", "bar"},
			metricSelector: labels.NewSelector(),

			check: checks(
				hasError(nil),
				hasSelector("[foo bar]"),
			),
		},
		{
			name: "multiple GroupBySlice values with namespace",

			mq:             mustNewQuery(`<<index .LabelValuesByName "namespaces">> <<.GroupBySlice>>`),
			namespace:      "default",
			groupBySlice:   []string{"foo", "bar"},
			metricSelector: labels.NewSelector(),

			check: checks(
				hasError(nil),
				hasSelector("default [foo bar]"),
			),
		},
		{
			name: "multiple GroupBySlice values with namespace disabled",

			mq:             mustNewNonNamespacedQuery(`<<index .LabelValuesByName "namespaces">> <<.GroupBySlice>>`),
			namespace:      "default",
			groupBySlice:   []string{"foo", "bar"},
			metricSelector: labels.NewSelector(),

			check: checks(
				hasError(nil),
				hasSelector(" [foo bar]"),
			),
		},
		{
			name: "single LabelMatchers value",

			mq: mustNewQuery(`<<.LabelMatchers>>`),
			metricSelector: labels.NewSelector().Add(
				*mustNewLabelRequirement("foo", selection.Equals, []string{"bar"}),
			),

			check: checks(
				hasError(nil),
				hasSelector(`foo="bar"`),
			),
		},
		{
			name: "single LabelMatchers value with namespace",

			mq:        mustNewQuery(`<<.LabelMatchers>>`),
			namespace: "default",
			metricSelector: labels.NewSelector().Add(
				*mustNewLabelRequirement("foo", selection.Equals, []string{"bar"}),
			),

			check: checks(
				hasError(nil),
				hasSelector(`foo="bar",namespaces="default"`),
			),
		},
		{
			name: "multiple LabelMatchers value",

			mq: mustNewQuery(`<<.LabelMatchers>>`),
			metricSelector: labels.NewSelector().Add(
				*mustNewLabelRequirement("foo", selection.Equals, []string{"bar"}),
				*mustNewLabelRequirement("qux", selection.In, []string{"bar", "baz"}),
			),

			check: checks(
				hasError(nil),
				hasSelector(`foo="bar",qux=~"bar|baz"`),
			),
		},
		{
			name: "single LabelValuesByName value",

			mq: mustNewQuery(`<<.LabelValuesByName>>`),
			metricSelector: labels.NewSelector().Add(
				*mustNewLabelRequirement("foo", selection.Equals, []string{"bar"}),
			),

			check: checks(
				hasError(nil),
				hasSelector("map[foo:bar]"),
			),
		},
		{
			name: "single LabelValuesByName with multiple selectors",

			mq: mustNewQuery(`<<.LabelValuesByName>>`),
			metricSelector: labels.NewSelector().Add(
				*mustNewLabelRequirement("foo", selection.In, []string{"bar", "baz"}),
			),

			check: checks(
				hasError(nil),
				hasSelector("map[foo:bar|baz]"),
			),
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			selector, err := tc.mq.BuildExternal(tc.series, tc.namespace, tc.groupBy, tc.groupBySlice, tc.metricSelector)
			t.Logf("selector: '%v'", selector)

			if err := tc.check(selector, err); err != nil {
				t.Error(err)
			}
		})
	}
}
|
||||||
65
pkg/naming/regex_matcher_test.go
Normal file
65
pkg/naming/regex_matcher_test.go
Normal file
|
|
@ -0,0 +1,65 @@
|
||||||
|
/*
|
||||||
|
Copyright 2019 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package naming
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"sigs.k8s.io/prometheus-adapter/pkg/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestReMatcherIs(t *testing.T) {
|
||||||
|
filter := config.RegexFilter{
|
||||||
|
Is: "my_.*",
|
||||||
|
}
|
||||||
|
|
||||||
|
matcher, err := NewReMatcher(filter)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
result := matcher.Matches("my_label")
|
||||||
|
require.True(t, result)
|
||||||
|
|
||||||
|
result = matcher.Matches("your_label")
|
||||||
|
require.False(t, result)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReMatcherIsNot(t *testing.T) {
|
||||||
|
filter := config.RegexFilter{
|
||||||
|
IsNot: "my_.*",
|
||||||
|
}
|
||||||
|
|
||||||
|
matcher, err := NewReMatcher(filter)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
result := matcher.Matches("my_label")
|
||||||
|
require.False(t, result)
|
||||||
|
|
||||||
|
result = matcher.Matches("your_label")
|
||||||
|
require.True(t, result)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEnforcesIsOrIsNotButNotBoth(t *testing.T) {
|
||||||
|
filter := config.RegexFilter{
|
||||||
|
Is: "my_.*",
|
||||||
|
IsNot: "your_.*",
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := NewReMatcher(filter)
|
||||||
|
require.Error(t, err)
|
||||||
|
}
|
||||||
220
pkg/naming/resource_converter.go
Normal file
220
pkg/naming/resource_converter.go
Normal file
|
|
@ -0,0 +1,220 @@
|
||||||
|
/*
|
||||||
|
Copyright 2019 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package naming
|
||||||
|
|
||||||
|
import (
	"bytes"
	"errors"
	"fmt"
	"strings"
	"sync"
	"text/template"

	pmodel "github.com/prometheus/common/model"

	apimeta "k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/klog/v2"

	"sigs.k8s.io/custom-metrics-apiserver/pkg/provider"

	prom "sigs.k8s.io/prometheus-adapter/pkg/client"
	"sigs.k8s.io/prometheus-adapter/pkg/config"
)
|
||||||
|
|
||||||
|
var (
	// GroupNameSanitizer rewrites API group names into valid Prometheus
	// label characters by replacing "." and "-" with "_".
	GroupNameSanitizer = strings.NewReplacer(".", "_", "-", "_")
	// NsGroupResource is the core "namespaces" group-resource.
	NsGroupResource = schema.GroupResource{Resource: "namespaces"}
	// NodeGroupResource is the core "nodes" group-resource.
	NodeGroupResource = schema.GroupResource{Resource: "nodes"}
	// PVGroupResource is the core "persistentvolumes" group-resource.
	PVGroupResource = schema.GroupResource{Resource: "persistentvolumes"}
)
|
||||||
|
|
||||||
|
// ResourceConverter knows the relationship between Kubernetes group-resources and Prometheus labels,
// and can convert between the two for any given label or series.
type ResourceConverter interface {
	// ResourcesForSeries returns the group-resources associated with the given series,
	// as well as whether or not the given series has the "namespace" resource.
	ResourcesForSeries(series prom.Series) (res []schema.GroupResource, namespaced bool)
	// LabelForResource returns the appropriate label for the given resource.
	LabelForResource(resource schema.GroupResource) (pmodel.LabelName, error)
}
|
||||||
|
|
||||||
|
// resourceConverter caches bidirectional mappings between Prometheus label
// names and Kubernetes group-resources, built from explicit overrides and/or
// a label template.
type resourceConverter struct {
	// labelResourceMu guards both lookup maps below.
	labelResourceMu sync.RWMutex
	labelToResource map[pmodel.LabelName]schema.GroupResource
	resourceToLabel map[schema.GroupResource]pmodel.LabelName
	// labelResExtractor reverses the template: label name -> group-resource.
	// Nil when no resource template was configured.
	labelResExtractor *labelGroupResExtractor
	mapper            apimeta.RESTMapper
	// labelTemplate renders a group-resource into its label name.
	// Nil when only overrides were configured.
	labelTemplate *template.Template
}
|
||||||
|
|
||||||
|
// NewResourceConverter creates a ResourceConverter based on a generic template plus any overrides.
|
||||||
|
// Either overrides or the template may be empty, but not both.
|
||||||
|
func NewResourceConverter(resourceTemplate string, overrides map[string]config.GroupResource, mapper apimeta.RESTMapper) (ResourceConverter, error) {
|
||||||
|
converter := &resourceConverter{
|
||||||
|
labelToResource: make(map[pmodel.LabelName]schema.GroupResource),
|
||||||
|
resourceToLabel: make(map[schema.GroupResource]pmodel.LabelName),
|
||||||
|
mapper: mapper,
|
||||||
|
}
|
||||||
|
|
||||||
|
if resourceTemplate != "" {
|
||||||
|
labelTemplate, err := template.New("resource-label").Delims("<<", ">>").Parse(resourceTemplate)
|
||||||
|
if err != nil {
|
||||||
|
return converter, fmt.Errorf("unable to parse label template %q: %v", resourceTemplate, err)
|
||||||
|
}
|
||||||
|
converter.labelTemplate = labelTemplate
|
||||||
|
|
||||||
|
labelResExtractor, err := newLabelGroupResExtractor(labelTemplate)
|
||||||
|
if err != nil {
|
||||||
|
return converter, fmt.Errorf("unable to generate label format from template %q: %v", resourceTemplate, err)
|
||||||
|
}
|
||||||
|
converter.labelResExtractor = labelResExtractor
|
||||||
|
}
|
||||||
|
|
||||||
|
// invert the structure for consistency with the template
|
||||||
|
for lbl, groupRes := range overrides {
|
||||||
|
infoRaw := provider.CustomMetricInfo{
|
||||||
|
GroupResource: schema.GroupResource{
|
||||||
|
Group: groupRes.Group,
|
||||||
|
Resource: groupRes.Resource,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
info, _, err := infoRaw.Normalized(converter.mapper)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to normalize group-resource %v: %v", groupRes, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
converter.labelToResource[pmodel.LabelName(lbl)] = info.GroupResource
|
||||||
|
converter.resourceToLabel[info.GroupResource] = pmodel.LabelName(lbl)
|
||||||
|
}
|
||||||
|
|
||||||
|
return converter, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *resourceConverter) LabelForResource(resource schema.GroupResource) (pmodel.LabelName, error) {
|
||||||
|
r.labelResourceMu.RLock()
|
||||||
|
// check if we have a cached copy or override
|
||||||
|
lbl, ok := r.resourceToLabel[resource]
|
||||||
|
r.labelResourceMu.RUnlock() // release before we call makeLabelForResource
|
||||||
|
if ok {
|
||||||
|
return lbl, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NB: we don't actually care about the gap between releasing read lock
|
||||||
|
// and acquiring the write lock -- if we do duplicate work sometimes, so be
|
||||||
|
// it, as long as we're correct.
|
||||||
|
|
||||||
|
// otherwise, use the template and save the result
|
||||||
|
lbl, err := r.makeLabelForResource(resource)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("unable to convert resource %s into label: %v", resource.String(), err)
|
||||||
|
}
|
||||||
|
return lbl, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// makeLabelForResource constructs a label name for the given resource, and saves the result.
|
||||||
|
// It must *not* be called under an existing lock.
|
||||||
|
func (r *resourceConverter) makeLabelForResource(resource schema.GroupResource) (pmodel.LabelName, error) {
|
||||||
|
if r.labelTemplate == nil {
|
||||||
|
return "", fmt.Errorf("no generic resource label form specified for this metric")
|
||||||
|
}
|
||||||
|
buff := new(bytes.Buffer)
|
||||||
|
|
||||||
|
singularRes, err := r.mapper.ResourceSingularizer(resource.Resource)
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("unable to singularize resource %s: %v", resource.String(), err)
|
||||||
|
}
|
||||||
|
convResource := schema.GroupResource{
|
||||||
|
Group: GroupNameSanitizer.Replace(resource.Group),
|
||||||
|
Resource: singularRes,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := r.labelTemplate.Execute(buff, convResource); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if buff.Len() == 0 {
|
||||||
|
return "", fmt.Errorf("empty label produced by label template")
|
||||||
|
}
|
||||||
|
lbl := pmodel.LabelName(buff.String())
|
||||||
|
|
||||||
|
r.labelResourceMu.Lock()
|
||||||
|
defer r.labelResourceMu.Unlock()
|
||||||
|
|
||||||
|
r.resourceToLabel[resource] = lbl
|
||||||
|
r.labelToResource[lbl] = resource
|
||||||
|
return lbl, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResourcesForSeries returns the group-resources whose labels appear on the
// given series, plus whether the series appears to be namespaced.
func (r *resourceConverter) ResourcesForSeries(series prom.Series) ([]schema.GroupResource, bool) {
	// use an updates map to avoid having to drop the read lock to update the cache
	// until the end. Since we'll probably have few updates after the first run,
	// this should mean that we rarely have to hold the write lock.
	var resources []schema.GroupResource
	updates := make(map[pmodel.LabelName]schema.GroupResource)
	namespaced := false

	// use an anon func to get the right defer behavior
	func() {
		r.labelResourceMu.RLock()
		defer r.labelResourceMu.RUnlock()

		for lbl := range series.Labels {
			var groupRes schema.GroupResource
			var ok bool

			// check if we have an override
			if groupRes, ok = r.labelToResource[lbl]; ok {
				resources = append(resources, groupRes)
			} else if groupRes, ok = updates[lbl]; ok {
				resources = append(resources, groupRes)
			} else if r.labelResExtractor != nil {
				// if not, check if it matches the form we expect, and if so,
				// convert to a group-resource.
				if groupRes, ok = r.labelResExtractor.GroupResourceForLabel(lbl); ok {
					info, _, err := provider.CustomMetricInfo{GroupResource: groupRes}.Normalized(r.mapper)
					if err != nil {
						// this is likely to show up for a lot of labels, so make it a verbose info log
						klog.V(9).Infof("unable to normalize group-resource %s from label %q, skipping: %v", groupRes.String(), lbl, err)
						continue
					}

					groupRes = info.GroupResource
					resources = append(resources, groupRes)
					updates[lbl] = groupRes
				}
			}

			// NOTE(review): this check also fires when the label did not map to
			// any resource at all (groupRes is then the zero GroupResource,
			// which differs from all three sentinels), marking the series
			// namespaced. Confirm that is the intended behavior.
			if groupRes != NsGroupResource && groupRes != NodeGroupResource && groupRes != PVGroupResource {
				namespaced = true
			}
		}
	}()

	// update the cache for next time. This should only be called by discovery,
	// so we don't really have to worry about the gap between read and write locks
	// (plus, we don't care if someone else updates the cache first, since the results
	// are necessarily the same, so at most we've done extra work).
	if len(updates) > 0 {
		r.labelResourceMu.Lock()
		defer r.labelResourceMu.Unlock()

		for lbl, groupRes := range updates {
			r.labelToResource[lbl] = groupRes
		}
	}

	return resources, namespaced
}
|
||||||
420
pkg/resourceprovider/provider.go
Normal file
420
pkg/resourceprovider/provider.go
Normal file
|
|
@ -0,0 +1,420 @@
|
||||||
|
/*
|
||||||
|
Copyright 2018 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package resourceprovider
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
apimeta "k8s.io/apimachinery/pkg/api/meta"
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/klog/v2"
|
||||||
|
metrics "k8s.io/metrics/pkg/apis/metrics"
|
||||||
|
|
||||||
|
"sigs.k8s.io/metrics-server/pkg/api"
|
||||||
|
|
||||||
|
"sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
"sigs.k8s.io/prometheus-adapter/pkg/config"
|
||||||
|
"sigs.k8s.io/prometheus-adapter/pkg/naming"
|
||||||
|
|
||||||
|
pmodel "github.com/prometheus/common/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// nodeResource and podResource identify the core API resources whose
	// metrics this provider serves.
	nodeResource = schema.GroupResource{Resource: "nodes"}
	podResource  = schema.GroupResource{Resource: "pods"}
)
|
||||||
|
|
||||||
|
// TODO(directxman12): consider support for nanocore values -- adjust scale if less than 1 millicore, or greater than max int64
|
||||||
|
|
||||||
|
// newResourceQuery instantiates query information from the give configuration rule for querying
|
||||||
|
// resource metrics for some resource.
|
||||||
|
func newResourceQuery(cfg config.ResourceRule, mapper apimeta.RESTMapper) (resourceQuery, error) {
|
||||||
|
converter, err := naming.NewResourceConverter(cfg.Resources.Template, cfg.Resources.Overrides, mapper)
|
||||||
|
if err != nil {
|
||||||
|
return resourceQuery{}, fmt.Errorf("unable to construct label-resource converter: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
contQuery, err := naming.NewMetricsQuery(cfg.ContainerQuery, converter)
|
||||||
|
if err != nil {
|
||||||
|
return resourceQuery{}, fmt.Errorf("unable to construct container metrics query: %v", err)
|
||||||
|
}
|
||||||
|
nodeQuery, err := naming.NewMetricsQuery(cfg.NodeQuery, converter)
|
||||||
|
if err != nil {
|
||||||
|
return resourceQuery{}, fmt.Errorf("unable to construct node metrics query: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return resourceQuery{
|
||||||
|
converter: converter,
|
||||||
|
contQuery: contQuery,
|
||||||
|
nodeQuery: nodeQuery,
|
||||||
|
containerLabel: cfg.ContainerLabel,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// resourceQuery represents query information for querying resource metrics for some resource,
// like CPU or memory.
type resourceQuery struct {
	// converter maps between Kubernetes group-resources and Prometheus labels.
	converter naming.ResourceConverter
	// contQuery and nodeQuery are the configured queries for container-level
	// and node-level metrics, respectively.
	contQuery naming.MetricsQuery
	nodeQuery naming.MetricsQuery
	// containerLabel is the Prometheus label that carries the container name
	// (from the rule's ContainerLabel config field).
	containerLabel string
}
|
||||||
|
|
||||||
|
// NewProvider constructs a new MetricsProvider to provide resource metrics from Prometheus using the given rules.
|
||||||
|
func NewProvider(prom client.Client, mapper apimeta.RESTMapper, cfg *config.ResourceRules) (api.MetricsGetter, error) {
|
||||||
|
cpuQuery, err := newResourceQuery(cfg.CPU, mapper)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to construct querier for CPU metrics: %v", err)
|
||||||
|
}
|
||||||
|
memQuery, err := newResourceQuery(cfg.Memory, mapper)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to construct querier for memory metrics: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &resourceProvider{
|
||||||
|
prom: prom,
|
||||||
|
cpu: cpuQuery,
|
||||||
|
mem: memQuery,
|
||||||
|
window: time.Duration(cfg.Window),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// resourceProvider is a MetricsProvider that contacts Prometheus to provide
// the resource metrics.
type resourceProvider struct {
	// prom is the Prometheus client used to execute all queries.
	prom client.Client

	// cpu and mem hold the prepared query information for each resource.
	cpu, mem resourceQuery

	// window is the metrics window reported back on every PodMetrics/NodeMetrics object.
	window time.Duration
}
|
||||||
|
|
||||||
|
// nsQueryResults holds the results of one set
// of queries necessary to construct a resource metrics
// API response for a single namespace.
type nsQueryResults struct {
	// namespace is the namespace the queries were scoped to
	// (empty for node queries).
	namespace string
	// cpu and mem map object names to their matching samples.
	cpu, mem queryResults
	// err, when non-nil, indicates the whole namespace's fetch failed;
	// cpu and mem are not populated in that case.
	err error
}
|
||||||
|
|
||||||
|
// GetPodMetrics implements the api.MetricsProvider interface.
//
// It fans out one concurrent CPU+memory query pair per namespace, then
// assembles a PodMetrics object for each requested pod. Pods or whole
// namespaces with missing data are logged and skipped rather than failing
// the entire request.
func (p *resourceProvider) GetPodMetrics(pods ...*metav1.PartialObjectMetadata) ([]metrics.PodMetrics, error) {
	resMetrics := make([]metrics.PodMetrics, 0, len(pods))

	if len(pods) == 0 {
		return resMetrics, nil
	}

	// TODO(directxman12): figure out how well this scales if we go to list 1000+ pods
	// (and consider adding timeouts)

	// group pods by namespace (we could be listing for all pods in the cluster)
	podsByNs := make(map[string][]string, len(pods))
	for _, pod := range pods {
		podsByNs[pod.Namespace] = append(podsByNs[pod.Namespace], pod.Name)
	}

	// actually fetch the results for each namespace
	now := pmodel.Now()
	// buffered to len(podsByNs): each namespace goroutine sends exactly once,
	// so no sender can block and wg.Wait cannot deadlock
	resChan := make(chan nsQueryResults, len(podsByNs))
	var wg sync.WaitGroup
	wg.Add(len(podsByNs))

	for ns, podNames := range podsByNs {
		// ns/podNames passed as arguments to avoid loop-variable capture
		go func(ns string, podNames []string) {
			defer wg.Done()
			resChan <- p.queryBoth(now, podResource, ns, podNames...)
		}(ns, podNames)
	}

	wg.Wait()
	// all sends have completed, so closing here makes the range below terminate
	close(resChan)

	// index those results in a map for easy lookup
	resultsByNs := make(map[string]nsQueryResults, len(podsByNs))
	for result := range resChan {
		if result.err != nil {
			klog.Errorf("unable to fetch metrics for pods in namespace %q, skipping: %v", result.namespace, result.err)
			continue
		}
		resultsByNs[result.namespace] = result
	}

	// convert the unorganized per-container results into results grouped
	// together by namespace, pod, and container
	for _, pod := range pods {
		podMetric := p.assignForPod(pod, resultsByNs)
		// a nil podMetric means data for this pod was missing; it was already logged
		if podMetric != nil {
			resMetrics = append(resMetrics, *podMetric)
		}
	}

	return resMetrics, nil
}
|
||||||
|
|
||||||
|
// assignForPod takes the resource metrics for all containers in the given pod
|
||||||
|
// from resultsByNs, and places them in MetricsProvider response format in resMetrics,
|
||||||
|
// also recording the earliest time in resTime. It will return without operating if
|
||||||
|
// any data is missing.
|
||||||
|
func (p *resourceProvider) assignForPod(pod *metav1.PartialObjectMetadata, resultsByNs map[string]nsQueryResults) *metrics.PodMetrics {
|
||||||
|
// check to make sure everything is present
|
||||||
|
nsRes, nsResPresent := resultsByNs[pod.Namespace]
|
||||||
|
if !nsResPresent {
|
||||||
|
klog.Errorf("unable to fetch metrics for pods in namespace %q, skipping pod %s", pod.Namespace, pod.String())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
cpuRes, hasResult := nsRes.cpu[pod.Name]
|
||||||
|
if !hasResult {
|
||||||
|
klog.Errorf("unable to fetch CPU metrics for pod %s, skipping", pod.String())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
memRes, hasResult := nsRes.mem[pod.Name]
|
||||||
|
if !hasResult {
|
||||||
|
klog.Errorf("unable to fetch memory metrics for pod %s, skipping", pod.String())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
containerMetrics := make(map[string]metrics.ContainerMetrics)
|
||||||
|
earliestTS := pmodel.Latest
|
||||||
|
|
||||||
|
// organize all the CPU results
|
||||||
|
for _, cpu := range cpuRes {
|
||||||
|
containerName := string(cpu.Metric[pmodel.LabelName(p.cpu.containerLabel)])
|
||||||
|
if _, present := containerMetrics[containerName]; !present {
|
||||||
|
containerMetrics[containerName] = metrics.ContainerMetrics{
|
||||||
|
Name: containerName,
|
||||||
|
Usage: corev1.ResourceList{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
containerMetrics[containerName].Usage[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(cpu.Value*1000.0), resource.DecimalSI)
|
||||||
|
if cpu.Timestamp.Before(earliestTS) {
|
||||||
|
earliestTS = cpu.Timestamp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// organize the memory results
|
||||||
|
for _, mem := range memRes {
|
||||||
|
containerName := string(mem.Metric[pmodel.LabelName(p.mem.containerLabel)])
|
||||||
|
if _, present := containerMetrics[containerName]; !present {
|
||||||
|
containerMetrics[containerName] = metrics.ContainerMetrics{
|
||||||
|
Name: containerName,
|
||||||
|
Usage: corev1.ResourceList{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
containerMetrics[containerName].Usage[corev1.ResourceMemory] = *resource.NewMilliQuantity(int64(mem.Value*1000.0), resource.BinarySI)
|
||||||
|
if mem.Timestamp.Before(earliestTS) {
|
||||||
|
earliestTS = mem.Timestamp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check for any containers that have either memory usage or CPU usage, but not both
|
||||||
|
for _, containerMetric := range containerMetrics {
|
||||||
|
_, hasMemory := containerMetric.Usage[corev1.ResourceMemory]
|
||||||
|
_, hasCPU := containerMetric.Usage[corev1.ResourceCPU]
|
||||||
|
if hasMemory && !hasCPU {
|
||||||
|
containerMetric.Usage[corev1.ResourceCPU] = *resource.NewMilliQuantity(int64(0), resource.BinarySI)
|
||||||
|
} else if hasCPU && !hasMemory {
|
||||||
|
containerMetric.Usage[corev1.ResourceMemory] = *resource.NewMilliQuantity(int64(0), resource.BinarySI)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
podMetric := &metrics.PodMetrics{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: pod.Name,
|
||||||
|
Namespace: pod.Namespace,
|
||||||
|
Labels: pod.Labels,
|
||||||
|
CreationTimestamp: metav1.Now(),
|
||||||
|
},
|
||||||
|
// store the time in the final format
|
||||||
|
Timestamp: metav1.NewTime(earliestTS.Time()),
|
||||||
|
Window: metav1.Duration{Duration: p.window},
|
||||||
|
}
|
||||||
|
|
||||||
|
// store the container metrics in the final format
|
||||||
|
podMetric.Containers = make([]metrics.ContainerMetrics, 0, len(containerMetrics))
|
||||||
|
for _, containerMetric := range containerMetrics {
|
||||||
|
podMetric.Containers = append(podMetric.Containers, containerMetric)
|
||||||
|
}
|
||||||
|
|
||||||
|
return podMetric
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetNodeMetrics implements the api.MetricsProvider interface.
|
||||||
|
func (p *resourceProvider) GetNodeMetrics(nodes ...*corev1.Node) ([]metrics.NodeMetrics, error) {
|
||||||
|
resMetrics := make([]metrics.NodeMetrics, 0, len(nodes))
|
||||||
|
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return resMetrics, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
now := pmodel.Now()
|
||||||
|
nodeNames := make([]string, 0, len(nodes))
|
||||||
|
for _, node := range nodes {
|
||||||
|
nodeNames = append(nodeNames, node.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// run the actual query
|
||||||
|
qRes := p.queryBoth(now, nodeResource, "", nodeNames...)
|
||||||
|
if qRes.err != nil {
|
||||||
|
klog.Errorf("failed querying node metrics: %v", qRes.err)
|
||||||
|
return resMetrics, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// organize the results
|
||||||
|
for i, nodeName := range nodeNames {
|
||||||
|
// skip if any data is missing
|
||||||
|
rawCPUs, gotResult := qRes.cpu[nodeName]
|
||||||
|
if !gotResult {
|
||||||
|
klog.V(1).Infof("missing CPU for node %q, skipping", nodeName)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
rawMems, gotResult := qRes.mem[nodeName]
|
||||||
|
if !gotResult {
|
||||||
|
klog.V(1).Infof("missing memory for node %q, skipping", nodeName)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
rawMem := rawMems[0]
|
||||||
|
rawCPU := rawCPUs[0]
|
||||||
|
|
||||||
|
// use the earliest timestamp available (in order to be conservative
|
||||||
|
// when determining if metrics are tainted by startup)
|
||||||
|
ts := rawCPU.Timestamp.Time()
|
||||||
|
if ts.After(rawMem.Timestamp.Time()) {
|
||||||
|
ts = rawMem.Timestamp.Time()
|
||||||
|
}
|
||||||
|
|
||||||
|
// store the results
|
||||||
|
resMetrics = append(resMetrics, metrics.NodeMetrics{
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: nodes[i].Name,
|
||||||
|
Labels: nodes[i].Labels,
|
||||||
|
CreationTimestamp: metav1.Now(),
|
||||||
|
},
|
||||||
|
Usage: corev1.ResourceList{
|
||||||
|
corev1.ResourceCPU: *resource.NewMilliQuantity(int64(rawCPU.Value*1000.0), resource.DecimalSI),
|
||||||
|
corev1.ResourceMemory: *resource.NewMilliQuantity(int64(rawMem.Value*1000.0), resource.BinarySI),
|
||||||
|
},
|
||||||
|
Timestamp: metav1.NewTime(ts),
|
||||||
|
Window: metav1.Duration{Duration: p.window},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return resMetrics, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// queryBoth queries for both CPU and memory metrics on the given
|
||||||
|
// Kubernetes API resource (pods or nodes), and errors out if
|
||||||
|
// either query fails.
|
||||||
|
func (p *resourceProvider) queryBoth(now pmodel.Time, resource schema.GroupResource, namespace string, names ...string) nsQueryResults {
|
||||||
|
var cpuRes, memRes queryResults
|
||||||
|
var cpuErr, memErr error
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(2)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
cpuRes, cpuErr = p.runQuery(now, p.cpu, resource, namespace, names...)
|
||||||
|
}()
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
memRes, memErr = p.runQuery(now, p.mem, resource, namespace, names...)
|
||||||
|
}()
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
if cpuErr != nil {
|
||||||
|
return nsQueryResults{
|
||||||
|
namespace: namespace,
|
||||||
|
err: fmt.Errorf("unable to fetch node CPU metrics: %v", cpuErr),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if memErr != nil {
|
||||||
|
return nsQueryResults{
|
||||||
|
namespace: namespace,
|
||||||
|
err: fmt.Errorf("unable to fetch node memory metrics: %v", memErr),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nsQueryResults{
|
||||||
|
namespace: namespace,
|
||||||
|
cpu: cpuRes,
|
||||||
|
mem: memRes,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// queryResults maps an object name (pod or node, as extracted from the
// resource's identifying label) to all the samples matching that object.
type queryResults map[string][]*pmodel.Sample
|
||||||
|
|
||||||
|
// runQuery actually queries Prometheus for the metric represented by the given query information, on
|
||||||
|
// the given Kubernetes API resource (pods or nodes).
|
||||||
|
func (p *resourceProvider) runQuery(now pmodel.Time, queryInfo resourceQuery, resource schema.GroupResource, namespace string, names ...string) (queryResults, error) {
|
||||||
|
var query client.Selector
|
||||||
|
var err error
|
||||||
|
|
||||||
|
// build the query, which needs the special "container" group by if this is for pod metrics
|
||||||
|
if resource == nodeResource {
|
||||||
|
query, err = queryInfo.nodeQuery.Build("", resource, namespace, nil, labels.Everything(), names...)
|
||||||
|
} else {
|
||||||
|
extraGroupBy := []string{queryInfo.containerLabel}
|
||||||
|
query, err = queryInfo.contQuery.Build("", resource, namespace, extraGroupBy, labels.Everything(), names...)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to construct query: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// run the query
|
||||||
|
rawRes, err := p.prom.Query(context.Background(), now, query)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to execute query: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if rawRes.Type != pmodel.ValVector || rawRes.Vector == nil {
|
||||||
|
return nil, fmt.Errorf("invalid or empty value of non-vector type (%s) returned", rawRes.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
// check the appropriate label for the resource in question
|
||||||
|
resourceLbl, err := queryInfo.converter.LabelForResource(resource)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("unable to find label for resource %s: %v", resource.String(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// associate the results back to each given pod or node
|
||||||
|
res := make(queryResults, len(*rawRes.Vector))
|
||||||
|
for _, sample := range *rawRes.Vector {
|
||||||
|
// skip empty samples
|
||||||
|
if sample == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// replace NaN and negative values by zero
|
||||||
|
if math.IsNaN(float64(sample.Value)) || sample.Value < 0 {
|
||||||
|
sample.Value = 0
|
||||||
|
}
|
||||||
|
resKey := string(sample.Metric[resourceLbl])
|
||||||
|
res[resKey] = append(res[resKey], sample)
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
@ -14,8 +14,16 @@ See the License for the specific language governing permissions and
|
||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
// +k8s:deepcopy-gen=package
|
package resourceprovider
|
||||||
// +k8s:conversion-gen=k8s.io/metrics/pkg/apis/external_metrics
|
|
||||||
// +k8s:openapi-gen=true
|
|
||||||
|
|
||||||
package v1beta1
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
. "github.com/onsi/ginkgo"
|
||||||
|
. "github.com/onsi/gomega"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestProvider wires the Ginkgo spec suite for this package into the
// standard `go test` runner.
func TestProvider(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Resource Metrics Provider Suite")
}
|
||||||
352
pkg/resourceprovider/provider_test.go
Normal file
352
pkg/resourceprovider/provider_test.go
Normal file
|
|
@ -0,0 +1,352 @@
|
||||||
|
/*
|
||||||
|
Copyright 2018 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package resourceprovider
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
apimeta "k8s.io/apimachinery/pkg/api/meta"
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/labels"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||||
|
"k8s.io/metrics/pkg/apis/metrics"
|
||||||
|
|
||||||
|
"sigs.k8s.io/metrics-server/pkg/api"
|
||||||
|
|
||||||
|
config "sigs.k8s.io/prometheus-adapter/cmd/config-gen/utils"
|
||||||
|
prom "sigs.k8s.io/prometheus-adapter/pkg/client"
|
||||||
|
fakeprom "sigs.k8s.io/prometheus-adapter/pkg/client/fake"
|
||||||
|
|
||||||
|
. "github.com/onsi/ginkgo"
|
||||||
|
. "github.com/onsi/gomega"
|
||||||
|
pmodel "github.com/prometheus/common/model"
|
||||||
|
)
|
||||||
|
|
||||||
|
func restMapper() apimeta.RESTMapper {
|
||||||
|
mapper := apimeta.NewDefaultRESTMapper([]schema.GroupVersion{corev1.SchemeGroupVersion})
|
||||||
|
|
||||||
|
mapper.Add(corev1.SchemeGroupVersion.WithKind("Pod"), apimeta.RESTScopeNamespace)
|
||||||
|
mapper.Add(corev1.SchemeGroupVersion.WithKind("Node"), apimeta.RESTScopeRoot)
|
||||||
|
mapper.Add(corev1.SchemeGroupVersion.WithKind("Namespace"), apimeta.RESTScopeRoot)
|
||||||
|
|
||||||
|
return mapper
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildPodSample(namespace, pod, container string, val float64, ts int64) *pmodel.Sample {
|
||||||
|
return &pmodel.Sample{
|
||||||
|
Metric: pmodel.Metric{
|
||||||
|
"namespace": pmodel.LabelValue(namespace),
|
||||||
|
"pod": pmodel.LabelValue(pod),
|
||||||
|
"container": pmodel.LabelValue(container),
|
||||||
|
},
|
||||||
|
Value: pmodel.SampleValue(val),
|
||||||
|
Timestamp: pmodel.Time(ts),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildNodeSample(node string, val float64, ts int64) *pmodel.Sample {
|
||||||
|
return &pmodel.Sample{
|
||||||
|
Metric: pmodel.Metric{
|
||||||
|
"instance": pmodel.LabelValue(node),
|
||||||
|
"id": "/",
|
||||||
|
},
|
||||||
|
Value: pmodel.SampleValue(val),
|
||||||
|
Timestamp: pmodel.Time(ts),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildQueryRes(metric string, samples ...*pmodel.Sample) prom.QueryResult {
|
||||||
|
for _, sample := range samples {
|
||||||
|
sample.Metric[pmodel.MetricNameLabel] = pmodel.LabelValue(metric)
|
||||||
|
}
|
||||||
|
vec := pmodel.Vector(samples)
|
||||||
|
return prom.QueryResult{
|
||||||
|
Type: pmodel.ValVector,
|
||||||
|
Vector: &vec,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// mustBuild unwraps the (Selector, error) pair returned by a MetricsQuery
// Build call, failing the current spec if the build errored.
func mustBuild(sel prom.Selector, err error) prom.Selector {
	Expect(err).NotTo(HaveOccurred())
	return sel
}
|
||||||
|
|
||||||
|
func buildResList(cpu, memory float64) corev1.ResourceList {
|
||||||
|
return corev1.ResourceList{
|
||||||
|
corev1.ResourceCPU: *resource.NewMilliQuantity(int64(cpu*1000.0), resource.DecimalSI),
|
||||||
|
corev1.ResourceMemory: *resource.NewMilliQuantity(int64(memory*1000.0), resource.BinarySI),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ginkgo specs exercising the resource metrics provider against a fake
// Prometheus client keyed by the exact selectors the provider should build.
var _ = Describe("Resource Metrics Provider", func() {
	var (
		prov                   api.MetricsGetter
		fakeProm               *fakeprom.FakePrometheusClient
		cpuQueries, memQueries resourceQuery
	)

	BeforeEach(func() {
		By("setting up a fake prometheus client and provider")
		mapper := restMapper()

		cfg := config.DefaultConfig(1*time.Minute, "")

		var err error
		cpuQueries, err = newResourceQuery(cfg.ResourceRules.CPU, mapper)
		Expect(err).NotTo(HaveOccurred())
		memQueries, err = newResourceQuery(cfg.ResourceRules.Memory, mapper)
		Expect(err).NotTo(HaveOccurred())

		fakeProm = &fakeprom.FakePrometheusClient{}
		fakeProm.AcceptableInterval = pmodel.Interval{End: pmodel.Latest}

		prov, err = NewProvider(fakeProm, restMapper(), cfg.ResourceRules)
		Expect(err).NotTo(HaveOccurred())
	})

	// happy path: pods spread over two namespaces, full CPU+memory data
	It("should be able to list metrics pods across different namespaces", func() {
		pods := []*metav1.PartialObjectMetadata{
			{ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "pod1"}},
			{ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "pod3"}},
			{ObjectMeta: metav1.ObjectMeta{Namespace: "other-ns", Name: "pod27"}},
		}
		// NOTE(review): the memory entries group by cpuQueries.containerLabel;
		// presumably both queries use the same container label — confirm.
		fakeProm.QueryResults = map[prom.Selector]prom.QueryResult{
			mustBuild(cpuQueries.contQuery.Build("", podResource, "some-ns", []string{cpuQueries.containerLabel}, labels.Everything(), "pod1", "pod3")): buildQueryRes("container_cpu_usage_seconds_total",
				buildPodSample("some-ns", "pod1", "cont1", 1100.0, 10),
				buildPodSample("some-ns", "pod1", "cont2", 1110.0, 20),
				buildPodSample("some-ns", "pod3", "cont1", 1300.0, 10),
				buildPodSample("some-ns", "pod3", "cont2", 1310.0, 20),
			),
			mustBuild(cpuQueries.contQuery.Build("", podResource, "other-ns", []string{cpuQueries.containerLabel}, labels.Everything(), "pod27")): buildQueryRes("container_cpu_usage_seconds_total",
				buildPodSample("other-ns", "pod27", "cont1", 2200.0, 270),
			),
			mustBuild(memQueries.contQuery.Build("", podResource, "some-ns", []string{cpuQueries.containerLabel}, labels.Everything(), "pod1", "pod3")): buildQueryRes("container_memory_working_set_bytes",
				buildPodSample("some-ns", "pod1", "cont1", 3100.0, 11),
				buildPodSample("some-ns", "pod1", "cont2", 3110.0, 21),
				buildPodSample("some-ns", "pod3", "cont1", 3300.0, 11),
				buildPodSample("some-ns", "pod3", "cont2", 3310.0, 21),
			),
			mustBuild(memQueries.contQuery.Build("", podResource, "other-ns", []string{cpuQueries.containerLabel}, labels.Everything(), "pod27")): buildQueryRes("container_memory_working_set_bytes",
				buildPodSample("other-ns", "pod27", "cont1", 4200.0, 271),
			),
		}

		By("querying for metrics for some pods")
		podMetrics, err := prov.GetPodMetrics(pods...)
		Expect(err).NotTo(HaveOccurred())

		By("verifying that metrics have been fetched for all the pods")
		Expect(podMetrics).To(HaveLen(3))

		By("verifying that the reported times for each are the earliest times for each pod")
		Expect(podMetrics[0].Timestamp.Time).To(Equal(pmodel.Time(10).Time()))
		Expect(podMetrics[0].Window.Duration).To(Equal(time.Minute))

		Expect(podMetrics[1].Timestamp.Time).To(Equal(pmodel.Time(10).Time()))
		Expect(podMetrics[1].Window.Duration).To(Equal(time.Minute))

		Expect(podMetrics[2].Timestamp.Time).To(Equal(pmodel.Time(270).Time()))
		Expect(podMetrics[2].Window.Duration).To(Equal(time.Minute))

		By("verifying that the right metrics were fetched")
		Expect(podMetrics).To(HaveLen(3))
		Expect(podMetrics[0].Containers).To(ConsistOf(
			metrics.ContainerMetrics{Name: "cont1", Usage: buildResList(1100.0, 3100.0)},
			metrics.ContainerMetrics{Name: "cont2", Usage: buildResList(1110.0, 3110.0)},
		))
		Expect(podMetrics[1].Containers).To(ConsistOf(
			metrics.ContainerMetrics{Name: "cont1", Usage: buildResList(1300.0, 3300.0)},
			metrics.ContainerMetrics{Name: "cont2", Usage: buildResList(1310.0, 3310.0)},
		))

		Expect(podMetrics[2].Containers).To(ConsistOf(
			metrics.ContainerMetrics{Name: "cont1", Usage: buildResList(2200.0, 4200.0)},
		))
	})

	// a pod absent from the query results is silently dropped from the response
	It("should return nil metrics for missing pods, but still return partial results", func() {
		fakeProm.QueryResults = map[prom.Selector]prom.QueryResult{
			mustBuild(cpuQueries.contQuery.Build("", podResource, "some-ns", []string{cpuQueries.containerLabel}, labels.Everything(), "pod1", "pod-nonexistant")): buildQueryRes("container_cpu_usage_seconds_total",
				buildPodSample("some-ns", "pod1", "cont1", 1100.0, 10),
				buildPodSample("some-ns", "pod1", "cont2", 1110.0, 20),
			),
			mustBuild(memQueries.contQuery.Build("", podResource, "some-ns", []string{cpuQueries.containerLabel}, labels.Everything(), "pod1", "pod-nonexistant")): buildQueryRes("container_memory_working_set_bytes",
				buildPodSample("some-ns", "pod1", "cont1", 3100.0, 11),
				buildPodSample("some-ns", "pod1", "cont2", 3110.0, 21),
			),
		}

		By("querying for metrics for some pods, one of which is missing")
		podMetrics, err := prov.GetPodMetrics(
			&metav1.PartialObjectMetadata{ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "pod1"}},
			&metav1.PartialObjectMetadata{ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "pod-nonexistant"}},
		)
		Expect(err).NotTo(HaveOccurred())

		By("verifying that the missing pod had no metrics")
		Expect(podMetrics).To(HaveLen(1))

		By("verifying that the rest of time metrics and times are correct")
		Expect(podMetrics[0].Timestamp.Time).To(Equal(pmodel.Time(10).Time()))
		Expect(podMetrics[0].Window.Duration).To(Equal(time.Minute))
		Expect(podMetrics[0].Containers).To(ConsistOf(
			metrics.ContainerMetrics{Name: "cont1", Usage: buildResList(1100.0, 3100.0)},
			metrics.ContainerMetrics{Name: "cont2", Usage: buildResList(1110.0, 3110.0)},
		))
	})

	// NaN and negative sample values must be clamped to zero in the response
	It("should return metrics of value zero when pod metrics have NaN or negative values", func() {
		fakeProm.QueryResults = map[prom.Selector]prom.QueryResult{
			mustBuild(cpuQueries.contQuery.Build("", podResource, "some-ns", []string{cpuQueries.containerLabel}, labels.Everything(), "pod1", "pod3")): buildQueryRes("container_cpu_usage_seconds_total",
				buildPodSample("some-ns", "pod1", "cont1", -1100.0, 10),
				buildPodSample("some-ns", "pod1", "cont2", math.NaN(), 20),
				buildPodSample("some-ns", "pod3", "cont1", -1300.0, 10),
				buildPodSample("some-ns", "pod3", "cont2", 1310.0, 20),
			),
			mustBuild(memQueries.contQuery.Build("", podResource, "some-ns", []string{cpuQueries.containerLabel}, labels.Everything(), "pod1", "pod3")): buildQueryRes("container_memory_working_set_bytes",
				buildPodSample("some-ns", "pod1", "cont1", 3100.0, 11),
				buildPodSample("some-ns", "pod1", "cont2", -3110.0, 21),
				buildPodSample("some-ns", "pod3", "cont1", math.NaN(), 11),
				buildPodSample("some-ns", "pod3", "cont2", -3310.0, 21),
			),
		}

		By("querying for metrics for some pods")
		podMetrics, err := prov.GetPodMetrics(
			&metav1.PartialObjectMetadata{ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "pod1"}},
			&metav1.PartialObjectMetadata{ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "pod3"}},
		)
		Expect(err).NotTo(HaveOccurred())

		By("verifying that metrics have been fetched for all the pods")
		Expect(podMetrics).To(HaveLen(2))

		By("verifying that the reported times for each are the earliest times for each pod")
		Expect(podMetrics[0].Timestamp.Time).To(Equal(pmodel.Time(10).Time()))
		Expect(podMetrics[0].Window.Duration).To(Equal(time.Minute))
		Expect(podMetrics[1].Timestamp.Time).To(Equal(pmodel.Time(10).Time()))
		Expect(podMetrics[1].Window.Duration).To(Equal(time.Minute))

		By("verifying that NaN and negative values were replaced by zero")
		Expect(podMetrics[0].Containers).To(ConsistOf(
			metrics.ContainerMetrics{Name: "cont1", Usage: buildResList(0, 3100.0)},
			metrics.ContainerMetrics{Name: "cont2", Usage: buildResList(0, 0)},
		))
		Expect(podMetrics[1].Containers).To(ConsistOf(
			metrics.ContainerMetrics{Name: "cont1", Usage: buildResList(0, 0)},
			metrics.ContainerMetrics{Name: "cont2", Usage: buildResList(1310.0, 0)},
		))
	})

	// happy path for nodes: both queries return one sample per node
	It("should be able to list metrics for nodes", func() {
		fakeProm.QueryResults = map[prom.Selector]prom.QueryResult{
			mustBuild(cpuQueries.nodeQuery.Build("", nodeResource, "", nil, labels.Everything(), "node1", "node2")): buildQueryRes("container_cpu_usage_seconds_total",
				buildNodeSample("node1", 1100.0, 10),
				buildNodeSample("node2", 1200.0, 14),
			),
			mustBuild(memQueries.nodeQuery.Build("", nodeResource, "", nil, labels.Everything(), "node1", "node2")): buildQueryRes("container_memory_working_set_bytes",
				buildNodeSample("node1", 2100.0, 11),
				buildNodeSample("node2", 2200.0, 12),
			),
		}
		By("querying for metrics for some nodes")
		nodeMetrics, err := prov.GetNodeMetrics(
			&corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}},
			&corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}},
		)
		Expect(err).NotTo(HaveOccurred())

		By("verifying that metrics have been fetched for all the nodes")
		Expect(nodeMetrics).To(HaveLen(2))

		By("verifying that the reported times for each are the earliest times for each node")
		Expect(nodeMetrics[0].Timestamp.Time).To(Equal(pmodel.Time(10).Time()))
		Expect(nodeMetrics[0].Window.Duration).To(Equal(time.Minute))
		Expect(nodeMetrics[1].Timestamp.Time).To(Equal(pmodel.Time(12).Time()))
		Expect(nodeMetrics[1].Window.Duration).To(Equal(time.Minute))

		By("verifying that the right metrics were fetched")
		Expect(nodeMetrics[0].Usage).To(Equal(buildResList(1100.0, 2100.0)))
		Expect(nodeMetrics[1].Usage).To(Equal(buildResList(1200.0, 2200.0)))
	})

	// a node absent from the query results is silently dropped from the response
	It("should return nil metrics for missing nodes, but still return partial results", func() {
		fakeProm.QueryResults = map[prom.Selector]prom.QueryResult{
			mustBuild(cpuQueries.nodeQuery.Build("", nodeResource, "", nil, labels.Everything(), "node1", "node2", "node3")): buildQueryRes("container_cpu_usage_seconds_total",
				buildNodeSample("node1", 1100.0, 10),
				buildNodeSample("node2", 1200.0, 14),
			),
			mustBuild(memQueries.nodeQuery.Build("", nodeResource, "", nil, labels.Everything(), "node1", "node2", "node3")): buildQueryRes("container_memory_working_set_bytes",
				buildNodeSample("node1", 2100.0, 11),
				buildNodeSample("node2", 2200.0, 12),
			),
		}
		By("querying for metrics for some nodes, one of which is missing")
		nodeMetrics, err := prov.GetNodeMetrics(
			&corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}},
			&corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}},
			&corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node3"}},
		)
		Expect(err).NotTo(HaveOccurred())

		By("verifying that the missing pod had no metrics")
		Expect(nodeMetrics).To(HaveLen(2))

		By("verifying that the rest of time metrics and times are correct")
		Expect(nodeMetrics[0].Usage).To(Equal(buildResList(1100.0, 2100.0)))
		Expect(nodeMetrics[0].Timestamp.Time).To(Equal(pmodel.Time(10).Time()))
		Expect(nodeMetrics[0].Window.Duration).To(Equal(time.Minute))
		Expect(nodeMetrics[1].Usage).To(Equal(buildResList(1200.0, 2200.0)))
		Expect(nodeMetrics[1].Timestamp.Time).To(Equal(pmodel.Time(12).Time()))
		Expect(nodeMetrics[1].Window.Duration).To(Equal(time.Minute))
	})

	// NaN and negative node sample values must also be clamped to zero
	It("should return metrics of value zero when node metrics have NaN or negative values", func() {
		fakeProm.QueryResults = map[prom.Selector]prom.QueryResult{
			mustBuild(cpuQueries.nodeQuery.Build("", nodeResource, "", nil, labels.Everything(), "node1", "node2")): buildQueryRes("container_cpu_usage_seconds_total",
				buildNodeSample("node1", -1100.0, 10),
				buildNodeSample("node2", 1200.0, 14),
			),
			mustBuild(memQueries.nodeQuery.Build("", nodeResource, "", nil, labels.Everything(), "node1", "node2")): buildQueryRes("container_memory_working_set_bytes",
				buildNodeSample("node1", 2100.0, 11),
				buildNodeSample("node2", math.NaN(), 12),
			),
		}
		By("querying for metrics for some nodes")
		nodeMetrics, err := prov.GetNodeMetrics(
			&corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}},
			&corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2"}},
		)
		Expect(err).NotTo(HaveOccurred())

		By("verifying that metrics have been fetched for all the nodes")
		Expect(nodeMetrics).To(HaveLen(2))

		By("verifying that the reported times for each are the earliest times for each pod")
		Expect(nodeMetrics[0].Timestamp.Time).To(Equal(pmodel.Time(10).Time()))
		Expect(nodeMetrics[0].Window.Duration).To(Equal(time.Minute))
		Expect(nodeMetrics[1].Timestamp.Time).To(Equal(pmodel.Time(12).Time()))
		Expect(nodeMetrics[1].Window.Duration).To(Equal(time.Minute))

		By("verifying that NaN and negative values were replaced by zero")
		Expect(nodeMetrics[0].Usage).To(Equal(buildResList(0, 2100.0)))
		Expect(nodeMetrics[1].Usage).To(Equal(buildResList(1200.0, 0)))
	})
})
|
||||||
41
test/README.md
Normal file
41
test/README.md
Normal file
|
|
@ -0,0 +1,41 @@
|
||||||
|
# End-to-end tests
|
||||||
|
|
||||||
|
## With [kind](https://kind.sigs.k8s.io/)
|
||||||
|
|
||||||
|
[`kind`](https://kind.sigs.k8s.io/) and `kubectl` are automatically downloaded
unless `SKIP_INSTALL=true` is set.
|
||||||
|
A `kind` cluster is automatically created before the tests, and deleted after
|
||||||
|
the tests.
|
||||||
|
The `prometheus-adapter` container image is built locally and imported
|
||||||
|
into the cluster.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
KIND_E2E=true make test-e2e
|
||||||
|
```
|
||||||
|
|
||||||
|
## With an existing Kubernetes cluster
|
||||||
|
|
||||||
|
If you already have a Kubernetes cluster, you can use:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
KUBECONFIG="/path/to/kube/config" REGISTRY="my.registry/prefix" make test-e2e
|
||||||
|
```
|
||||||
|
|
||||||
|
- The cluster should not have a namespace `prometheus-adapter-e2e`.
|
||||||
|
The namespace will be created and deleted as part of the E2E tests.
|
||||||
|
- `KUBECONFIG` is the path of the [`kubeconfig` file].
|
||||||
|
**Optional**, defaults to `${HOME}/.kube/config`
|
||||||
|
- `REGISTRY` is the image registry where the container image should be pushed.
|
||||||
|
**Required**.
|
||||||
|
|
||||||
|
[`kubeconfig` file]: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/
|
||||||
|
|
||||||
|
## Additional environment variables
|
||||||
|
|
||||||
|
These environment variables may also be used (with any non-empty value):
|
||||||
|
|
||||||
|
- `SKIP_INSTALL`: skip the installation of `kind` and `kubectl` binaries;
|
||||||
|
- `SKIP_CLEAN_AFTER`: skip the deletion of resources (`Kind` cluster or
|
||||||
|
Kubernetes namespace) and of the temporary directory `.e2e`;
|
||||||
|
- `CLEAN_BEFORE`: clean before running the tests, e.g. if `SKIP_CLEAN_AFTER`
|
||||||
|
was used on the previous run.
|
||||||
213
test/e2e/e2e_test.go
Normal file
213
test/e2e/e2e_test.go
Normal file
|
|
@ -0,0 +1,213 @@
|
||||||
|
/*
|
||||||
|
Copyright 2022 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package e2e
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
|
||||||
|
monitoring "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
|
clientset "k8s.io/client-go/kubernetes"
|
||||||
|
"k8s.io/client-go/tools/clientcmd"
|
||||||
|
metricsv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||||
|
metrics "k8s.io/metrics/pkg/client/clientset/versioned"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
ns = "prometheus-adapter-e2e"
|
||||||
|
prometheusInstance = "prometheus"
|
||||||
|
deployment = "prometheus-adapter"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
client clientset.Interface
|
||||||
|
promOpClient monitoring.Interface
|
||||||
|
metricsClient metrics.Interface
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMain(m *testing.M) {
|
||||||
|
kubeconfig := os.Getenv("KUBECONFIG")
|
||||||
|
if len(kubeconfig) == 0 {
|
||||||
|
log.Fatal("KUBECONFIG not provided")
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
client, promOpClient, metricsClient, err = initializeClients(kubeconfig)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Cannot create clients: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
err = waitForPrometheusReady(ctx, ns, prometheusInstance)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Prometheus instance 'prometheus' not ready: %v", err)
|
||||||
|
}
|
||||||
|
err = waitForDeploymentReady(ctx, ns, deployment)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Deployment prometheus-adapter not ready: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
exitVal := m.Run()
|
||||||
|
os.Exit(exitVal)
|
||||||
|
}
|
||||||
|
|
||||||
|
func initializeClients(kubeconfig string) (clientset.Interface, monitoring.Interface, metrics.Interface, error) {
|
||||||
|
cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, nil, fmt.Errorf("Error during client configuration with %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
clientSet, err := clientset.NewForConfig(cfg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, nil, fmt.Errorf("Error during client creation with %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
promOpClient, err := monitoring.NewForConfig(cfg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, nil, fmt.Errorf("Error during dynamic client creation with %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
metricsClientSet, err := metrics.NewForConfig(cfg)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, nil, fmt.Errorf("Error during metrics client creation with %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return clientSet, promOpClient, metricsClientSet, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func waitForPrometheusReady(ctx context.Context, namespace string, name string) error {
|
||||||
|
return wait.PollUntilContextTimeout(ctx, 5*time.Second, 120*time.Second, true, func(ctx context.Context) (bool, error) {
|
||||||
|
prom, err := promOpClient.MonitoringV1().Prometheuses(ns).Get(ctx, name, metav1.GetOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var reconciled, available *monitoringv1.Condition
|
||||||
|
for _, condition := range prom.Status.Conditions {
|
||||||
|
cond := condition
|
||||||
|
if cond.Type == monitoringv1.Reconciled {
|
||||||
|
reconciled = &cond
|
||||||
|
} else if cond.Type == monitoringv1.Available {
|
||||||
|
available = &cond
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if reconciled == nil {
|
||||||
|
log.Printf("Prometheus instance '%s': Waiting for reconciliation status...", name)
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
if reconciled.Status != monitoringv1.ConditionTrue {
|
||||||
|
log.Printf("Prometheus instance '%s': Reconciiled = %v. Waiting for reconciliation (reason %s, %q)...", name, reconciled.Status, reconciled.Reason, reconciled.Message)
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
specReplicas := *prom.Spec.Replicas
|
||||||
|
availableReplicas := prom.Status.AvailableReplicas
|
||||||
|
if specReplicas != availableReplicas {
|
||||||
|
log.Printf("Prometheus instance '%s': %v/%v pods are ready. Waiting for all pods to be ready...", name, availableReplicas, specReplicas)
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if available == nil {
|
||||||
|
log.Printf("Prometheus instance '%s': Waiting for Available status...", name)
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
if available.Status != monitoringv1.ConditionTrue {
|
||||||
|
log.Printf("Prometheus instance '%s': Available = %v. Waiting for Available status... (reason %s, %q)", name, available.Status, available.Reason, available.Message)
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Prometheus instance '%s': Ready.", name)
|
||||||
|
return true, nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func waitForDeploymentReady(ctx context.Context, namespace string, name string) error {
|
||||||
|
return wait.PollUntilContextTimeout(ctx, 5*time.Second, 30*time.Second, true, func(ctx context.Context) (bool, error) {
|
||||||
|
sts, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
if sts.Status.ReadyReplicas == *sts.Spec.Replicas {
|
||||||
|
log.Printf("Deployment %s: %v/%v pods are ready.", name, sts.Status.ReadyReplicas, *sts.Spec.Replicas)
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
log.Printf("Deployment %s: %v/%v pods are ready. Waiting for all pods to be ready...", name, sts.Status.ReadyReplicas, *sts.Spec.Replicas)
|
||||||
|
return false, nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNodeMetrics(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
var nodeMetrics *metricsv1beta1.NodeMetricsList
|
||||||
|
err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 30*time.Second, true, func(ctx context.Context) (bool, error) {
|
||||||
|
var err error
|
||||||
|
nodeMetrics, err = metricsClient.MetricsV1beta1().NodeMetricses().List(ctx, metav1.ListOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
nonEmptyNodeMetrics := len(nodeMetrics.Items) > 0
|
||||||
|
if !nonEmptyNodeMetrics {
|
||||||
|
t.Logf("Node metrics empty... Retrying.")
|
||||||
|
}
|
||||||
|
return nonEmptyNodeMetrics, nil
|
||||||
|
})
|
||||||
|
require.NoErrorf(t, err, "Node metrics should not be empty")
|
||||||
|
|
||||||
|
for _, nodeMetric := range nodeMetrics.Items {
|
||||||
|
positiveMemory := nodeMetric.Usage.Memory().CmpInt64(0)
|
||||||
|
assert.Positivef(t, positiveMemory, "Memory usage for node %s is %v, should be > 0", nodeMetric.Name, nodeMetric.Usage.Memory())
|
||||||
|
|
||||||
|
positiveCPU := nodeMetric.Usage.Cpu().CmpInt64(0)
|
||||||
|
assert.Positivef(t, positiveCPU, "CPU usage for node %s is %v, should be > 0", nodeMetric.Name, nodeMetric.Usage.Cpu())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPodMetrics(t *testing.T) {
|
||||||
|
ctx := context.Background()
|
||||||
|
var podMetrics *metricsv1beta1.PodMetricsList
|
||||||
|
err := wait.PollUntilContextTimeout(ctx, 2*time.Second, 30*time.Second, true, func(ctx context.Context) (bool, error) {
|
||||||
|
var err error
|
||||||
|
podMetrics, err = metricsClient.MetricsV1beta1().PodMetricses(ns).List(ctx, metav1.ListOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
nonEmptyNodeMetrics := len(podMetrics.Items) > 0
|
||||||
|
if !nonEmptyNodeMetrics {
|
||||||
|
t.Logf("Pod metrics empty... Retrying.")
|
||||||
|
}
|
||||||
|
return nonEmptyNodeMetrics, nil
|
||||||
|
})
|
||||||
|
require.NoErrorf(t, err, "Pod metrics should not be empty")
|
||||||
|
|
||||||
|
for _, pod := range podMetrics.Items {
|
||||||
|
for _, containerMetric := range pod.Containers {
|
||||||
|
positiveMemory := containerMetric.Usage.Memory().CmpInt64(0)
|
||||||
|
assert.Positivef(t, positiveMemory, "Memory usage for pod %s/%s is %v, should be > 0", pod.Name, containerMetric.Name, containerMetric.Usage.Memory())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -1,12 +1,12 @@
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
kind: ClusterRoleBinding
|
kind: ClusterRoleBinding
|
||||||
metadata:
|
metadata:
|
||||||
name: custom-metrics:system:auth-delegator
|
name: prometheus
|
||||||
roleRef:
|
roleRef:
|
||||||
apiGroup: rbac.authorization.k8s.io
|
apiGroup: rbac.authorization.k8s.io
|
||||||
kind: ClusterRole
|
kind: ClusterRole
|
||||||
name: system:auth-delegator
|
name: prometheus
|
||||||
subjects:
|
subjects:
|
||||||
- kind: ServiceAccount
|
- kind: ServiceAccount
|
||||||
name: custom-metrics-apiserver
|
name: prometheus
|
||||||
namespace: custom-metrics
|
namespace: prometheus-adapter-e2e
|
||||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue