mirror of
https://github.com/kubernetes-sigs/prometheus-adapter.git
synced 2026-04-05 17:27:51 +00:00
Add initial e2e tests
This commit is contained in:
parent
b03cc3e7c8
commit
1145dbfe93
13 changed files with 537 additions and 0 deletions
41
test/README.md
Normal file
41
test/README.md
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
# End-to-end tests
|
||||
|
||||
## With [kind](https://kind.sigs.k8s.io/)
|
||||
|
||||
[`kind`](https://kind.sigs.k8s.io/) and `kubectl` are automatically downloaded
|
||||
except if `SKIP_INSTALL=true` is set.
|
||||
A `kind` cluster is automatically created before the tests, and deleted after
|
||||
the tests.
|
||||
The `prometheus-adapter` container image is built locally and imported
|
||||
into the cluster.
|
||||
|
||||
```bash
|
||||
KIND_E2E=true make test-e2e
|
||||
```
|
||||
|
||||
## With an existing Kubernetes cluster
|
||||
|
||||
If you already have a Kubernetes cluster, you can use:
|
||||
|
||||
```bash
|
||||
KUBECONFIG="/path/to/kube/config" REGISTRY="my.registry/prefix" make test-e2e
|
||||
```
|
||||
|
||||
- The cluster should not have a namespace `prometheus-adapter-e2e`.
|
||||
The namespace will be created and deleted as part of the E2E tests.
|
||||
- `KUBECONFIG` is the path of the [`kubeconfig` file].
|
||||
**Optional**, defaults to `${HOME}/.kube/config`
|
||||
- `REGISTRY` is the image registry where the container image should be pushed.
|
||||
**Required**.
|
||||
|
||||
[`kubeconfig` file]: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/
|
||||
|
||||
## Additional environment variables
|
||||
|
||||
These environment variables may also be used (with any non-empty value):
|
||||
|
||||
- `SKIP_INSTALL`: skip the installation of `kind` and `kubectl` binaries;
|
||||
- `SKIP_CLEAN_AFTER`: skip the deletion of resources (`Kind` cluster or
|
||||
Kubernetes namespace) and of the temporary directory `.e2e`;
|
||||
- `CLEAN_BEFORE`: clean before running the tests, e.g. if `SKIP_CLEAN_AFTER`
|
||||
was used on the previous run.
|
||||
213
test/e2e/e2e_test.go
Normal file
213
test/e2e/e2e_test.go
Normal file
|
|
@ -0,0 +1,213 @@
|
|||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
|
||||
monitoring "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
metricsv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||
metrics "k8s.io/metrics/pkg/client/clientset/versioned"
|
||||
)
|
||||
|
||||
const (
	// ns is the namespace where all e2e resources
	// (Prometheus, prometheus-adapter) are deployed.
	ns = "prometheus-adapter-e2e"
	// prometheusInstance is the name of the Prometheus custom resource.
	prometheusInstance = "prometheus"
	// deployment is the name of the prometheus-adapter Deployment.
	deployment = "prometheus-adapter"
)

// Package-level API clients, initialized once in TestMain and shared
// by all tests in this package.
var (
	client        clientset.Interface  // core Kubernetes API client
	promOpClient  monitoring.Interface // prometheus-operator CRD client
	metricsClient metrics.Interface    // metrics.k8s.io client (served by prometheus-adapter)
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
kubeconfig := os.Getenv("KUBECONFIG")
|
||||
if len(kubeconfig) == 0 {
|
||||
log.Fatal("KUBECONFIG not provided")
|
||||
}
|
||||
|
||||
var err error
|
||||
client, promOpClient, metricsClient, err = initializeClients(kubeconfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Cannot create clients: %v", err)
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
err = waitForPrometheusReady(ctx, ns, prometheusInstance)
|
||||
if err != nil {
|
||||
log.Fatalf("Prometheus instance 'prometheus' not ready: %v", err)
|
||||
}
|
||||
err = waitForDeploymentReady(ctx, ns, deployment)
|
||||
if err != nil {
|
||||
log.Fatalf("Deployment prometheus-adapter not ready: %v", err)
|
||||
}
|
||||
|
||||
exitVal := m.Run()
|
||||
os.Exit(exitVal)
|
||||
}
|
||||
|
||||
func initializeClients(kubeconfig string) (clientset.Interface, monitoring.Interface, metrics.Interface, error) {
|
||||
cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("Error during client configuration with %v", err)
|
||||
}
|
||||
|
||||
clientSet, err := clientset.NewForConfig(cfg)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("Error during client creation with %v", err)
|
||||
}
|
||||
|
||||
promOpClient, err := monitoring.NewForConfig(cfg)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("Error during dynamic client creation with %v", err)
|
||||
}
|
||||
|
||||
metricsClientSet, err := metrics.NewForConfig(cfg)
|
||||
if err != nil {
|
||||
return nil, nil, nil, fmt.Errorf("Error during metrics client creation with %v", err)
|
||||
}
|
||||
|
||||
return clientSet, promOpClient, metricsClientSet, nil
|
||||
}
|
||||
|
||||
func waitForPrometheusReady(ctx context.Context, namespace string, name string) error {
|
||||
return wait.PollImmediateWithContext(ctx, 5*time.Second, 120*time.Second, func(ctx context.Context) (bool, error) {
|
||||
prom, err := promOpClient.MonitoringV1().Prometheuses(ns).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
var reconciled, available *monitoringv1.PrometheusCondition
|
||||
for _, condition := range prom.Status.Conditions {
|
||||
cond := condition
|
||||
if cond.Type == monitoringv1.PrometheusReconciled {
|
||||
reconciled = &cond
|
||||
} else if cond.Type == monitoringv1.PrometheusAvailable {
|
||||
available = &cond
|
||||
}
|
||||
}
|
||||
|
||||
if reconciled == nil {
|
||||
log.Printf("Prometheus instance '%s': Waiting for reconciliation status...", name)
|
||||
return false, nil
|
||||
}
|
||||
if reconciled.Status != monitoringv1.PrometheusConditionTrue {
|
||||
log.Printf("Prometheus instance '%s': Reconciiled = %v. Waiting for reconciliation (reason %s, %q)...", name, reconciled.Status, reconciled.Reason, reconciled.Message)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
specReplicas := *prom.Spec.Replicas
|
||||
availableReplicas := prom.Status.AvailableReplicas
|
||||
if specReplicas != availableReplicas {
|
||||
log.Printf("Prometheus instance '%s': %v/%v pods are ready. Waiting for all pods to be ready...", name, availableReplicas, specReplicas)
|
||||
return false, err
|
||||
}
|
||||
|
||||
if available == nil {
|
||||
log.Printf("Prometheus instance '%s': Waiting for Available status...", name)
|
||||
return false, nil
|
||||
}
|
||||
if available.Status != monitoringv1.PrometheusConditionTrue {
|
||||
log.Printf("Prometheus instance '%s': Available = %v. Waiting for Available status... (reason %s, %q)", name, available.Status, available.Reason, available.Message)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
log.Printf("Prometheus instance '%s': Ready.", name)
|
||||
return true, nil
|
||||
})
|
||||
}
|
||||
|
||||
func waitForDeploymentReady(ctx context.Context, namespace string, name string) error {
|
||||
return wait.PollImmediateWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
|
||||
sts, err := client.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if sts.Status.ReadyReplicas == *sts.Spec.Replicas {
|
||||
log.Printf("Deployment %s: %v/%v pods are ready.", name, sts.Status.ReadyReplicas, *sts.Spec.Replicas)
|
||||
return true, nil
|
||||
}
|
||||
log.Printf("Deployment %s: %v/%v pods are ready. Waiting for all pods to be ready...", name, sts.Status.ReadyReplicas, *sts.Spec.Replicas)
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
|
||||
func TestNodeMetrics(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
var nodeMetrics *metricsv1beta1.NodeMetricsList
|
||||
err := wait.PollImmediateWithContext(ctx, 2*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
|
||||
var err error
|
||||
nodeMetrics, err = metricsClient.MetricsV1beta1().NodeMetricses().List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
nonEmptyNodeMetrics := len(nodeMetrics.Items) > 0
|
||||
if !nonEmptyNodeMetrics {
|
||||
t.Logf("Node metrics empty... Retrying.")
|
||||
}
|
||||
return nonEmptyNodeMetrics, nil
|
||||
})
|
||||
require.NoErrorf(t, err, "Node metrics should not be empty")
|
||||
|
||||
for _, nodeMetric := range nodeMetrics.Items {
|
||||
positiveMemory := nodeMetric.Usage.Memory().CmpInt64(0)
|
||||
assert.Positivef(t, positiveMemory, "Memory usage for node %s is %v, should be > 0", nodeMetric.Name, nodeMetric.Usage.Memory())
|
||||
|
||||
positiveCPU := nodeMetric.Usage.Cpu().CmpInt64(0)
|
||||
assert.Positivef(t, positiveCPU, "CPU usage for node %s is %v, should be > 0", nodeMetric.Name, nodeMetric.Usage.Cpu())
|
||||
}
|
||||
}
|
||||
|
||||
func TestPodMetrics(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
var podMetrics *metricsv1beta1.PodMetricsList
|
||||
err := wait.PollImmediateWithContext(ctx, 2*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
|
||||
var err error
|
||||
podMetrics, err = metricsClient.MetricsV1beta1().PodMetricses(ns).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
nonEmptyNodeMetrics := len(podMetrics.Items) > 0
|
||||
if !nonEmptyNodeMetrics {
|
||||
t.Logf("Pod metrics empty... Retrying.")
|
||||
}
|
||||
return nonEmptyNodeMetrics, nil
|
||||
})
|
||||
require.NoErrorf(t, err, "Pod metrics should not be empty")
|
||||
|
||||
for _, pod := range podMetrics.Items {
|
||||
for _, containerMetric := range pod.Containers {
|
||||
positiveMemory := containerMetric.Usage.Memory().CmpInt64(0)
|
||||
assert.Positivef(t, positiveMemory, "Memory usage for pod %s/%s is %v, should be > 0", pod.Name, containerMetric.Name, containerMetric.Usage.Memory())
|
||||
}
|
||||
}
|
||||
}
|
||||
12
test/prometheus-manifests/cluster-role-binding.yaml
Normal file
12
test/prometheus-manifests/cluster-role-binding.yaml
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: prometheus
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: prometheus
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: prometheus
|
||||
namespace: prometheus-adapter-e2e
|
||||
24
test/prometheus-manifests/cluster-role.yaml
Normal file
24
test/prometheus-manifests/cluster-role.yaml
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: prometheus
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes
|
||||
- nodes/metrics
|
||||
- services
|
||||
- endpoints
|
||||
- pods
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- configmaps
|
||||
verbs: ["get"]
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses
|
||||
verbs: ["get", "list", "watch"]
|
||||
- nonResourceURLs: ["/metrics"]
|
||||
verbs: ["get"]
|
||||
9
test/prometheus-manifests/prometheus.yaml
Normal file
9
test/prometheus-manifests/prometheus.yaml
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: Prometheus
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: prometheus-adapter-e2e
|
||||
spec:
|
||||
replicas: 2
|
||||
serviceAccountName: prometheus
|
||||
serviceMonitorSelector: {}
|
||||
5
test/prometheus-manifests/service-account.yaml
Normal file
5
test/prometheus-manifests/service-account.yaml
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: prometheus-adapter-e2e
|
||||
25
test/prometheus-manifests/service-monitor-kubelet.yaml
Normal file
25
test/prometheus-manifests/service-monitor-kubelet.yaml
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: kubelet
|
||||
name: kubelet
|
||||
namespace: prometheus-adapter-e2e
|
||||
spec:
|
||||
endpoints:
|
||||
- bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
|
||||
honorLabels: true
|
||||
honorTimestamps: false
|
||||
interval: 10s
|
||||
path: /metrics/resource
|
||||
port: https-metrics
|
||||
scheme: https
|
||||
tlsConfig:
|
||||
insecureSkipVerify: true
|
||||
jobLabel: app.kubernetes.io/name
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- kube-system
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: kubelet
|
||||
14
test/prometheus-manifests/service.yaml
Normal file
14
test/prometheus-manifests/service.yaml
Normal file
|
|
@ -0,0 +1,14 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: prometheus
|
||||
namespace: prometheus-adapter-e2e
|
||||
spec:
|
||||
ports:
|
||||
- name: web
|
||||
port: 9090
|
||||
targetPort: web
|
||||
selector:
|
||||
app.kubernetes.io/instance: prometheus
|
||||
app.kubernetes.io/name: prometheus
|
||||
sessionAffinity: ClientIP
|
||||
134
test/run-e2e-tests.sh
Executable file
134
test/run-e2e-tests.sh
Executable file
|
|
@ -0,0 +1,134 @@
|
|||
#!/usr/bin/env bash

# Copyright 2022 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Runs the e2e tests either against a freshly-created Kind cluster
# (KIND_E2E=true) or against an existing cluster (REGISTRY=... set).
# See test/README.md for the full list of environment variables.

set -x
set -o errexit
set -o nounset

# Tool versions
K8S_VERSION=${KUBERNETES_VERSION:-v1.26.0} # cf https://hub.docker.com/r/kindest/node/tags
KIND_VERSION=${KIND_VERSION:-v0.17.0} # cf https://github.com/kubernetes-sigs/kind/releases
PROM_OPERATOR_VERSION=${PROM_OPERATOR_VERSION:-v0.62.0} # cf https://github.com/prometheus-operator/prometheus-operator/releases

# Variables; set to empty if unbound/empty (so `set -o nounset` stays safe)
REGISTRY=${REGISTRY:-}
KIND_E2E=${KIND_E2E:-}
SKIP_INSTALL=${SKIP_INSTALL:-}
SKIP_CLEAN_AFTER=${SKIP_CLEAN_AFTER:-}
CLEAN_BEFORE=${CLEAN_BEFORE:-}

# KUBECONFIG - will be overridden if a cluster is deployed with Kind
KUBECONFIG=${KUBECONFIG:-"${HOME}/.kube/config"}

# A temporary directory used by the tests (binaries, kubeconfig, manifests)
E2E_DIR="${PWD}/.e2e"

# The namespace where prometheus-adapter is deployed
NAMESPACE="prometheus-adapter-e2e"

if [[ -z "${REGISTRY}" && -z "${KIND_E2E}" ]]; then
echo -e "Either REGISTRY or KIND_E2E should be set."
exit 1
fi

# clean deletes everything the tests created: the Kind cluster (Kind mode) or
# the deployed manifests and the test namespace (existing-cluster mode), and
# the temporary directory. Each step is best-effort (|| true).
function clean {
if [[ -n "${KIND_E2E}" ]]; then
kind delete cluster || true
else
kubectl delete -f ./deploy/manifests || true
kubectl delete -f ./test/prometheus-manifests || true
kubectl delete namespace "${NAMESPACE}" || true
fi

rm -rf "${E2E_DIR}"
}

if [[ -n "${CLEAN_BEFORE}" ]]; then
clean
fi

# on_exit dumps the prometheus-adapter pod logs (useful when a test failed)
# and, unless SKIP_CLEAN_AFTER is set, cleans up. It preserves the script's
# original exit code.
function on_exit {
local error_code="$?"

echo "Obtaining prometheus-adapter pod logs..."
kubectl logs -l app.kubernetes.io/name=prometheus-adapter -n "${NAMESPACE}" || true

if [[ -z "${SKIP_CLEAN_AFTER}" ]]; then
clean
fi

test "${error_code}" == 0 && return;
}
trap on_exit EXIT

# Refuse to run over a leftover .e2e directory (use CLEAN_BEFORE to remove it)
if [[ -d "${E2E_DIR}" ]]; then
echo -e "${E2E_DIR} already exists."
exit 1
fi
mkdir -p "${E2E_DIR}"

if [[ -n "${KIND_E2E}" ]]; then
# Install kubectl and kind, if we did not set SKIP_INSTALL
if [[ -z "${SKIP_INSTALL}" ]]; then
BIN="${E2E_DIR}/bin"
mkdir -p "${BIN}"
curl -Lo "${BIN}/kubectl" "https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/bin/linux/amd64/kubectl" && chmod +x "${BIN}/kubectl"
curl -Lo "${BIN}/kind" "https://kind.sigs.k8s.io/dl/${KIND_VERSION}/kind-linux-amd64" && chmod +x "${BIN}/kind"
export PATH="${BIN}:${PATH}"
fi

kind create cluster --image "kindest/node:${K8S_VERSION}"

# No real registry in Kind mode: the image is built locally and loaded
# into the cluster below.
REGISTRY="localhost"

KUBECONFIG="${E2E_DIR}/kubeconfig"
kind get kubeconfig > "${KUBECONFIG}"
fi

# Create the test namespace
kubectl create namespace "${NAMESPACE}"

export REGISTRY
IMAGE_NAME="${REGISTRY}/prometheus-adapter-$(go env GOARCH)"
IMAGE_TAG="v$(cat VERSION)"

if [[ -n "${KIND_E2E}" ]]; then
make container
kind load docker-image "${IMAGE_NAME}:${IMAGE_TAG}"
else
make push
fi

# Install prometheus-operator
kubectl apply -f "https://github.com/prometheus-operator/prometheus-operator/releases/download/${PROM_OPERATOR_VERSION}/bundle.yaml" --server-side

# Install and setup prometheus
kubectl apply -f ./test/prometheus-manifests --server-side

# Customize prometheus-adapter manifests
# TODO: use Kustomize or generate manifests from Jsonnet
cp -r ./deploy/manifests "${E2E_DIR}/manifests"
prom_url="http://prometheus.${NAMESPACE}.svc:9090/"
sed -i -e "s|--prometheus-url=.*$|--prometheus-url=${prom_url}|g" "${E2E_DIR}/manifests/deployment.yaml"
sed -i -e "s|image: .*$|image: ${IMAGE_NAME}:${IMAGE_TAG}|g" "${E2E_DIR}/manifests/deployment.yaml"
find "${E2E_DIR}/manifests" -type f -exec sed -i -e "s|namespace: monitoring|namespace: ${NAMESPACE}|g" {} \;

# Deploy prometheus-adapter
kubectl apply -f "${E2E_DIR}/manifests" --server-side

# Run the Go e2e tests against the cluster
PROJECT_PREFIX="sigs.k8s.io/prometheus-adapter"
export KUBECONFIG
go test "${PROJECT_PREFIX}/test/e2e/" -v -count=1
|
||||
Loading…
Add table
Add a link
Reference in a new issue