Mirror of https://github.com/kubernetes-sigs/prometheus-adapter.git
synced 2026-04-07 22:25:03 +00:00
vendor: revendor metrics-server, custom-metrics-apiserver
This commit is contained in:
parent 752ce84723
commit 523aa52367
1010 changed files with 91458 additions and 29107 deletions
vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go  (145 changed lines, generated, vendored)

@@ -39,29 +39,11 @@ import (
     "k8s.io/apiserver/pkg/storage"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/client-go/tools/cache"
-    "k8s.io/component-base/metrics"
-    "k8s.io/component-base/metrics/legacyregistry"
-    "k8s.io/klog"
+    "k8s.io/klog/v2"
     utiltrace "k8s.io/utils/trace"
 )

-/*
- * By default, all the following metrics are defined as falling under
- * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)
- *
- * Promoting the stability level of the metric is a responsibility of the component owner, since it
- * involves explicitly acknowledging support for the metric across multiple releases, in accordance with
- * the metric stability policy.
- */
 var (
-    initCounter = metrics.NewCounterVec(
-        &metrics.CounterOpts{
-            Name:           "apiserver_init_events_total",
-            Help:           "Counter of init events processed in watchcache broken by resource type",
-            StabilityLevel: metrics.ALPHA,
-        },
-        []string{"resource"},
-    )
     emptyFunc = func() {}
 )

@@ -69,17 +51,15 @@ const (
     // storageWatchListPageSize is the cacher's request chunk size of
     // initial and resync watch lists to storage.
     storageWatchListPageSize = int64(10000)
+    // defaultBookmarkFrequency defines how frequently watch bookmarks should be send
+    // in addition to sending a bookmark right before watch deadline.
+    //
+    // NOTE: Update `eventFreshDuration` when changing this value.
+    defaultBookmarkFrequency = time.Minute
 )

-func init() {
-    legacyregistry.MustRegister(initCounter)
-}
-
 // Config contains the configuration for a given Cache.
 type Config struct {
-    // Maximum size of the history cached in memory.
-    CacheCapacity int
-
     // An underlying storage.Interface.
     Storage storage.Interface

@@ -112,6 +92,8 @@ type Config struct {
     NewListFunc func() runtime.Object

     Codec runtime.Codec
+
+    Clock clock.Clock
 }

 type watchersMap map[int]*cacheWatcher
@@ -176,24 +158,26 @@ func (i *indexedWatchers) terminateAll(objectType reflect.Type, done func(*cache
 // second in a bucket, and pop up them once at the timeout. To be more specific,
 // if you set fire time at X, you can get the bookmark within (X-1,X+1) period.
 type watcherBookmarkTimeBuckets struct {
-    lock            sync.Mutex
-    watchersBuckets map[int64][]*cacheWatcher
-    startBucketID   int64
-    clock           clock.Clock
+    lock              sync.Mutex
+    watchersBuckets   map[int64][]*cacheWatcher
+    startBucketID     int64
+    clock             clock.Clock
+    bookmarkFrequency time.Duration
 }

-func newTimeBucketWatchers(clock clock.Clock) *watcherBookmarkTimeBuckets {
+func newTimeBucketWatchers(clock clock.Clock, bookmarkFrequency time.Duration) *watcherBookmarkTimeBuckets {
     return &watcherBookmarkTimeBuckets{
-        watchersBuckets: make(map[int64][]*cacheWatcher),
-        startBucketID:   clock.Now().Unix(),
-        clock:           clock,
+        watchersBuckets:   make(map[int64][]*cacheWatcher),
+        startBucketID:     clock.Now().Unix(),
+        clock:             clock,
+        bookmarkFrequency: bookmarkFrequency,
     }
 }

 // adds a watcher to the bucket, if the deadline is before the start, it will be
 // added to the first one.
 func (t *watcherBookmarkTimeBuckets) addWatcher(w *cacheWatcher) bool {
-    nextTime, ok := w.nextBookmarkTime(t.clock.Now())
+    nextTime, ok := w.nextBookmarkTime(t.clock.Now(), t.bookmarkFrequency)
     if !ok {
         return false
     }
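The bookmark machinery above keys watchers by one-second buckets (startBucketID is the Unix second at creation time). Below is a simplified, hypothetical illustration of that bucketing pattern in isolation; it is not the vendored watcherBookmarkTimeBuckets, whose pop/terminate paths are outside this hunk.

package example

import "time"

type item struct{ name string }

// timeBuckets groups entries into one-second buckets keyed by Unix time and
// drains everything whose bucket has fired. Illustrative only.
type timeBuckets struct {
    buckets       map[int64][]item
    startBucketID int64
}

func newTimeBuckets(now time.Time) *timeBuckets {
    return &timeBuckets{buckets: map[int64][]item{}, startBucketID: now.Unix()}
}

// add schedules it for the second containing fireAt; anything already in the
// past lands in the current start bucket, mirroring the "if the deadline is
// before the start, it will be added to the first one" comment above.
func (b *timeBuckets) add(it item, fireAt time.Time) {
    id := fireAt.Unix()
    if id < b.startBucketID {
        id = b.startBucketID
    }
    b.buckets[id] = append(b.buckets[id], it)
}

// drain pops every bucket that is due at or before now.
func (b *timeBuckets) drain(now time.Time) []item {
    var due []item
    for ; b.startBucketID <= now.Unix(); b.startBucketID++ {
        due = append(due, b.buckets[b.startBucketID]...)
        delete(b.buckets, b.startBucketID)
    }
    return due
}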
@@ -336,11 +320,14 @@ func NewCacherFromConfig(config Config) (*Cacher, error) {
         }
     }

-    clock := clock.RealClock{}
+    if config.Clock == nil {
+        config.Clock = clock.RealClock{}
+    }
+    objType := reflect.TypeOf(obj)
     cacher := &Cacher{
         ready:          newReady(),
         storage:        config.Storage,
-        objectType:     reflect.TypeOf(obj),
+        objectType:     objType,
         versioner:      config.Versioner,
         newFunc:        config.NewFunc,
         indexedTrigger: indexedTrigger,
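NewCacherFromConfig now defaults config.Clock to clock.RealClock{} only when the caller left it unset, so tests can inject a fake clock. A minimal sketch of that injection pattern, assuming the fake clock from k8s.io/apimachinery/pkg/util/clock; the Config and New below are illustrative stand-ins, not the cacher's own types.

package example

import (
    "time"

    "k8s.io/apimachinery/pkg/util/clock"
)

// Config mirrors the "inject a clock, default to the real one" pattern added
// to cacher.Config in this diff; the struct itself is illustrative.
type Config struct {
    Clock clock.Clock
}

func New(cfg Config) clock.Clock {
    if cfg.Clock == nil {
        cfg.Clock = clock.RealClock{} // production default, as in NewCacherFromConfig
    }
    return cfg.Clock
}

func demo() {
    // Production: zero value, real clock is filled in.
    _ = New(Config{})

    // Tests: a fake clock makes bookmark timing and cache-resize windows deterministic.
    fake := clock.NewFakeClock(time.Now())
    c := New(Config{Clock: fake})
    fake.Step(time.Minute) // advance past defaultBookmarkFrequency
    _ = c.Now()
}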
@@ -358,9 +345,9 @@ func NewCacherFromConfig(config Config) (*Cacher, error) {
         // and there are no guarantees on the order that they will stop.
         // So we will be simply closing the channel, and synchronizing on the WaitGroup.
         stopCh:           stopCh,
-        clock:            clock,
+        clock:            config.Clock,
         timer:            time.NewTimer(time.Duration(0)),
-        bookmarkWatchers: newTimeBucketWatchers(clock),
+        bookmarkWatchers: newTimeBucketWatchers(config.Clock, defaultBookmarkFrequency),
     }

     // Ensure that timer is stopped.
@@ -371,7 +358,7 @@ func NewCacherFromConfig(config Config) (*Cacher, error) {
     }

     watchCache := newWatchCache(
-        config.CacheCapacity, config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers)
+        config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, objType)
     listerWatcher := NewCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
     reflectorName := "storage/cacher.go:" + config.ResourcePrefix

@@ -412,6 +399,7 @@ func (c *Cacher) startCaching(stopChannel <-chan struct{}) {
     c.watchCache.SetOnReplace(func() {
         successfulList = true
         c.ready.set(true)
+        klog.V(1).Infof("cacher (%v): initialized", c.objectType.String())
     })
     defer func() {
         if successfulList {
@@ -425,7 +413,7 @@ func (c *Cacher) startCaching(stopChannel <-chan struct{}) {
     // Also note that startCaching is called in a loop, so there's no need
     // to have another loop here.
     if err := c.reflector.ListAndWatch(stopChannel); err != nil {
-        klog.Errorf("unexpected ListAndWatch error: %v", err)
+        klog.Errorf("cacher (%v): unexpected ListAndWatch error: %v; reinitializing...", c.objectType.String(), err)
     }
 }

@@ -445,8 +433,9 @@ func (c *Cacher) Delete(ctx context.Context, key string, out runtime.Object, pre
 }

 // Watch implements storage.Interface.
-func (c *Cacher) Watch(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate) (watch.Interface, error) {
-    watchRV, err := c.versioner.ParseResourceVersion(resourceVersion)
+func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
+    pred := opts.Predicate
+    watchRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion)
     if err != nil {
         return nil, err
     }
@@ -529,22 +518,22 @@ func (c *Cacher) Watch(ctx context.Context, key string, resourceVersion string,
 }

 // WatchList implements storage.Interface.
-func (c *Cacher) WatchList(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate) (watch.Interface, error) {
-    return c.Watch(ctx, key, resourceVersion, pred)
+func (c *Cacher) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
+    return c.Watch(ctx, key, opts)
 }

 // Get implements storage.Interface.
-func (c *Cacher) Get(ctx context.Context, key string, resourceVersion string, objPtr runtime.Object, ignoreNotFound bool) error {
-    if resourceVersion == "" {
+func (c *Cacher) Get(ctx context.Context, key string, opts storage.GetOptions, objPtr runtime.Object) error {
+    if opts.ResourceVersion == "" {
         // If resourceVersion is not specified, serve it from underlying
         // storage (for backward compatibility).
-        return c.storage.Get(ctx, key, resourceVersion, objPtr, ignoreNotFound)
+        return c.storage.Get(ctx, key, opts, objPtr)
     }

     // If resourceVersion is specified, serve it from cache.
     // It's guaranteed that the returned value is at least that
     // fresh as the given resourceVersion.
-    getRV, err := c.versioner.ParseResourceVersion(resourceVersion)
+    getRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion)
     if err != nil {
         return err
     }
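Watch, Get, GetToList and List now take storage.ListOptions/storage.GetOptions instead of loose positional arguments. A rough sketch of what a caller looks like after this change, using only option fields that appear in this diff (ResourceVersion, Predicate, IgnoreNotFound); the wrapper functions themselves are hypothetical.

package example

import (
    "context"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/apiserver/pkg/storage"
)

// Before: s.Watch(ctx, key, "1234", storage.Everything)
// After: options are grouped into a struct, so new knobs (such as
// ResourceVersionMatch) can be added without another signature change.
func watchSince(ctx context.Context, s storage.Interface, key, rv string) (watch.Interface, error) {
    return s.Watch(ctx, key, storage.ListOptions{
        ResourceVersion: rv,
        Predicate:       storage.Everything,
    })
}

// Before: s.Get(ctx, key, "", obj, true /* ignoreNotFound */)
// After: the boolean moves into GetOptions, which reads better at call sites.
func getIgnoringNotFound(ctx context.Context, s storage.Interface, key string, obj runtime.Object) error {
    return s.Get(ctx, key, storage.GetOptions{IgnoreNotFound: true}, obj)
}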
@@ -552,7 +541,7 @@ func (c *Cacher) Get(ctx context.Context, key string, resourceVersion string, ob
     if getRV == 0 && !c.ready.check() {
         // If Cacher is not yet initialized and we don't require any specific
         // minimal resource version, simply forward the request to storage.
-        return c.storage.Get(ctx, key, resourceVersion, objPtr, ignoreNotFound)
+        return c.storage.Get(ctx, key, opts, objPtr)
     }

     // Do not create a trace - it's not for free and there are tons
@@ -577,7 +566,7 @@ func (c *Cacher) Get(ctx context.Context, key string, resourceVersion string, ob
         objVal.Set(reflect.ValueOf(elem.Object).Elem())
     } else {
         objVal.Set(reflect.Zero(objVal.Type()))
-        if !ignoreNotFound {
+        if !opts.IgnoreNotFound {
             return storage.NewKeyNotFoundError(key, int64(readResourceVersion))
         }
     }
@@ -585,18 +574,20 @@ func (c *Cacher) Get(ctx context.Context, key string, resourceVersion string, ob
 }

 // GetToList implements storage.Interface.
-func (c *Cacher) GetToList(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate, listObj runtime.Object) error {
+func (c *Cacher) GetToList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
+    resourceVersion := opts.ResourceVersion
+    pred := opts.Predicate
     pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)
     hasContinuation := pagingEnabled && len(pred.Continue) > 0
     hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0"
-    if resourceVersion == "" || hasContinuation || hasLimit {
+    if resourceVersion == "" || hasContinuation || hasLimit || opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact {
         // If resourceVersion is not specified, serve it from underlying
         // storage (for backward compatibility). If a continuation is
         // requested, serve it from the underlying storage as well.
         // Limits are only sent to storage when resourceVersion is non-zero
         // since the watch cache isn't able to perform continuations, and
         // limits are ignored when resource version is zero
-        return c.storage.GetToList(ctx, key, resourceVersion, pred, listObj)
+        return c.storage.GetToList(ctx, key, opts, listObj)
     }

     // If resourceVersion is specified, serve it from cache.
@@ -610,7 +601,7 @@ func (c *Cacher) GetToList(ctx context.Context, key string, resourceVersion stri
     if listRV == 0 && !c.ready.check() {
         // If Cacher is not yet initialized and we don't require any specific
         // minimal resource version, simply forward the request to storage.
-        return c.storage.GetToList(ctx, key, resourceVersion, pred, listObj)
+        return c.storage.GetToList(ctx, key, opts, listObj)
     }

     trace := utiltrace.New("cacher list", utiltrace.Field{"type", c.objectType.String()})
@@ -657,18 +648,20 @@ func (c *Cacher) GetToList(ctx context.Context, key string, resourceVersion stri
 }

 // List implements storage.Interface.
-func (c *Cacher) List(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate, listObj runtime.Object) error {
+func (c *Cacher) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
+    resourceVersion := opts.ResourceVersion
+    pred := opts.Predicate
     pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)
     hasContinuation := pagingEnabled && len(pred.Continue) > 0
     hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0"
-    if resourceVersion == "" || hasContinuation || hasLimit {
+    if resourceVersion == "" || hasContinuation || hasLimit || opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact {
         // If resourceVersion is not specified, serve it from underlying
         // storage (for backward compatibility). If a continuation is
         // requested, serve it from the underlying storage as well.
         // Limits are only sent to storage when resourceVersion is non-zero
         // since the watch cache isn't able to perform continuations, and
         // limits are ignored when resource version is zero.
-        return c.storage.List(ctx, key, resourceVersion, pred, listObj)
+        return c.storage.List(ctx, key, opts, listObj)
     }

     // If resourceVersion is specified, serve it from cache.
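Both GetToList and List now fall through to the underlying storage whenever the request cannot be answered from the watch cache: empty resourceVersion, a continue token, a limit with a non-zero resourceVersion, or an exact resourceVersion match. A hypothetical helper that merely restates that inline condition from the hunks above (the cacher itself keeps it inline):

package example

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apiserver/pkg/storage"
)

// shouldDelegateToStorage is not part of the vendored code; it only restates
// the condition from Cacher.List/GetToList above. pagingEnabled stands in for
// the APIListChunking feature-gate check.
func shouldDelegateToStorage(opts storage.ListOptions, pagingEnabled bool) bool {
    pred := opts.Predicate
    hasContinuation := pagingEnabled && len(pred.Continue) > 0
    hasLimit := pagingEnabled && pred.Limit > 0 && opts.ResourceVersion != "0"
    return opts.ResourceVersion == "" ||
        hasContinuation ||
        hasLimit ||
        opts.ResourceVersionMatch == metav1.ResourceVersionMatchExact
}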
@@ -682,7 +675,7 @@ func (c *Cacher) List(ctx context.Context, key string, resourceVersion string, p
     if listRV == 0 && !c.ready.check() {
         // If Cacher is not yet initialized and we don't require any specific
         // minimal resource version, simply forward the request to storage.
-        return c.storage.List(ctx, key, resourceVersion, pred, listObj)
+        return c.storage.List(ctx, key, opts, listObj)
     }

     trace := utiltrace.New("cacher list", utiltrace.Field{"type", c.objectType.String()})
@@ -935,9 +928,8 @@ func (c *Cacher) startDispatchingBookmarkEvents() {
                 continue
             }
             c.watchersBuffer = append(c.watchersBuffer, watcher)
-            // Given that we send bookmark event once at deadline-2s, never push again
-            // after the watcher pops up from the buckets. Once we decide to change the
-            // strategy to more sophisticated, we may need it here.
+            // Requeue the watcher for the next bookmark if needed.
+            c.bookmarkWatchers.addWatcher(watcher)
         }
     }
 }
@@ -1098,7 +1090,7 @@ func (lw *cacherListerWatcher) List(options metav1.ListOptions) (runtime.Object,
         Continue: options.Continue,
     }

-    if err := lw.storage.List(context.TODO(), lw.resourcePrefix, "", pred, list); err != nil {
+    if err := lw.storage.List(context.TODO(), lw.resourcePrefix, storage.ListOptions{ResourceVersionMatch: options.ResourceVersionMatch, Predicate: pred}, list); err != nil {
         return nil, err
     }
     return list, nil
@@ -1106,7 +1098,7 @@ func (lw *cacherListerWatcher) List(options metav1.ListOptions) (runtime.Object,

 // Implements cache.ListerWatcher interface.
 func (lw *cacherListerWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) {
-    return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, options.ResourceVersion, storage.Everything)
+    return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, storage.ListOptions{ResourceVersion: options.ResourceVersion, Predicate: storage.Everything})
 }

 // errWatcher implements watch.Interface to return a single error
@@ -1240,13 +1232,28 @@ func (c *cacheWatcher) add(event *watchCacheEvent, timer *time.Timer) bool {
     }
 }

-func (c *cacheWatcher) nextBookmarkTime(now time.Time) (time.Time, bool) {
-    // For now we return 2s before deadline (and maybe +infinity is now already passed this time)
-    // but it gives us extensibility for the future(false when deadline is not set).
+func (c *cacheWatcher) nextBookmarkTime(now time.Time, bookmarkFrequency time.Duration) (time.Time, bool) {
+    // We try to send bookmarks:
+    // (a) roughly every minute
+    // (b) right before the watcher timeout - for now we simply set it 2s before
+    // the deadline
+    // The former gives us periodicity if the watch breaks due to unexpected
+    // conditions, the later ensures that on timeout the watcher is as close to
+    // now as possible - this covers 99% of cases.
+    heartbeatTime := now.Add(bookmarkFrequency)
     if c.deadline.IsZero() {
-        return c.deadline, false
+        // Timeout is set by our client libraries (e.g. reflector) as well as defaulted by
+        // apiserver if properly configured. So this shoudln't happen in practice.
+        return heartbeatTime, true
     }
-    return c.deadline.Add(-2 * time.Second), true
+    if pretimeoutTime := c.deadline.Add(-2 * time.Second); pretimeoutTime.Before(heartbeatTime) {
+        heartbeatTime = pretimeoutTime
+    }
+
+    if heartbeatTime.Before(now) {
+        return time.Time{}, false
+    }
+    return heartbeatTime, true
 }

 func getEventObject(object runtime.Object) runtime.Object {
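The new nextBookmarkTime effectively schedules the next bookmark at the earlier of now+bookmarkFrequency and deadline-2s, and reports false once that instant has already passed. A small standalone illustration of that selection with plain time values (not the vendored cacheWatcher type):

package main

import (
    "fmt"
    "time"
)

// nextHeartbeat mirrors the selection logic in cacheWatcher.nextBookmarkTime:
// periodic bookmarks every `frequency`, pulled earlier to 2s before `deadline`
// when that comes first, and suppressed once the chosen time has passed.
func nextHeartbeat(now, deadline time.Time, frequency time.Duration) (time.Time, bool) {
    heartbeat := now.Add(frequency)
    if !deadline.IsZero() {
        if pretimeout := deadline.Add(-2 * time.Second); pretimeout.Before(heartbeat) {
            heartbeat = pretimeout
        }
    }
    if heartbeat.Before(now) {
        return time.Time{}, false
    }
    return heartbeat, true
}

func main() {
    now := time.Date(2020, 8, 1, 12, 0, 0, 0, time.UTC)

    // Deadline far away: the periodic (1 minute) bookmark wins.
    t, ok := nextHeartbeat(now, now.Add(5*time.Minute), time.Minute)
    fmt.Println(t.Format("15:04:05"), ok) // 12:01:00 true

    // Deadline in 30s: the pre-timeout bookmark (deadline-2s) wins.
    t, ok = nextHeartbeat(now, now.Add(30*time.Second), time.Minute)
    fmt.Println(t.Format("15:04:05"), ok) // 12:00:28 true

    // Deadline already passed more than 2s ago: no bookmark is scheduled.
    _, ok = nextHeartbeat(now, now.Add(-10*time.Second), time.Minute)
    fmt.Println(ok) // false
}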
vendor/k8s.io/apiserver/pkg/storage/cacher/caching_object.go  (2 changed lines, generated, vendored)

@@ -30,7 +30,7 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/types"
-    "k8s.io/klog"
+    "k8s.io/klog/v2"
 )

 var _ runtime.CacheableObject = &cachingObject{}
vendor/k8s.io/apiserver/pkg/storage/cacher/metrics.go  (74 changed lines, generated, vendored, new file)

@@ -0,0 +1,74 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cacher
+
+import (
+    "k8s.io/component-base/metrics"
+    "k8s.io/component-base/metrics/legacyregistry"
+)
+
+/*
+ * By default, all the following metrics are defined as falling under
+ * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)
+ *
+ * Promoting the stability level of the metric is a responsibility of the component owner, since it
+ * involves explicitly acknowledging support for the metric across multiple releases, in accordance with
+ * the metric stability policy.
+ */
+var (
+    initCounter = metrics.NewCounterVec(
+        &metrics.CounterOpts{
+            Name:           "apiserver_init_events_total",
+            Help:           "Counter of init events processed in watchcache broken by resource type.",
+            StabilityLevel: metrics.ALPHA,
+        },
+        []string{"resource"},
+    )
+
+    watchCacheCapacityIncreaseTotal = metrics.NewCounterVec(
+        &metrics.CounterOpts{
+            Name:           "watch_cache_capacity_increase_total",
+            Help:           "Total number of watch cache capacity increase events broken by resource type.",
+            StabilityLevel: metrics.ALPHA,
+        },
+        []string{"resource"},
+    )
+
+    watchCacheCapacityDecreaseTotal = metrics.NewCounterVec(
+        &metrics.CounterOpts{
+            Name:           "watch_cache_capacity_decrease_total",
+            Help:           "Total number of watch cache capacity decrease events broken by resource type.",
+            StabilityLevel: metrics.ALPHA,
+        },
+        []string{"resource"},
+    )
+)
+
+func init() {
+    legacyregistry.MustRegister(initCounter)
+    legacyregistry.MustRegister(watchCacheCapacityIncreaseTotal)
+    legacyregistry.MustRegister(watchCacheCapacityDecreaseTotal)
+}
+
+// recordsWatchCacheCapacityChange record watchCache capacity resize(increase or decrease) operations.
+func recordsWatchCacheCapacityChange(objType string, old, new int) {
+    if old < new {
+        watchCacheCapacityIncreaseTotal.WithLabelValues(objType).Inc()
+        return
+    }
+    watchCacheCapacityDecreaseTotal.WithLabelValues(objType).Inc()
+}
vendor/k8s.io/apiserver/pkg/storage/cacher/util.go  (14 changed lines, generated, vendored)

@@ -44,3 +44,17 @@ func hasPathPrefix(s, pathPrefix string) bool {
     }
     return false
 }
+
+func max(a, b int) int {
+    if a > b {
+        return a
+    }
+    return b
+}
+
+func min(a, b int) int {
+    if a < b {
+        return a
+    }
+    return b
+}
vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go  (105 changed lines, generated, vendored)

@@ -18,6 +18,7 @@ package cacher

 import (
     "fmt"
+    "reflect"
     "sort"
     "sync"
     "time"
@@ -30,7 +31,7 @@ import (
     "k8s.io/apimachinery/pkg/watch"
     "k8s.io/apiserver/pkg/storage"
     "k8s.io/client-go/tools/cache"
-    "k8s.io/klog"
+    "k8s.io/klog/v2"
     utiltrace "k8s.io/utils/trace"
 )

@@ -44,6 +45,19 @@ const (
     // resourceVersionTooHighRetrySeconds is the seconds before a operation should be retried by the client
     // after receiving a 'too high resource version' error.
     resourceVersionTooHighRetrySeconds = 1
+
+    // eventFreshDuration is time duration of events we want to keep.
+    // We set it to `defaultBookmarkFrequency` plus epsilon to maximize
+    // chances that last bookmark was sent within kept history, at the
+    // same time, minimizing the needed memory usage.
+    eventFreshDuration = 75 * time.Second
+
+    // defaultLowerBoundCapacity is a default value for event cache capacity's lower bound.
+    // TODO: Figure out, to what value we can decreased it.
+    defaultLowerBoundCapacity = 100
+
+    // defaultUpperBoundCapacity should be able to keep eventFreshDuration of history.
+    defaultUpperBoundCapacity = 100 * 1024
 )

 // watchCacheEvent is a single "watch event" that is send to users of
@@ -60,6 +74,7 @@ type watchCacheEvent struct {
     PrevObjFields   fields.Set
     Key             string
     ResourceVersion uint64
+    RecordTime      time.Time
 }

 // Computing a key of an object is generally non-trivial (it performs
@@ -126,6 +141,12 @@ type watchCache struct {
     // Maximum size of history window.
     capacity int

+    // upper bound of capacity since event cache has a dynamic size.
+    upperBoundCapacity int
+
+    // lower bound of capacity since event cache has a dynamic size.
+    lowerBoundCapacity int
+
     // keyFunc is used to get a key in the underlying storage for a given object.
     keyFunc func(runtime.Object) (string, error)

@@ -165,28 +186,35 @@ type watchCache struct {

     // An underlying storage.Versioner.
     versioner storage.Versioner
+
+    // cacher's objectType.
+    objectType reflect.Type
 }

 func newWatchCache(
-    capacity int,
     keyFunc func(runtime.Object) (string, error),
     eventHandler func(*watchCacheEvent),
     getAttrsFunc func(runtime.Object) (labels.Set, fields.Set, error),
     versioner storage.Versioner,
-    indexers *cache.Indexers) *watchCache {
+    indexers *cache.Indexers,
+    clock clock.Clock,
+    objectType reflect.Type) *watchCache {
     wc := &watchCache{
-        capacity:            capacity,
+        capacity:            defaultLowerBoundCapacity,
         keyFunc:             keyFunc,
         getAttrsFunc:        getAttrsFunc,
-        cache:               make([]*watchCacheEvent, capacity),
+        cache:               make([]*watchCacheEvent, defaultLowerBoundCapacity),
+        lowerBoundCapacity:  defaultLowerBoundCapacity,
+        upperBoundCapacity:  defaultUpperBoundCapacity,
         startIndex:          0,
         endIndex:            0,
         store:               cache.NewIndexer(storeElementKey, storeElementIndexers(indexers)),
         resourceVersion:     0,
         listResourceVersion: 0,
         eventHandler:        eventHandler,
-        clock:               clock.RealClock{},
+        clock:               clock,
         versioner:           versioner,
+        objectType:          objectType,
     }
     wc.cond = sync.NewCond(wc.RLocker())
     return wc
@@ -260,6 +288,7 @@ func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, upd
         ObjFields:       elem.Fields,
         Key:             key,
         ResourceVersion: resourceVersion,
+        RecordTime:      w.clock.Now(),
     }

     if err := func() error {
@@ -301,7 +330,8 @@ func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, upd

 // Assumes that lock is already held for write.
 func (w *watchCache) updateCache(event *watchCacheEvent) {
-    if w.endIndex == w.startIndex+w.capacity {
+    w.resizeCacheLocked(event.RecordTime)
+    if w.isCacheFullLocked() {
         // Cache is full - remove the oldest element.
         w.startIndex++
     }
@@ -309,6 +339,48 @@ func (w *watchCache) updateCache(event *watchCacheEvent) {
     w.endIndex++
 }

+// resizeCacheLocked resizes the cache if necessary:
+// - increases capacity by 2x if cache is full and all cached events occurred within last eventFreshDuration.
+// - decreases capacity by 2x when recent quarter of events occurred outside of eventFreshDuration(protect watchCache from flapping).
+func (w *watchCache) resizeCacheLocked(eventTime time.Time) {
+    if w.isCacheFullLocked() && eventTime.Sub(w.cache[w.startIndex%w.capacity].RecordTime) < eventFreshDuration {
+        capacity := min(w.capacity*2, w.upperBoundCapacity)
+        if capacity > w.capacity {
+            w.doCacheResizeLocked(capacity)
+        }
+        return
+    }
+    if w.isCacheFullLocked() && eventTime.Sub(w.cache[(w.endIndex-w.capacity/4)%w.capacity].RecordTime) > eventFreshDuration {
+        capacity := max(w.capacity/2, w.lowerBoundCapacity)
+        if capacity < w.capacity {
+            w.doCacheResizeLocked(capacity)
+        }
+        return
+    }
+}
+
+// isCacheFullLocked used to judge whether watchCacheEvent is full.
+// Assumes that lock is already held for write.
+func (w *watchCache) isCacheFullLocked() bool {
+    return w.endIndex == w.startIndex+w.capacity
+}
+
+// doCacheResizeLocked resize watchCache's event array with different capacity.
+// Assumes that lock is already held for write.
+func (w *watchCache) doCacheResizeLocked(capacity int) {
+    newCache := make([]*watchCacheEvent, capacity)
+    if capacity < w.capacity {
+        // adjust startIndex if cache capacity shrink.
+        w.startIndex = w.endIndex - capacity
+    }
+    for i := w.startIndex; i < w.endIndex; i++ {
+        newCache[i%capacity] = w.cache[i%w.capacity]
+    }
+    w.cache = newCache
+    recordsWatchCacheCapacityChange(w.objectType.String(), w.capacity, capacity)
+    w.capacity = capacity
+}
+
 // List returns list of pointers to <storeElement> objects.
 func (w *watchCache) List() []interface{} {
     return w.store.List()
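The resize policy introduced here doubles the ring buffer while it keeps filling up within eventFreshDuration (capped at defaultUpperBoundCapacity) and halves it when even the most recent quarter of events is older than that window (floored at defaultLowerBoundCapacity). A hypothetical reduction of that decision to a pure function, reusing the constants from this diff:

package main

import (
    "fmt"
    "time"
)

const (
    eventFreshDuration        = 75 * time.Second
    defaultLowerBoundCapacity = 100
    defaultUpperBoundCapacity = 100 * 1024
)

// nextCapacity condenses resizeCacheLocked into a pure function: oldestAge is
// the age of the oldest buffered event, recentQuarterAge the age of the event
// one quarter from the end of the buffer. Illustrative only.
func nextCapacity(capacity int, full bool, oldestAge, recentQuarterAge time.Duration) int {
    if full && oldestAge < eventFreshDuration {
        // The whole window is still "fresh": history is too short, grow 2x up to the cap.
        if c := capacity * 2; c < defaultUpperBoundCapacity {
            return c
        }
        return defaultUpperBoundCapacity
    }
    if full && recentQuarterAge > eventFreshDuration {
        // Even recent events are stale: buffer is oversized, shrink 2x down to the floor.
        if c := capacity / 2; c > defaultLowerBoundCapacity {
            return c
        }
        return defaultLowerBoundCapacity
    }
    return capacity
}

func main() {
    // Sustained event bursts double capacity: 100 -> 200 -> 400 -> ...
    fmt.Println(nextCapacity(100, true, 10*time.Second, 5*time.Second)) // 200
    // A quiet resource shrinks back towards the lower bound.
    fmt.Println(nextCapacity(400, true, 10*time.Minute, 2*time.Minute)) // 200
    fmt.Println(nextCapacity(200, true, 10*time.Minute, 2*time.Minute)) // 100
    // Not full: capacity is left alone.
    fmt.Println(nextCapacity(100, false, 0, 0)) // 100
}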
@@ -460,19 +532,16 @@ func (w *watchCache) GetAllEventsSinceThreadUnsafe(resourceVersion uint64) ([]*w
     size := w.endIndex - w.startIndex
     var oldest uint64
     switch {
-    case size >= w.capacity:
-        // Once the watch event buffer is full, the oldest watch event we can deliver
-        // is the first one in the buffer.
-        oldest = w.cache[w.startIndex%w.capacity].ResourceVersion
-    case w.listResourceVersion > 0:
-        // If the watch event buffer isn't full, the oldest watch event we can deliver
-        // is one greater than the resource version of the last full list.
+    case w.listResourceVersion > 0 && w.startIndex == 0:
+        // If no event was removed from the buffer since last relist, the oldest watch
+        // event we can deliver is one greater than the resource version of the list.
         oldest = w.listResourceVersion + 1
     case size > 0:
-        // If we've never completed a list, use the resourceVersion of the oldest event
-        // in the buffer.
-        // This should only happen in unit tests that populate the buffer without
-        // performing list/replace operations.
+        // If the previous condition is not satisfied: either some event was already
+        // removed from the buffer or we've never completed a list (the latter can
+        // only happen in unit tests that populate the buffer without performing
+        // list/replace operations), the oldest watch event we can deliver is the first
+        // one in the buffer.
         oldest = w.cache[w.startIndex%w.capacity].ResourceVersion
     default:
         return nil, fmt.Errorf("watch cache isn't correctly initialized")