mirror of
https://github.com/kubernetes-sigs/prometheus-adapter.git
synced 2026-04-07 10:17:51 +00:00
vendor dependencies

parent 604208ef4f, commit 72abf135d6
1156 changed files with 78178 additions and 105799 deletions
vendor/k8s.io/apiserver/pkg/storage/OWNERS (generated, vendored): 2 changed lines
@@ -1,3 +1,5 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
 approvers:
 - lavalamp
 - liggitt
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package storage
+package cacher
 
 import (
 	"context"
@@ -24,7 +24,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/golang/glog"
+	"k8s.io/klog"
 
 	"k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
@@ -37,21 +37,38 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/apiserver/pkg/features"
+	"k8s.io/apiserver/pkg/storage"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	utiltrace "k8s.io/apiserver/pkg/util/trace"
 	"k8s.io/client-go/tools/cache"
+	utiltrace "k8s.io/utils/trace"
+
+	"github.com/prometheus/client_golang/prometheus"
 )
 
-// CacherConfig contains the configuration for a given Cache.
-type CacherConfig struct {
+var (
+	initCounter = prometheus.NewCounterVec(
+		prometheus.CounterOpts{
+			Name: "apiserver_init_events_total",
+			Help: "Counter of init events processed in watchcache broken by resource type",
+		},
+		[]string{"resource"},
+	)
+)
+
+func init() {
+	prometheus.MustRegister(initCounter)
+}
+
+// Config contains the configuration for a given Cache.
+type Config struct {
 	// Maximum size of the history cached in memory.
 	CacheCapacity int
 
 	// An underlying storage.Interface.
-	Storage Interface
+	Storage storage.Interface
 
 	// An underlying storage.Versioner.
-	Versioner Versioner
+	Versioner storage.Versioner
 
 	// The Cache will be caching objects of a given Type and assumes that they
 	// are all stored under ResourcePrefix directory in the underlying database.
@@ -61,12 +78,12 @@ type CacherConfig struct {
 	// KeyFunc is used to get a key in the underlying storage for a given object.
 	KeyFunc func(runtime.Object) (string, error)
 
-	// GetAttrsFunc is used to get object labels, fields, and the uninitialized bool
-	GetAttrsFunc func(runtime.Object) (label labels.Set, field fields.Set, uninitialized bool, err error)
+	// GetAttrsFunc is used to get object labels, fields
+	GetAttrsFunc func(runtime.Object) (label labels.Set, field fields.Set, err error)
 
 	// TriggerPublisherFunc is used for optimizing amount of watchers that
 	// needs to process an incoming event.
-	TriggerPublisherFunc TriggerPublisherFunc
+	TriggerPublisherFunc storage.TriggerPublisherFunc
 
 	// NewList is a function that creates new empty object storing a list of
 	// objects of type Type.
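Note: the new GetAttrsFunc signature drops the trailing uninitialized bool (the flag tracked the deprecated initializers feature). As an illustrative sketch only, with hypothetical names that are not part of the vendored tree, a pre-existing four-value attribute function could be adapted to the new three-value shape like this:

package cacheradapter // hypothetical package, illustration only

import (
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
)

// oldAttrsFunc mirrors the pre-bump shape: labels, fields, uninitialized flag, error.
type oldAttrsFunc func(runtime.Object) (labels.Set, fields.Set, bool, error)

// newAttrsFunc mirrors the post-bump shape expected by Config.GetAttrsFunc.
type newAttrsFunc func(runtime.Object) (labels.Set, fields.Set, error)

// adaptGetAttrs wraps an old-style function and simply discards the uninitialized flag.
func adaptGetAttrs(old oldAttrsFunc) newAttrsFunc {
	return func(obj runtime.Object) (labels.Set, fields.Set, error) {
		l, f, _, err := old(obj)
		return l, f, err
	}
}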
@ -81,14 +98,17 @@ func (wm watchersMap) addWatcher(w *cacheWatcher, number int) {
|
|||
wm[number] = w
|
||||
}
|
||||
|
||||
func (wm watchersMap) deleteWatcher(number int) {
|
||||
delete(wm, number)
|
||||
func (wm watchersMap) deleteWatcher(number int, done func(*cacheWatcher)) {
|
||||
if watcher, ok := wm[number]; ok {
|
||||
delete(wm, number)
|
||||
done(watcher)
|
||||
}
|
||||
}
|
||||
|
||||
func (wm watchersMap) terminateAll() {
|
||||
func (wm watchersMap) terminateAll(done func(*cacheWatcher)) {
|
||||
for key, watcher := range wm {
|
||||
delete(wm, key)
|
||||
watcher.stop()
|
||||
done(watcher)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -108,29 +128,29 @@ func (i *indexedWatchers) addWatcher(w *cacheWatcher, number int, value string,
|
|||
}
|
||||
}
|
||||
|
||||
func (i *indexedWatchers) deleteWatcher(number int, value string, supported bool) {
|
||||
func (i *indexedWatchers) deleteWatcher(number int, value string, supported bool, done func(*cacheWatcher)) {
|
||||
if supported {
|
||||
i.valueWatchers[value].deleteWatcher(number)
|
||||
i.valueWatchers[value].deleteWatcher(number, done)
|
||||
if len(i.valueWatchers[value]) == 0 {
|
||||
delete(i.valueWatchers, value)
|
||||
}
|
||||
} else {
|
||||
i.allWatchers.deleteWatcher(number)
|
||||
i.allWatchers.deleteWatcher(number, done)
|
||||
}
|
||||
}
|
||||
|
||||
func (i *indexedWatchers) terminateAll(objectType reflect.Type) {
|
||||
func (i *indexedWatchers) terminateAll(objectType reflect.Type, done func(*cacheWatcher)) {
|
||||
if len(i.allWatchers) > 0 || len(i.valueWatchers) > 0 {
|
||||
glog.Warningf("Terminating all watchers from cacher %v", objectType)
|
||||
klog.Warningf("Terminating all watchers from cacher %v", objectType)
|
||||
}
|
||||
i.allWatchers.terminateAll()
|
||||
i.allWatchers.terminateAll(done)
|
||||
for index, watchers := range i.valueWatchers {
|
||||
watchers.terminateAll()
|
||||
watchers.terminateAll(done)
|
||||
delete(i.valueWatchers, index)
|
||||
}
|
||||
}
|
||||
|
||||
type filterWithAttrsFunc func(key string, l labels.Set, f fields.Set, uninitialized bool) bool
|
||||
type filterWithAttrsFunc func(key string, l labels.Set, f fields.Set) bool
|
||||
|
||||
// Cacher is responsible for serving WATCH and LIST requests for a given
|
||||
// resource from its internal cache and updating its cache in the background
|
||||
|
|
@ -141,7 +161,7 @@ type Cacher struct {
|
|||
// HighWaterMarks for performance debugging.
|
||||
// Important: Since HighWaterMark is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platforms
|
||||
// See: https://golang.org/pkg/sync/atomic/ for more information
|
||||
incomingHWM HighWaterMark
|
||||
incomingHWM storage.HighWaterMark
|
||||
// Incoming events that should be dispatched to watchers.
|
||||
incoming chan watchCacheEvent
|
||||
|
||||
|
|
@ -156,7 +176,7 @@ type Cacher struct {
|
|||
ready *ready
|
||||
|
||||
// Underlying storage.Interface.
|
||||
storage Interface
|
||||
storage storage.Interface
|
||||
|
||||
// Expected type of objects in the underlying cache.
|
||||
objectType reflect.Type
|
||||
|
|
@ -166,11 +186,11 @@ type Cacher struct {
|
|||
reflector *cache.Reflector
|
||||
|
||||
// Versioner is used to handle resource versions.
|
||||
versioner Versioner
|
||||
versioner storage.Versioner
|
||||
|
||||
// triggerFunc is used for optimizing amount of watchers that needs to process
|
||||
// an incoming event.
|
||||
triggerFunc TriggerPublisherFunc
|
||||
triggerFunc storage.TriggerPublisherFunc
|
||||
// watchers is mapping from the value of trigger function that a
|
||||
// watcher is interested into the watchers
|
||||
watcherIdx int
|
||||
|
|
@ -185,13 +205,27 @@ type Cacher struct {
|
|||
stopped bool
|
||||
stopCh chan struct{}
|
||||
stopWg sync.WaitGroup
|
||||
|
||||
// timer is used to avoid unnecessary allocations in underlying watchers.
|
||||
timer *time.Timer
|
||||
|
||||
// dispatching determines whether there is currently dispatching of
|
||||
// any event in flight.
|
||||
dispatching bool
|
||||
// watchersBuffer is a list of watchers potentially interested in currently
|
||||
// dispatched event.
|
||||
watchersBuffer []*cacheWatcher
|
||||
// watchersToStop is a list of watchers that were supposed to be stopped
|
||||
// during current dispatching, but stopping was deferred to the end of
|
||||
// dispatching that event to avoid race with closing channels in watchers.
|
||||
watchersToStop []*cacheWatcher
|
||||
}
|
||||
|
||||
// Create a new Cacher responsible for servicing WATCH and LIST requests from
|
||||
// NewCacherFromConfig creates a new Cacher responsible for servicing WATCH and LIST requests from
|
||||
// its internal cache and updating its cache in the background based on the
|
||||
// given configuration.
|
||||
func NewCacherFromConfig(config CacherConfig) *Cacher {
|
||||
watchCache := newWatchCache(config.CacheCapacity, config.KeyFunc, config.GetAttrsFunc)
|
||||
func NewCacherFromConfig(config Config) *Cacher {
|
||||
watchCache := newWatchCache(config.CacheCapacity, config.KeyFunc, config.GetAttrsFunc, config.Versioner)
|
||||
listerWatcher := newCacherListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
|
||||
reflectorName := "storage/cacher.go:" + config.ResourcePrefix
|
||||
|
||||
|
|
@ -226,6 +260,7 @@ func NewCacherFromConfig(config CacherConfig) *Cacher {
|
|||
// and there are no guarantees on the order that they will stop.
|
||||
// So we will be simply closing the channel, and synchronizing on the WaitGroup.
|
||||
stopCh: stopCh,
|
||||
timer: time.NewTimer(time.Duration(0)),
|
||||
}
|
||||
watchCache.SetOnEvent(cacher.processEvent)
|
||||
go cacher.dispatchEvents()
|
||||
|
|
@ -241,6 +276,14 @@ func NewCacherFromConfig(config CacherConfig) *Cacher {
|
|||
}, time.Second, stopCh,
|
||||
)
|
||||
}()
|
||||
|
||||
// Ensure that timer is stopped.
|
||||
if !cacher.timer.Stop() {
|
||||
// Consume triggered (but not yet received) timer event
|
||||
// so that future reuse does not get a spurious timeout.
|
||||
<-cacher.timer.C
|
||||
}
|
||||
|
||||
return cacher
|
||||
}
|
||||
|
||||
|
|
@ -268,28 +311,28 @@ func (c *Cacher) startCaching(stopChannel <-chan struct{}) {
|
|||
// Also note that startCaching is called in a loop, so there's no need
|
||||
// to have another loop here.
|
||||
if err := c.reflector.ListAndWatch(stopChannel); err != nil {
|
||||
glog.Errorf("unexpected ListAndWatch error: %v", err)
|
||||
klog.Errorf("unexpected ListAndWatch error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
func (c *Cacher) Versioner() Versioner {
|
||||
// Versioner implements storage.Interface.
|
||||
func (c *Cacher) Versioner() storage.Versioner {
|
||||
return c.storage.Versioner()
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
// Create implements storage.Interface.
|
||||
func (c *Cacher) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {
|
||||
return c.storage.Create(ctx, key, obj, out, ttl)
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
func (c *Cacher) Delete(ctx context.Context, key string, out runtime.Object, preconditions *Preconditions) error {
|
||||
// Delete implements storage.Interface.
|
||||
func (c *Cacher) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions) error {
|
||||
return c.storage.Delete(ctx, key, out, preconditions)
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
func (c *Cacher) Watch(ctx context.Context, key string, resourceVersion string, pred SelectionPredicate) (watch.Interface, error) {
|
||||
watchRV, err := c.versioner.ParseWatchResourceVersion(resourceVersion)
|
||||
// Watch implements storage.Interface.
|
||||
func (c *Cacher) Watch(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate) (watch.Interface, error) {
|
||||
watchRV, err := c.versioner.ParseResourceVersion(resourceVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -334,6 +377,13 @@ func (c *Cacher) Watch(ctx context.Context, key string, resourceVersion string,
|
|||
chanSize = 1000
|
||||
}
|
||||
|
||||
// With some events already sent, update resourceVersion so that
|
||||
// events that were buffered and not yet processed won't be delivered
|
||||
// to this watcher second time causing going back in time.
|
||||
if len(initEvents) > 0 {
|
||||
watchRV = initEvents[len(initEvents)-1].ResourceVersion
|
||||
}
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
forget := forgetWatcher(c, c.watcherIdx, triggerValue, triggerSupported)
|
||||
|
|
@ -344,12 +394,12 @@ func (c *Cacher) Watch(ctx context.Context, key string, resourceVersion string,
|
|||
return watcher, nil
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
func (c *Cacher) WatchList(ctx context.Context, key string, resourceVersion string, pred SelectionPredicate) (watch.Interface, error) {
|
||||
// WatchList implements storage.Interface.
|
||||
func (c *Cacher) WatchList(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate) (watch.Interface, error) {
|
||||
return c.Watch(ctx, key, resourceVersion, pred)
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
// Get implements storage.Interface.
|
||||
func (c *Cacher) Get(ctx context.Context, key string, resourceVersion string, objPtr runtime.Object, ignoreNotFound bool) error {
|
||||
if resourceVersion == "" {
|
||||
// If resourceVersion is not specified, serve it from underlying
|
||||
|
|
@ -360,7 +410,7 @@ func (c *Cacher) Get(ctx context.Context, key string, resourceVersion string, ob
|
|||
// If resourceVersion is specified, serve it from cache.
|
||||
// It's guaranteed that the returned value is at least that
|
||||
// fresh as the given resourceVersion.
|
||||
getRV, err := c.versioner.ParseListResourceVersion(resourceVersion)
|
||||
getRV, err := c.versioner.ParseResourceVersion(resourceVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -394,26 +444,31 @@ func (c *Cacher) Get(ctx context.Context, key string, resourceVersion string, ob
|
|||
} else {
|
||||
objVal.Set(reflect.Zero(objVal.Type()))
|
||||
if !ignoreNotFound {
|
||||
return NewKeyNotFoundError(key, int64(readResourceVersion))
|
||||
return storage.NewKeyNotFoundError(key, int64(readResourceVersion))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
func (c *Cacher) GetToList(ctx context.Context, key string, resourceVersion string, pred SelectionPredicate, listObj runtime.Object) error {
|
||||
// GetToList implements storage.Interface.
|
||||
func (c *Cacher) GetToList(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate, listObj runtime.Object) error {
|
||||
pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)
|
||||
if resourceVersion == "" || (pagingEnabled && (len(pred.Continue) > 0 || pred.Limit > 0)) {
|
||||
hasContinuation := pagingEnabled && len(pred.Continue) > 0
|
||||
hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0"
|
||||
if resourceVersion == "" || hasContinuation || hasLimit {
|
||||
// If resourceVersion is not specified, serve it from underlying
|
||||
// storage (for backward compatibility). If a continuation or limit is
|
||||
// storage (for backward compatibility). If a continuation is
|
||||
// requested, serve it from the underlying storage as well.
|
||||
// Limits are only sent to storage when resourceVersion is non-zero
|
||||
// since the watch cache isn't able to perform continuations, and
|
||||
// limits are ignored when resource version is zero
|
||||
return c.storage.GetToList(ctx, key, resourceVersion, pred, listObj)
|
||||
}
|
||||
|
||||
// If resourceVersion is specified, serve it from cache.
|
||||
// It's guaranteed that the returned value is at least that
|
||||
// fresh as the given resourceVersion.
|
||||
listRV, err := c.versioner.ParseListResourceVersion(resourceVersion)
|
||||
listRV, err := c.versioner.ParseResourceVersion(resourceVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -452,7 +507,7 @@ func (c *Cacher) GetToList(ctx context.Context, key string, resourceVersion stri
|
|||
if !ok {
|
||||
return fmt.Errorf("non *storeElement returned from storage: %v", obj)
|
||||
}
|
||||
if filter(elem.Key, elem.Labels, elem.Fields, elem.Uninitialized) {
|
||||
if filter(elem.Key, elem.Labels, elem.Fields) {
|
||||
listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem()))
|
||||
}
|
||||
}
|
||||
|
|
@ -464,8 +519,8 @@ func (c *Cacher) GetToList(ctx context.Context, key string, resourceVersion stri
|
|||
return nil
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
func (c *Cacher) List(ctx context.Context, key string, resourceVersion string, pred SelectionPredicate, listObj runtime.Object) error {
|
||||
// List implements storage.Interface.
|
||||
func (c *Cacher) List(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate, listObj runtime.Object) error {
|
||||
pagingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)
|
||||
hasContinuation := pagingEnabled && len(pred.Continue) > 0
|
||||
hasLimit := pagingEnabled && pred.Limit > 0 && resourceVersion != "0"
|
||||
|
|
@ -482,7 +537,7 @@ func (c *Cacher) List(ctx context.Context, key string, resourceVersion string, p
|
|||
// If resourceVersion is specified, serve it from cache.
|
||||
// It's guaranteed that the returned value is at least that
|
||||
// fresh as the given resourceVersion.
|
||||
listRV, err := c.versioner.ParseListResourceVersion(resourceVersion)
|
||||
listRV, err := c.versioner.ParseResourceVersion(resourceVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -526,7 +581,7 @@ func (c *Cacher) List(ctx context.Context, key string, resourceVersion string, p
|
|||
if !ok {
|
||||
return fmt.Errorf("non *storeElement returned from storage: %v", obj)
|
||||
}
|
||||
if filter(elem.Key, elem.Labels, elem.Fields, elem.Uninitialized) {
|
||||
if filter(elem.Key, elem.Labels, elem.Fields) {
|
||||
listVal.Set(reflect.Append(listVal, reflect.ValueOf(elem.Object).Elem()))
|
||||
}
|
||||
}
|
||||
|
|
@ -539,14 +594,14 @@ func (c *Cacher) List(ctx context.Context, key string, resourceVersion string, p
|
|||
return nil
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
// GuaranteedUpdate implements storage.Interface.
|
||||
func (c *Cacher) GuaranteedUpdate(
|
||||
ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,
|
||||
preconditions *Preconditions, tryUpdate UpdateFunc, _ ...runtime.Object) error {
|
||||
preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, _ ...runtime.Object) error {
|
||||
// Ignore the suggestion and try to pass down the current version of the object
|
||||
// read from cache.
|
||||
if elem, exists, err := c.watchCache.GetByKey(key); err != nil {
|
||||
glog.Errorf("GetByKey returned error: %v", err)
|
||||
klog.Errorf("GetByKey returned error: %v", err)
|
||||
} else if exists {
|
||||
currObj := elem.(*storeElement).Object.DeepCopyObject()
|
||||
return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate, currObj)
|
||||
|
|
@ -555,6 +610,7 @@ func (c *Cacher) GuaranteedUpdate(
|
|||
return c.storage.GuaranteedUpdate(ctx, key, ptrToType, ignoreNotFound, preconditions, tryUpdate)
|
||||
}
|
||||
|
||||
// Count implements storage.Interface.
|
||||
func (c *Cacher) Count(pathPrefix string) (int64, error) {
|
||||
return c.storage.Count(pathPrefix)
|
||||
}
|
||||
|
|
@ -588,7 +644,7 @@ func (c *Cacher) triggerValues(event *watchCacheEvent) ([]string, bool) {
|
|||
func (c *Cacher) processEvent(event *watchCacheEvent) {
|
||||
if curLen := int64(len(c.incoming)); c.incomingHWM.Update(curLen) {
|
||||
// Monitor if this gets backed up, and how much.
|
||||
glog.V(1).Infof("cacher (%v): %v objects queued in incoming channel.", c.objectType.String(), curLen)
|
||||
klog.V(1).Infof("cacher (%v): %v objects queued in incoming channel.", c.objectType.String(), curLen)
|
||||
}
|
||||
c.incoming <- *event
|
||||
}
|
||||
|
|
@ -608,19 +664,41 @@ func (c *Cacher) dispatchEvents() {
|
|||
}
|
||||
|
||||
func (c *Cacher) dispatchEvent(event *watchCacheEvent) {
|
||||
c.startDispatching(event)
|
||||
|
||||
// Since add() can block, we explicitly add when cacher is unlocked.
|
||||
for _, watcher := range c.watchersBuffer {
|
||||
watcher.add(event, c.timer, c.dispatchTimeoutBudget)
|
||||
}
|
||||
|
||||
c.finishDispatching()
|
||||
}
|
||||
|
||||
// startDispatching chooses watchers potentially interested in a given event
|
||||
// a marks dispatching as true.
|
||||
func (c *Cacher) startDispatching(event *watchCacheEvent) {
|
||||
triggerValues, supported := c.triggerValues(event)
|
||||
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
c.dispatching = true
|
||||
// We are reusing the slice to avoid memory reallocations in every
|
||||
// dispatchEvent() call. That may prevent Go GC from freeing items
|
||||
// from previous phases that are sitting behind the current length
|
||||
// of the slice, but there is only a limited number of those and the
|
||||
// gain from avoiding memory allocations is much bigger.
|
||||
c.watchersBuffer = c.watchersBuffer[:0]
|
||||
|
||||
// Iterate over "allWatchers" no matter what the trigger function is.
|
||||
for _, watcher := range c.watchers.allWatchers {
|
||||
watcher.add(event, c.dispatchTimeoutBudget)
|
||||
c.watchersBuffer = append(c.watchersBuffer, watcher)
|
||||
}
|
||||
if supported {
|
||||
// Iterate over watchers interested in the given values of the trigger.
|
||||
for _, triggerValue := range triggerValues {
|
||||
for _, watcher := range c.watchers.valueWatchers[triggerValue] {
|
||||
watcher.add(event, c.dispatchTimeoutBudget)
|
||||
c.watchersBuffer = append(c.watchersBuffer, watcher)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
|
@ -633,16 +711,38 @@ func (c *Cacher) dispatchEvent(event *watchCacheEvent) {
|
|||
// Iterate over watchers interested in exact values for all values.
|
||||
for _, watchers := range c.watchers.valueWatchers {
|
||||
for _, watcher := range watchers {
|
||||
watcher.add(event, c.dispatchTimeoutBudget)
|
||||
c.watchersBuffer = append(c.watchersBuffer, watcher)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// finishDispatching stops all the watchers that were supposed to be
|
||||
// stopped in the meantime, but it was deferred to avoid closing input
|
||||
// channels of watchers, as add() may still have writing to it.
|
||||
// It also marks dispatching as false.
|
||||
func (c *Cacher) finishDispatching() {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.dispatching = false
|
||||
for _, watcher := range c.watchersToStop {
|
||||
watcher.stop()
|
||||
}
|
||||
c.watchersToStop = c.watchersToStop[:0]
|
||||
}
|
||||
|
||||
func (c *Cacher) terminateAllWatchers() {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
c.watchers.terminateAll(c.objectType)
|
||||
c.watchers.terminateAll(c.objectType, c.stopWatcherThreadUnsafe)
|
||||
}
|
||||
|
||||
func (c *Cacher) stopWatcherThreadUnsafe(watcher *cacheWatcher) {
|
||||
if c.dispatching {
|
||||
c.watchersToStop = append(c.watchersToStop, watcher)
|
||||
} else {
|
||||
watcher.stop()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cacher) isStopped() bool {
|
||||
|
|
@ -651,6 +751,7 @@ func (c *Cacher) isStopped() bool {
|
|||
return c.stopped
|
||||
}
|
||||
|
||||
// Stop implements the graceful termination.
|
||||
func (c *Cacher) Stop() {
|
||||
// avoid stopping twice (note: cachers are shared with subresources)
|
||||
if c.isStopped() {
|
||||
|
|
@ -667,49 +768,44 @@ func (c *Cacher) Stop() {
|
|||
c.stopWg.Wait()
|
||||
}
|
||||
|
||||
func forgetWatcher(c *Cacher, index int, triggerValue string, triggerSupported bool) func(bool) {
|
||||
return func(lock bool) {
|
||||
if lock {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
} else {
|
||||
// false is currently passed only if we are forcing watcher to close due
|
||||
// to its unresponsiveness and blocking other watchers.
|
||||
// TODO: Get this information in cleaner way.
|
||||
glog.V(1).Infof("Forcing watcher close due to unresponsiveness: %v", c.objectType.String())
|
||||
}
|
||||
func forgetWatcher(c *Cacher, index int, triggerValue string, triggerSupported bool) func() {
|
||||
return func() {
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
|
||||
// It's possible that the watcher is already not in the structure (e.g. in case of
|
||||
// simultaneous Stop() and terminateAllWatchers(), but it doesn't break anything.
|
||||
c.watchers.deleteWatcher(index, triggerValue, triggerSupported)
|
||||
// simultaneous Stop() and terminateAllWatchers(), but it is safe to call stop()
|
||||
// on a watcher multiple times.
|
||||
c.watchers.deleteWatcher(index, triggerValue, triggerSupported, c.stopWatcherThreadUnsafe)
|
||||
}
|
||||
}
|
||||
|
||||
func filterWithAttrsFunction(key string, p SelectionPredicate) filterWithAttrsFunc {
|
||||
filterFunc := func(objKey string, label labels.Set, field fields.Set, uninitialized bool) bool {
|
||||
func filterWithAttrsFunction(key string, p storage.SelectionPredicate) filterWithAttrsFunc {
|
||||
filterFunc := func(objKey string, label labels.Set, field fields.Set) bool {
|
||||
if !hasPathPrefix(objKey, key) {
|
||||
return false
|
||||
}
|
||||
return p.MatchesObjectAttributes(label, field, uninitialized)
|
||||
return p.MatchesObjectAttributes(label, field)
|
||||
}
|
||||
return filterFunc
|
||||
}
|
||||
|
||||
// Returns resource version to which the underlying cache is synced.
|
||||
// LastSyncResourceVersion returns resource version to which the underlying cache is synced.
|
||||
func (c *Cacher) LastSyncResourceVersion() (uint64, error) {
|
||||
c.ready.wait()
|
||||
|
||||
resourceVersion := c.reflector.LastSyncResourceVersion()
|
||||
return c.versioner.ParseListResourceVersion(resourceVersion)
|
||||
return c.versioner.ParseResourceVersion(resourceVersion)
|
||||
}
|
||||
|
||||
// cacherListerWatcher opaques storage.Interface to expose cache.ListerWatcher.
|
||||
type cacherListerWatcher struct {
|
||||
storage Interface
|
||||
storage storage.Interface
|
||||
resourcePrefix string
|
||||
newListFunc func() runtime.Object
|
||||
}
|
||||
|
||||
func newCacherListerWatcher(storage Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher {
|
||||
func newCacherListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher {
|
||||
return &cacherListerWatcher{
|
||||
storage: storage,
|
||||
resourcePrefix: resourcePrefix,
|
||||
|
|
@ -720,7 +816,7 @@ func newCacherListerWatcher(storage Interface, resourcePrefix string, newListFun
|
|||
// Implements cache.ListerWatcher interface.
|
||||
func (lw *cacherListerWatcher) List(options metav1.ListOptions) (runtime.Object, error) {
|
||||
list := lw.newListFunc()
|
||||
if err := lw.storage.List(context.TODO(), lw.resourcePrefix, "", Everything, list); err != nil {
|
||||
if err := lw.storage.List(context.TODO(), lw.resourcePrefix, "", storage.Everything, list); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return list, nil
|
||||
|
|
@ -728,7 +824,7 @@ func (lw *cacherListerWatcher) List(options metav1.ListOptions) (runtime.Object,
|
|||
|
||||
// Implements cache.ListerWatcher interface.
|
||||
func (lw *cacherListerWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) {
|
||||
return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, options.ResourceVersion, Everything)
|
||||
return lw.storage.WatchList(context.TODO(), lw.resourcePrefix, options.ResourceVersion, storage.Everything)
|
||||
}
|
||||
|
||||
// errWatcher implements watch.Interface to return a single error
|
||||
|
|
@ -771,7 +867,7 @@ func (c *errWatcher) Stop() {
|
|||
// no-op
|
||||
}
|
||||
|
||||
// cachWatcher implements watch.Interface
|
||||
// cacheWatcher implements watch.Interface
|
||||
type cacheWatcher struct {
|
||||
sync.Mutex
|
||||
input chan *watchCacheEvent
|
||||
|
|
@ -779,11 +875,11 @@ type cacheWatcher struct {
|
|||
done chan struct{}
|
||||
filter filterWithAttrsFunc
|
||||
stopped bool
|
||||
forget func(bool)
|
||||
versioner Versioner
|
||||
forget func()
|
||||
versioner storage.Versioner
|
||||
}
|
||||
|
||||
func newCacheWatcher(resourceVersion uint64, chanSize int, initEvents []*watchCacheEvent, filter filterWithAttrsFunc, forget func(bool), versioner Versioner) *cacheWatcher {
|
||||
func newCacheWatcher(resourceVersion uint64, chanSize int, initEvents []*watchCacheEvent, filter filterWithAttrsFunc, forget func(), versioner storage.Versioner) *cacheWatcher {
|
||||
watcher := &cacheWatcher{
|
||||
input: make(chan *watchCacheEvent, chanSize),
|
||||
result: make(chan watch.Event, chanSize),
|
||||
|
|
@ -804,8 +900,7 @@ func (c *cacheWatcher) ResultChan() <-chan watch.Event {
|
|||
|
||||
// Implements watch.Interface.
|
||||
func (c *cacheWatcher) Stop() {
|
||||
c.forget(true)
|
||||
c.stop()
|
||||
c.forget()
|
||||
}
|
||||
|
||||
func (c *cacheWatcher) stop() {
|
||||
|
|
@ -818,9 +913,7 @@ func (c *cacheWatcher) stop() {
|
|||
}
|
||||
}
|
||||
|
||||
var timerPool sync.Pool
|
||||
|
||||
func (c *cacheWatcher) add(event *watchCacheEvent, budget *timeBudget) {
|
||||
func (c *cacheWatcher) add(event *watchCacheEvent, timer *time.Timer, budget *timeBudget) {
|
||||
// Try to send the event immediately, without blocking.
|
||||
select {
|
||||
case c.input <- event:
|
||||
|
|
@ -834,28 +927,21 @@ func (c *cacheWatcher) add(event *watchCacheEvent, budget *timeBudget) {
|
|||
startTime := time.Now()
|
||||
timeout := budget.takeAvailable()
|
||||
|
||||
t, ok := timerPool.Get().(*time.Timer)
|
||||
if ok {
|
||||
t.Reset(timeout)
|
||||
} else {
|
||||
t = time.NewTimer(timeout)
|
||||
}
|
||||
defer timerPool.Put(t)
|
||||
timer.Reset(timeout)
|
||||
|
||||
select {
|
||||
case c.input <- event:
|
||||
stopped := t.Stop()
|
||||
if !stopped {
|
||||
if !timer.Stop() {
|
||||
// Consume triggered (but not yet received) timer event
|
||||
// so that future reuse does not get a spurious timeout.
|
||||
<-t.C
|
||||
<-timer.C
|
||||
}
|
||||
case <-t.C:
|
||||
case <-timer.C:
|
||||
// This means that we couldn't send event to that watcher.
|
||||
// Since we don't want to block on it infinitely,
|
||||
// we simply terminate it.
|
||||
c.forget(false)
|
||||
c.stop()
|
||||
klog.V(1).Infof("Forcing watcher close due to unresponsiveness: %v", reflect.TypeOf(event.Object).String())
|
||||
c.forget()
|
||||
}
|
||||
|
||||
budget.returnUnused(timeout - time.Since(startTime))
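Note: this hunk replaces the per-call sync.Pool of timers with a single time.Timer owned by the Cacher and passed into add(). The snippet below is only an illustrative sketch of the general stop-and-drain idiom for safely reusing one timer across sends; all names in it are made up and it is not part of the vendored code:

package timerreuse // illustrative sketch, not vendored code

import "time"

// sendWithTimeout tries to send ev on ch, giving up after timeout.
// The caller owns t and reuses it across calls, so the timer is always
// left stopped and drained before the function returns.
func sendWithTimeout(ch chan<- int, ev int, t *time.Timer, timeout time.Duration) bool {
	t.Reset(timeout)
	select {
	case ch <- ev:
		if !t.Stop() {
			// The timer already fired: drain the channel so a later Reset
			// does not observe a stale, spurious expiration.
			<-t.C
		}
		return true
	case <-t.C:
		// Timed out; the timer's channel has been consumed, nothing to drain.
		return false
	}
}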
|
||||
|
|
@ -863,10 +949,10 @@ func (c *cacheWatcher) add(event *watchCacheEvent, budget *timeBudget) {
|
|||
|
||||
// NOTE: sendWatchCacheEvent is assumed to not modify <event> !!!
|
||||
func (c *cacheWatcher) sendWatchCacheEvent(event *watchCacheEvent) {
|
||||
curObjPasses := event.Type != watch.Deleted && c.filter(event.Key, event.ObjLabels, event.ObjFields, event.ObjUninitialized)
|
||||
curObjPasses := event.Type != watch.Deleted && c.filter(event.Key, event.ObjLabels, event.ObjFields)
|
||||
oldObjPasses := false
|
||||
if event.PrevObject != nil {
|
||||
oldObjPasses = c.filter(event.Key, event.PrevObjLabels, event.PrevObjFields, event.PrevObjUninitialized)
|
||||
oldObjPasses = c.filter(event.Key, event.PrevObjLabels, event.PrevObjFields)
|
||||
}
|
||||
if !curObjPasses && !oldObjPasses {
|
||||
// Watcher is not interested in that object.
|
||||
|
|
@ -933,22 +1019,22 @@ func (c *cacheWatcher) process(initEvents []*watchCacheEvent, resourceVersion ui
|
|||
for _, event := range initEvents {
|
||||
c.sendWatchCacheEvent(event)
|
||||
}
|
||||
if len(initEvents) > 0 {
|
||||
objType := reflect.TypeOf(initEvents[0].Object).String()
|
||||
initCounter.WithLabelValues(objType).Add(float64(len(initEvents)))
|
||||
}
|
||||
processingTime := time.Since(startTime)
|
||||
if processingTime > initProcessThreshold {
|
||||
objType := "<null>"
|
||||
if len(initEvents) > 0 {
|
||||
objType = reflect.TypeOf(initEvents[0].Object).String()
|
||||
}
|
||||
glog.V(2).Infof("processing %d initEvents of %s took %v", len(initEvents), objType, processingTime)
|
||||
klog.V(2).Infof("processing %d initEvents of %s took %v", len(initEvents), objType, processingTime)
|
||||
}
|
||||
|
||||
defer close(c.result)
|
||||
defer c.Stop()
|
||||
for {
|
||||
event, ok := <-c.input
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
for event := range c.input {
|
||||
// only send events newer than resourceVersion
|
||||
if event.ResourceVersion > resourceVersion {
|
||||
c.sendWatchCacheEvent(event)
|
||||
|
|
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package storage
+package cacher
 
 import (
 	"sync"
vendor/k8s.io/apiserver/pkg/storage/cacher/util.go (generated, vendored, new file): 46 changed lines
@@ -0,0 +1,46 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cacher
+
+import (
+	"strings"
+)
+
+// hasPathPrefix returns true if the string matches pathPrefix exactly, or if is prefixed with pathPrefix at a path segment boundary
+func hasPathPrefix(s, pathPrefix string) bool {
+	// Short circuit if s doesn't contain the prefix at all
+	if !strings.HasPrefix(s, pathPrefix) {
+		return false
+	}
+
+	pathPrefixLength := len(pathPrefix)
+
+	if len(s) == pathPrefixLength {
+		// Exact match
+		return true
+	}
+	if strings.HasSuffix(pathPrefix, "/") {
+		// pathPrefix already ensured a path segment boundary
+		return true
+	}
+	if s[pathPrefixLength:pathPrefixLength+1] == "/" {
+		// The next character in s is a path segment boundary
+		// Check this instead of normalizing pathPrefix to avoid allocating on every call
+		return true
+	}
+	return false
+}
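Note: a few worked expectations for hasPathPrefix above, derived directly from the code; the registry paths are invented purely for illustration and the function itself is unexported:

// hasPathPrefix("/registry/pods", "/registry/pods")        == true  // exact match
// hasPathPrefix("/registry/pods/ns1", "/registry/pods")    == true  // next byte in s is "/"
// hasPathPrefix("/registry/pods/ns1", "/registry/pods/")   == true  // prefix already ends in "/"
// hasPathPrefix("/registry/podsecurity", "/registry/pods") == false // prefix matches, but not at a segment boundary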
@@ -14,24 +14,23 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package storage
+package cacher
 
 import (
 	"fmt"
 	"sort"
-	"strconv"
 	"sync"
 	"time"
 
 	"k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/watch"
-	utiltrace "k8s.io/apiserver/pkg/util/trace"
+	"k8s.io/apiserver/pkg/storage"
 	"k8s.io/client-go/tools/cache"
+	utiltrace "k8s.io/utils/trace"
 )
 
 const (
@ -47,30 +46,27 @@ const (
|
|||
// the previous value of the object to enable proper filtering in the
|
||||
// upper layers.
|
||||
type watchCacheEvent struct {
|
||||
Type watch.EventType
|
||||
Object runtime.Object
|
||||
ObjLabels labels.Set
|
||||
ObjFields fields.Set
|
||||
ObjUninitialized bool
|
||||
PrevObject runtime.Object
|
||||
PrevObjLabels labels.Set
|
||||
PrevObjFields fields.Set
|
||||
PrevObjUninitialized bool
|
||||
Key string
|
||||
ResourceVersion uint64
|
||||
Type watch.EventType
|
||||
Object runtime.Object
|
||||
ObjLabels labels.Set
|
||||
ObjFields fields.Set
|
||||
PrevObject runtime.Object
|
||||
PrevObjLabels labels.Set
|
||||
PrevObjFields fields.Set
|
||||
Key string
|
||||
ResourceVersion uint64
|
||||
}
|
||||
|
||||
// Computing a key of an object is generally non-trivial (it performs
|
||||
// e.g. validation underneath). Similarly computing object fields and
|
||||
// labels. To avoid computing them multiple times (to serve the event
|
||||
// in different List/Watch requests), in the underlying store we are
|
||||
// keeping structs (key, object, labels, fields, uninitialized).
|
||||
// keeping structs (key, object, labels, fields).
|
||||
type storeElement struct {
|
||||
Key string
|
||||
Object runtime.Object
|
||||
Labels labels.Set
|
||||
Fields fields.Set
|
||||
Uninitialized bool
|
||||
Key string
|
||||
Object runtime.Object
|
||||
Labels labels.Set
|
||||
Fields fields.Set
|
||||
}
|
||||
|
||||
func storeElementKey(obj interface{}) (string, error) {
|
||||
|
|
@ -108,7 +104,7 @@ type watchCache struct {
|
|||
keyFunc func(runtime.Object) (string, error)
|
||||
|
||||
// getAttrsFunc is used to get labels and fields of an object.
|
||||
getAttrsFunc func(runtime.Object) (labels.Set, fields.Set, bool, error)
|
||||
getAttrsFunc func(runtime.Object) (labels.Set, fields.Set, error)
|
||||
|
||||
// cache is used a cyclic buffer - its first element (with the smallest
|
||||
// resourceVersion) is defined by startIndex, its last element is defined
|
||||
|
|
@ -128,6 +124,9 @@ type watchCache struct {
|
|||
// ResourceVersion up to which the watchCache is propagated.
|
||||
resourceVersion uint64
|
||||
|
||||
// ResourceVersion of the last list result (populated via Replace() method).
|
||||
listResourceVersion uint64
|
||||
|
||||
// This handler is run at the end of every successful Replace() method.
|
||||
onReplace func()
|
||||
|
||||
|
|
@ -137,22 +136,28 @@ type watchCache struct {
|
|||
|
||||
// for testing timeouts.
|
||||
clock clock.Clock
|
||||
|
||||
// An underlying storage.Versioner.
|
||||
versioner storage.Versioner
|
||||
}
|
||||
|
||||
func newWatchCache(
|
||||
capacity int,
|
||||
keyFunc func(runtime.Object) (string, error),
|
||||
getAttrsFunc func(runtime.Object) (labels.Set, fields.Set, bool, error)) *watchCache {
|
||||
getAttrsFunc func(runtime.Object) (labels.Set, fields.Set, error),
|
||||
versioner storage.Versioner) *watchCache {
|
||||
wc := &watchCache{
|
||||
capacity: capacity,
|
||||
keyFunc: keyFunc,
|
||||
getAttrsFunc: getAttrsFunc,
|
||||
cache: make([]watchCacheElement, capacity),
|
||||
startIndex: 0,
|
||||
endIndex: 0,
|
||||
store: cache.NewStore(storeElementKey),
|
||||
resourceVersion: 0,
|
||||
clock: clock.RealClock{},
|
||||
capacity: capacity,
|
||||
keyFunc: keyFunc,
|
||||
getAttrsFunc: getAttrsFunc,
|
||||
cache: make([]watchCacheElement, capacity),
|
||||
startIndex: 0,
|
||||
endIndex: 0,
|
||||
store: cache.NewStore(storeElementKey),
|
||||
resourceVersion: 0,
|
||||
listResourceVersion: 0,
|
||||
clock: clock.RealClock{},
|
||||
versioner: versioner,
|
||||
}
|
||||
wc.cond = sync.NewCond(wc.RLocker())
|
||||
return wc
|
||||
|
|
@ -160,7 +165,7 @@ func newWatchCache(
|
|||
|
||||
// Add takes runtime.Object as an argument.
|
||||
func (w *watchCache) Add(obj interface{}) error {
|
||||
object, resourceVersion, err := objectToVersionedRuntimeObject(obj)
|
||||
object, resourceVersion, err := w.objectToVersionedRuntimeObject(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -172,7 +177,7 @@ func (w *watchCache) Add(obj interface{}) error {
|
|||
|
||||
// Update takes runtime.Object as an argument.
|
||||
func (w *watchCache) Update(obj interface{}) error {
|
||||
object, resourceVersion, err := objectToVersionedRuntimeObject(obj)
|
||||
object, resourceVersion, err := w.objectToVersionedRuntimeObject(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -184,7 +189,7 @@ func (w *watchCache) Update(obj interface{}) error {
|
|||
|
||||
// Delete takes runtime.Object as an argument.
|
||||
func (w *watchCache) Delete(obj interface{}) error {
|
||||
object, resourceVersion, err := objectToVersionedRuntimeObject(obj)
|
||||
object, resourceVersion, err := w.objectToVersionedRuntimeObject(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -194,49 +199,36 @@ func (w *watchCache) Delete(obj interface{}) error {
|
|||
return w.processEvent(event, resourceVersion, f)
|
||||
}
|
||||
|
||||
func objectToVersionedRuntimeObject(obj interface{}) (runtime.Object, uint64, error) {
|
||||
func (w *watchCache) objectToVersionedRuntimeObject(obj interface{}) (runtime.Object, uint64, error) {
|
||||
object, ok := obj.(runtime.Object)
|
||||
if !ok {
|
||||
return nil, 0, fmt.Errorf("obj does not implement runtime.Object interface: %v", obj)
|
||||
}
|
||||
meta, err := meta.Accessor(object)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
resourceVersion, err := parseResourceVersion(meta.GetResourceVersion())
|
||||
resourceVersion, err := w.versioner.ObjectResourceVersion(object)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
return object, resourceVersion, nil
|
||||
}
|
||||
|
||||
func parseResourceVersion(resourceVersion string) (uint64, error) {
|
||||
if resourceVersion == "" {
|
||||
return 0, nil
|
||||
}
|
||||
// Use bitsize being the size of int on the machine.
|
||||
return strconv.ParseUint(resourceVersion, 10, 0)
|
||||
}
|
||||
|
||||
func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, updateFunc func(*storeElement) error) error {
|
||||
key, err := w.keyFunc(event.Object)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't compute key: %v", err)
|
||||
}
|
||||
elem := &storeElement{Key: key, Object: event.Object}
|
||||
elem.Labels, elem.Fields, elem.Uninitialized, err = w.getAttrsFunc(event.Object)
|
||||
elem.Labels, elem.Fields, err = w.getAttrsFunc(event.Object)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
watchCacheEvent := &watchCacheEvent{
|
||||
Type: event.Type,
|
||||
Object: elem.Object,
|
||||
ObjLabels: elem.Labels,
|
||||
ObjFields: elem.Fields,
|
||||
ObjUninitialized: elem.Uninitialized,
|
||||
Key: key,
|
||||
ResourceVersion: resourceVersion,
|
||||
Type: event.Type,
|
||||
Object: elem.Object,
|
||||
ObjLabels: elem.Labels,
|
||||
ObjFields: elem.Fields,
|
||||
Key: key,
|
||||
ResourceVersion: resourceVersion,
|
||||
}
|
||||
|
||||
// TODO: We should consider moving this lock below after the watchCacheEvent
|
||||
|
|
@ -254,7 +246,6 @@ func (w *watchCache) processEvent(event watch.Event, resourceVersion uint64, upd
|
|||
watchCacheEvent.PrevObject = previousElem.Object
|
||||
watchCacheEvent.PrevObjLabels = previousElem.Labels
|
||||
watchCacheEvent.PrevObjFields = previousElem.Fields
|
||||
watchCacheEvent.PrevObjUninitialized = previousElem.Uninitialized
|
||||
}
|
||||
|
||||
if w.onEvent != nil {
|
||||
|
|
@ -362,7 +353,7 @@ func (w *watchCache) GetByKey(key string) (interface{}, bool, error) {
|
|||
|
||||
// Replace takes slice of runtime.Object as a parameter.
|
||||
func (w *watchCache) Replace(objs []interface{}, resourceVersion string) error {
|
||||
version, err := parseResourceVersion(resourceVersion)
|
||||
version, err := w.versioner.ParseResourceVersion(resourceVersion)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -377,16 +368,15 @@ func (w *watchCache) Replace(objs []interface{}, resourceVersion string) error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("couldn't compute key: %v", err)
|
||||
}
|
||||
objLabels, objFields, objUninitialized, err := w.getAttrsFunc(object)
|
||||
objLabels, objFields, err := w.getAttrsFunc(object)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
toReplace = append(toReplace, &storeElement{
|
||||
Key: key,
|
||||
Object: object,
|
||||
Labels: objLabels,
|
||||
Fields: objFields,
|
||||
Uninitialized: objUninitialized,
|
||||
Key: key,
|
||||
Object: object,
|
||||
Labels: objLabels,
|
||||
Fields: objFields,
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -398,6 +388,7 @@ func (w *watchCache) Replace(objs []interface{}, resourceVersion string) error {
|
|||
if err := w.store.Replace(toReplace, resourceVersion); err != nil {
|
||||
return err
|
||||
}
|
||||
w.listResourceVersion = version
|
||||
w.resourceVersion = version
|
||||
if w.onReplace != nil {
|
||||
w.onReplace()
|
||||
|
|
@ -420,12 +411,26 @@ func (w *watchCache) SetOnEvent(onEvent func(*watchCacheEvent)) {
|
|||
|
||||
func (w *watchCache) GetAllEventsSinceThreadUnsafe(resourceVersion uint64) ([]*watchCacheEvent, error) {
|
||||
size := w.endIndex - w.startIndex
|
||||
// if we have no watch events in our cache, the oldest one we can successfully deliver to a watcher
|
||||
// is the *next* event we'll receive, which will be at least one greater than our current resourceVersion
|
||||
oldest := w.resourceVersion + 1
|
||||
if size > 0 {
|
||||
var oldest uint64
|
||||
switch {
|
||||
case size >= w.capacity:
|
||||
// Once the watch event buffer is full, the oldest watch event we can deliver
|
||||
// is the first one in the buffer.
|
||||
oldest = w.cache[w.startIndex%w.capacity].resourceVersion
|
||||
case w.listResourceVersion > 0:
|
||||
// If the watch event buffer isn't full, the oldest watch event we can deliver
|
||||
// is one greater than the resource version of the last full list.
|
||||
oldest = w.listResourceVersion + 1
|
||||
case size > 0:
|
||||
// If we've never completed a list, use the resourceVersion of the oldest event
|
||||
// in the buffer.
|
||||
// This should only happen in unit tests that populate the buffer without
|
||||
// performing list/replace operations.
|
||||
oldest = w.cache[w.startIndex%w.capacity].resourceVersion
|
||||
default:
|
||||
return nil, fmt.Errorf("watch cache isn't correctly initialized")
|
||||
}
|
||||
|
||||
if resourceVersion == 0 {
|
||||
// resourceVersion = 0 means that we don't require any specific starting point
|
||||
// and we would like to start watching from ~now.
|
||||
|
|
@ -440,18 +445,17 @@ func (w *watchCache) GetAllEventsSinceThreadUnsafe(resourceVersion uint64) ([]*w
|
|||
if !ok {
|
||||
return nil, fmt.Errorf("not a storeElement: %v", elem)
|
||||
}
|
||||
objLabels, objFields, objUninitialized, err := w.getAttrsFunc(elem.Object)
|
||||
objLabels, objFields, err := w.getAttrsFunc(elem.Object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result[i] = &watchCacheEvent{
|
||||
Type: watch.Added,
|
||||
Object: elem.Object,
|
||||
ObjLabels: objLabels,
|
||||
ObjFields: objFields,
|
||||
ObjUninitialized: objUninitialized,
|
||||
Key: elem.Key,
|
||||
ResourceVersion: w.resourceVersion,
|
||||
Type: watch.Added,
|
||||
Object: elem.Object,
|
||||
ObjLabels: objLabels,
|
||||
ObjFields: objFields,
|
||||
Key: elem.Key,
|
||||
ResourceVersion: w.resourceVersion,
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
vendor/k8s.io/apiserver/pkg/storage/errors.go (generated, vendored): 2 changed lines
@@ -166,5 +166,5 @@ func NewInternalError(reason string) InternalError {
 }
 
 func NewInternalErrorf(format string, a ...interface{}) InternalError {
-	return InternalError{fmt.Sprintf(format, a)}
+	return InternalError{fmt.Sprintf(format, a...)}
 }
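Note: the fix above is a classic variadic-forwarding bug. Passing the slice a without "..." hands Sprintf a single []interface{} argument, while "a..." expands it into the individual arguments. A minimal illustration, not vendored code:

package main

import "fmt"

func main() {
	args := []interface{}{"pods", 404}
	fmt.Println(fmt.Sprintf("%s: %d", args))    // malformed: the whole slice feeds %s and %d gets no argument
	fmt.Println(fmt.Sprintf("%s: %d", args...)) // "pods: 404": the slice is expanded into two arguments
}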
vendor/k8s.io/apiserver/pkg/storage/errors/doc.go (generated, vendored): 2 changed lines
@@ -14,5 +14,5 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Package etcd provides conversion of etcd errors to API errors.
+// Package storage provides conversion of storage errors to API errors.
 package storage // import "k8s.io/apiserver/pkg/storage/errors"
vendor/k8s.io/apiserver/pkg/storage/etcd/OWNERS (generated, vendored, executable file → normal file): 2 changed lines
@@ -1,3 +1,5 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
 reviewers:
 - lavalamp
 - smarterclayton
vendor/k8s.io/apiserver/pkg/storage/etcd/api_object_versioner.go (generated, vendored): 27 changed lines
@@ -82,11 +82,11 @@ func (a APIObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, e
 	return strconv.ParseUint(version, 10, 64)
 }
 
-// ParseWatchResourceVersion takes a resource version argument and converts it to
-// the etcd version we should pass to helper.Watch(). Because resourceVersion is
+// ParseResourceVersion takes a resource version argument and converts it to
+// the etcd version. For watch we should pass to helper.Watch(). Because resourceVersion is
 // an opaque value, the default watch behavior for non-zero watch is to watch
 // the next value (if you pass "1", you will see updates from "2" onwards).
-func (a APIObjectVersioner) ParseWatchResourceVersion(resourceVersion string) (uint64, error) {
+func (a APIObjectVersioner) ParseResourceVersion(resourceVersion string) (uint64, error) {
 	if resourceVersion == "" || resourceVersion == "0" {
 		return 0, nil
 	}
@@ -101,26 +101,7 @@ func (a APIObjectVersioner) ParseWatchResourceVersion(resourceVersion string) (u
 	return version, nil
 }
 
-// ParseListResourceVersion takes a resource version argument and converts it to
-// the etcd version.
-// TODO: reevaluate whether it is really clearer to have both this and the
-// Watch version of this function, since they perform the same logic.
-func (a APIObjectVersioner) ParseListResourceVersion(resourceVersion string) (uint64, error) {
-	if resourceVersion == "" {
-		return 0, nil
-	}
-	version, err := strconv.ParseUint(resourceVersion, 10, 64)
-	if err != nil {
-		return 0, storage.NewInvalidError(field.ErrorList{
-			// Validation errors are supposed to return version-specific field
-			// paths, but this is probably close enough.
-			field.Invalid(field.NewPath("resourceVersion"), resourceVersion, err.Error()),
-		})
-	}
-	return version, nil
-}
-
-// APIObjectVersioner implements Versioner
+// Versioner implements Versioner
 var Versioner storage.Versioner = APIObjectVersioner{}
 
 // CompareResourceVersion compares etcd resource versions. Outside this API they are all strings,
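Note: with ParseListResourceVersion deleted above, watch and list code paths share the single ParseResourceVersion helper; the cacher hunks earlier in this diff show exactly that call-site change. Sketched caller-side effect, illustrative only:

// before the bump
//   watchRV, err := versioner.ParseWatchResourceVersion(resourceVersion)
//   listRV, err := versioner.ParseListResourceVersion(resourceVersion)
// after the bump
//   watchRV, err := versioner.ParseResourceVersion(resourceVersion)
//   listRV, err := versioner.ParseResourceVersion(resourceVersion)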
vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_helper.go (generated, vendored, file deleted): 652 changed lines
@ -1,652 +0,0 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
etcd "github.com/coreos/etcd/client"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
"k8s.io/apimachinery/pkg/conversion"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilcache "k8s.io/apimachinery/pkg/util/cache"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/apiserver/pkg/storage"
|
||||
"k8s.io/apiserver/pkg/storage/etcd/metrics"
|
||||
etcdutil "k8s.io/apiserver/pkg/storage/etcd/util"
|
||||
utiltrace "k8s.io/apiserver/pkg/util/trace"
|
||||
)
|
||||
|
||||
// ValueTransformer allows a string value to be transformed before being read from or written to the underlying store. The methods
|
||||
// must be able to undo the transformation caused by the other.
|
||||
type ValueTransformer interface {
|
||||
// TransformStringFromStorage may transform the provided string from its underlying storage representation or return an error.
|
||||
// Stale is true if the object on disk is stale and a write to etcd should be issued, even if the contents of the object
|
||||
// have not changed.
|
||||
TransformStringFromStorage(string) (value string, stale bool, err error)
|
||||
// TransformStringToStorage may transform the provided string into the appropriate form in storage or return an error.
|
||||
TransformStringToStorage(string) (value string, err error)
|
||||
}
|
||||
|
||||
type identityTransformer struct{}
|
||||
|
||||
func (identityTransformer) TransformStringFromStorage(s string) (string, bool, error) {
|
||||
return s, false, nil
|
||||
}
|
||||
func (identityTransformer) TransformStringToStorage(s string) (string, error) { return s, nil }
|
||||
|
||||
// IdentityTransformer performs no transformation on the provided values.
|
||||
var IdentityTransformer ValueTransformer = identityTransformer{}
|
||||
|
||||
// Creates a new storage interface from the client
|
||||
// TODO: deprecate in favor of storage.Config abstraction over time
|
||||
func NewEtcdStorage(client etcd.Client, codec runtime.Codec, prefix string, quorum bool, cacheSize int, transformer ValueTransformer) storage.Interface {
|
||||
return &etcdHelper{
|
||||
etcdMembersAPI: etcd.NewMembersAPI(client),
|
||||
etcdKeysAPI: etcd.NewKeysAPI(client),
|
||||
codec: codec,
|
||||
versioner: APIObjectVersioner{},
|
||||
transformer: transformer,
|
||||
pathPrefix: path.Join("/", prefix),
|
||||
quorum: quorum,
|
||||
cache: utilcache.NewCache(cacheSize),
|
||||
}
|
||||
}
|
||||
|
||||
// etcdHelper is the reference implementation of storage.Interface.
|
||||
type etcdHelper struct {
|
||||
etcdMembersAPI etcd.MembersAPI
|
||||
etcdKeysAPI etcd.KeysAPI
|
||||
codec runtime.Codec
|
||||
transformer ValueTransformer
|
||||
// Note that versioner is required for etcdHelper to work correctly.
|
||||
// The public constructors (NewStorage & NewEtcdStorage) are setting it
|
||||
// correctly, so be careful when manipulating with it manually.
|
||||
// optional, has to be set to perform any atomic operations
|
||||
versioner storage.Versioner
|
||||
// prefix for all etcd keys
|
||||
pathPrefix string
|
||||
// if true, perform quorum read
|
||||
quorum bool
|
||||
|
||||
// We cache objects stored in etcd. For keys we use Node.ModifiedIndex which is equivalent
|
||||
// to resourceVersion.
|
||||
// This depends on etcd's indexes being globally unique across all objects/types. This will
|
||||
// have to revisited if we decide to do things like multiple etcd clusters, or etcd will
|
||||
// support multi-object transaction that will result in many objects with the same index.
|
||||
// Number of entries stored in the cache is controlled by maxEtcdCacheEntries constant.
|
||||
// TODO: Measure how much this cache helps after the conversion code is optimized.
|
||||
cache utilcache.Cache
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
func (h *etcdHelper) Versioner() storage.Versioner {
|
||||
return h.versioner
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
func (h *etcdHelper) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {
|
||||
trace := utiltrace.New("etcdHelper::Create " + getTypeName(obj))
|
||||
defer trace.LogIfLong(250 * time.Millisecond)
|
||||
if ctx == nil {
|
||||
glog.Errorf("Context is nil")
|
||||
}
|
||||
key = path.Join(h.pathPrefix, key)
|
||||
data, err := runtime.Encode(h.codec, obj)
|
||||
trace.Step("Object encoded")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if version, err := h.versioner.ObjectResourceVersion(obj); err == nil && version != 0 {
|
||||
return errors.New("resourceVersion may not be set on objects to be created")
|
||||
}
|
||||
if err := h.versioner.PrepareObjectForStorage(obj); err != nil {
|
||||
return fmt.Errorf("PrepareObjectForStorage returned an error: %v", err)
|
||||
}
|
||||
trace.Step("Version checked")
|
||||
|
||||
startTime := time.Now()
|
||||
opts := etcd.SetOptions{
|
||||
TTL: time.Duration(ttl) * time.Second,
|
||||
PrevExist: etcd.PrevNoExist,
|
||||
}
|
||||
|
||||
newBody, err := h.transformer.TransformStringToStorage(string(data))
|
||||
if err != nil {
|
||||
return storage.NewInternalError(err.Error())
|
||||
}
|
||||
|
||||
response, err := h.etcdKeysAPI.Set(ctx, key, newBody, &opts)
|
||||
trace.Step("Object created")
|
||||
metrics.RecordEtcdRequestLatency("create", getTypeName(obj), startTime)
|
||||
if err != nil {
|
||||
return toStorageErr(err, key, 0)
|
||||
}
|
||||
if out != nil {
|
||||
if _, err := conversion.EnforcePtr(out); err != nil {
|
||||
panic("unable to convert output object to pointer")
|
||||
}
|
||||
_, _, _, err = h.extractObj(response, err, out, false, false)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func checkPreconditions(key string, preconditions *storage.Preconditions, out runtime.Object) error {
|
||||
if preconditions == nil {
|
||||
return nil
|
||||
}
|
||||
objMeta, err := meta.Accessor(out)
|
||||
if err != nil {
|
||||
return storage.NewInternalErrorf("can't enforce preconditions %v on un-introspectable object %v, got error: %v", *preconditions, out, err)
|
||||
}
|
||||
if preconditions.UID != nil && *preconditions.UID != objMeta.GetUID() {
|
||||
errMsg := fmt.Sprintf("Precondition failed: UID in precondition: %v, UID in object meta: %v", preconditions.UID, objMeta.GetUID())
|
||||
return storage.NewInvalidObjError(key, errMsg)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
func (h *etcdHelper) Delete(ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions) error {
|
||||
if ctx == nil {
|
||||
glog.Errorf("Context is nil")
|
||||
}
|
||||
key = path.Join(h.pathPrefix, key)
|
||||
v, err := conversion.EnforcePtr(out)
|
||||
if err != nil {
|
||||
panic("unable to convert output object to pointer")
|
||||
}
|
||||
|
||||
if preconditions == nil {
|
||||
startTime := time.Now()
|
||||
response, err := h.etcdKeysAPI.Delete(ctx, key, nil)
|
||||
metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime)
|
||||
if !etcdutil.IsEtcdNotFound(err) {
|
||||
// if the object that existed prior to the delete is returned by etcd, update the out object.
|
||||
if err != nil || response.PrevNode != nil {
|
||||
_, _, _, err = h.extractObj(response, err, out, false, true)
|
||||
}
|
||||
}
|
||||
return toStorageErr(err, key, 0)
|
||||
}
|
||||
|
||||
// Check the preconditions match.
|
||||
obj := reflect.New(v.Type()).Interface().(runtime.Object)
|
||||
for {
|
||||
_, node, res, _, err := h.bodyAndExtractObj(ctx, key, obj, false)
|
||||
if err != nil {
|
||||
return toStorageErr(err, key, 0)
|
||||
}
|
||||
if err := checkPreconditions(key, preconditions, obj); err != nil {
|
||||
return toStorageErr(err, key, 0)
|
||||
}
|
||||
index := uint64(0)
|
||||
if node != nil {
|
||||
index = node.ModifiedIndex
|
||||
} else if res != nil {
|
||||
index = res.Index
|
||||
}
|
||||
opt := etcd.DeleteOptions{PrevIndex: index}
|
||||
startTime := time.Now()
|
||||
response, err := h.etcdKeysAPI.Delete(ctx, key, &opt)
|
||||
metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime)
|
||||
if !etcdutil.IsEtcdTestFailed(err) {
|
||||
if !etcdutil.IsEtcdNotFound(err) {
|
||||
// if the object that existed prior to the delete is returned by etcd, update the out object.
|
||||
if err != nil || response.PrevNode != nil {
|
||||
_, _, _, err = h.extractObj(response, err, out, false, true)
|
||||
}
|
||||
}
|
||||
return toStorageErr(err, key, 0)
|
||||
}
|
||||
|
||||
glog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key)
|
||||
}
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
func (h *etcdHelper) Watch(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate) (watch.Interface, error) {
|
||||
if ctx == nil {
|
||||
glog.Errorf("Context is nil")
|
||||
}
|
||||
watchRV, err := h.versioner.ParseWatchResourceVersion(resourceVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key = path.Join(h.pathPrefix, key)
|
||||
w := newEtcdWatcher(false, h.quorum, nil, pred, h.codec, h.versioner, nil, h.transformer, h)
|
||||
go w.etcdWatch(ctx, h.etcdKeysAPI, key, watchRV)
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
func (h *etcdHelper) WatchList(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate) (watch.Interface, error) {
|
||||
if ctx == nil {
|
||||
glog.Errorf("Context is nil")
|
||||
}
|
||||
watchRV, err := h.versioner.ParseWatchResourceVersion(resourceVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key = path.Join(h.pathPrefix, key)
|
||||
w := newEtcdWatcher(true, h.quorum, exceptKey(key), pred, h.codec, h.versioner, nil, h.transformer, h)
|
||||
go w.etcdWatch(ctx, h.etcdKeysAPI, key, watchRV)
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
func (h *etcdHelper) Get(ctx context.Context, key string, resourceVersion string, objPtr runtime.Object, ignoreNotFound bool) error {
|
||||
if ctx == nil {
|
||||
glog.Errorf("Context is nil")
|
||||
}
|
||||
key = path.Join(h.pathPrefix, key)
|
||||
_, _, _, _, err := h.bodyAndExtractObj(ctx, key, objPtr, ignoreNotFound)
|
||||
return err
|
||||
}
|
||||
|
||||
// bodyAndExtractObj performs the normal Get path to etcd, returning the parsed node and response for additional information
|
||||
// about the response, like the current etcd index and the ttl.
|
||||
func (h *etcdHelper) bodyAndExtractObj(ctx context.Context, key string, objPtr runtime.Object, ignoreNotFound bool) (body string, node *etcd.Node, res *etcd.Response, stale bool, err error) {
|
||||
if ctx == nil {
|
||||
glog.Errorf("Context is nil")
|
||||
}
|
||||
startTime := time.Now()
|
||||
|
||||
opts := &etcd.GetOptions{
|
||||
Quorum: h.quorum,
|
||||
}
|
||||
|
||||
response, err := h.etcdKeysAPI.Get(ctx, key, opts)
|
||||
metrics.RecordEtcdRequestLatency("get", getTypeName(objPtr), startTime)
|
||||
if err != nil && !etcdutil.IsEtcdNotFound(err) {
|
||||
return "", nil, nil, false, toStorageErr(err, key, 0)
|
||||
}
|
||||
body, node, stale, err = h.extractObj(response, err, objPtr, ignoreNotFound, false)
|
||||
return body, node, response, stale, toStorageErr(err, key, 0)
|
||||
}
|
||||
|
||||
func (h *etcdHelper) extractObj(response *etcd.Response, inErr error, objPtr runtime.Object, ignoreNotFound, prevNode bool) (body string, node *etcd.Node, stale bool, err error) {
|
||||
if response != nil {
|
||||
if prevNode {
|
||||
node = response.PrevNode
|
||||
} else {
|
||||
node = response.Node
|
||||
}
|
||||
}
|
||||
if inErr != nil || node == nil || len(node.Value) == 0 {
|
||||
if ignoreNotFound {
|
||||
v, err := conversion.EnforcePtr(objPtr)
|
||||
if err != nil {
|
||||
return "", nil, false, err
|
||||
}
|
||||
v.Set(reflect.Zero(v.Type()))
|
||||
return "", nil, false, nil
|
||||
} else if inErr != nil {
|
||||
return "", nil, false, inErr
|
||||
}
|
||||
return "", nil, false, fmt.Errorf("unable to locate a value on the response: %#v", response)
|
||||
}
|
||||
|
||||
body, stale, err = h.transformer.TransformStringFromStorage(node.Value)
|
||||
if err != nil {
|
||||
return body, nil, stale, storage.NewInternalError(err.Error())
|
||||
}
|
||||
out, gvk, err := h.codec.Decode([]byte(body), nil, objPtr)
|
||||
if err != nil {
|
||||
return body, nil, stale, err
|
||||
}
|
||||
if out != objPtr {
|
||||
return body, nil, stale, fmt.Errorf("unable to decode object %s into %v", gvk.String(), reflect.TypeOf(objPtr))
|
||||
}
|
||||
// being unable to set the version does not prevent the object from being extracted
|
||||
_ = h.versioner.UpdateObject(objPtr, node.ModifiedIndex)
|
||||
return body, node, stale, err
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
func (h *etcdHelper) GetToList(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate, listObj runtime.Object) error {
|
||||
if ctx == nil {
|
||||
glog.Errorf("Context is nil")
|
||||
}
|
||||
trace := utiltrace.New("GetToList " + getTypeName(listObj))
|
||||
listPtr, err := meta.GetItemsPtr(listObj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key = path.Join(h.pathPrefix, key)
|
||||
startTime := time.Now()
|
||||
trace.Step("About to read etcd node")
|
||||
|
||||
opts := &etcd.GetOptions{
|
||||
Quorum: h.quorum,
|
||||
}
|
||||
response, err := h.etcdKeysAPI.Get(ctx, key, opts)
|
||||
trace.Step("Etcd node read")
|
||||
metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime)
|
||||
if err != nil {
|
||||
if etcdutil.IsEtcdNotFound(err) {
|
||||
if etcdErr, ok := err.(etcd.Error); ok {
|
||||
return h.versioner.UpdateList(listObj, etcdErr.Index, "")
|
||||
}
|
||||
return fmt.Errorf("unexpected error from storage: %#v", err)
|
||||
}
|
||||
return toStorageErr(err, key, 0)
|
||||
}
|
||||
|
||||
nodes := make([]*etcd.Node, 0)
|
||||
nodes = append(nodes, response.Node)
|
||||
|
||||
if err := h.decodeNodeList(nodes, pred, listPtr); err != nil {
|
||||
return err
|
||||
}
|
||||
trace.Step("Object decoded")
|
||||
if err := h.versioner.UpdateList(listObj, response.Index, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// decodeNodeList walks the tree of each node in the list and decodes into the specified object
|
||||
func (h *etcdHelper) decodeNodeList(nodes []*etcd.Node, pred storage.SelectionPredicate, slicePtr interface{}) error {
|
||||
trace := utiltrace.New("decodeNodeList " + getTypeName(slicePtr))
|
||||
defer trace.LogIfLong(400 * time.Millisecond)
|
||||
v, err := conversion.EnforcePtr(slicePtr)
|
||||
if err != nil || v.Kind() != reflect.Slice {
|
||||
// This should not happen at runtime.
|
||||
panic("need ptr to slice")
|
||||
}
|
||||
for _, node := range nodes {
|
||||
if node.Dir {
|
||||
// IMPORTANT: do not log each key as a discrete step in the trace log
|
||||
// as it produces an immense amount of log spam when there is a large
|
||||
// amount of content in the list.
|
||||
if err := h.decodeNodeList(node.Nodes, pred, slicePtr); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if obj, found := h.getFromCache(node.ModifiedIndex, pred); found {
|
||||
// obj != nil iff it matches the pred function.
|
||||
if obj != nil {
|
||||
v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))
|
||||
}
|
||||
} else {
|
||||
body, _, err := h.transformer.TransformStringFromStorage(node.Value)
|
||||
if err != nil {
|
||||
// omit items from lists and watches that cannot be transformed, but log the error
|
||||
utilruntime.HandleError(fmt.Errorf("unable to transform key %q: %v", node.Key, err))
|
||||
continue
|
||||
}
|
||||
|
||||
obj, _, err := h.codec.Decode([]byte(body), nil, reflect.New(v.Type().Elem()).Interface().(runtime.Object))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// being unable to set the version does not prevent the object from being extracted
|
||||
_ = h.versioner.UpdateObject(obj, node.ModifiedIndex)
|
||||
if matched, err := pred.Matches(obj); err == nil && matched {
|
||||
v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))
|
||||
}
|
||||
if node.ModifiedIndex != 0 {
|
||||
h.addToCache(node.ModifiedIndex, obj)
|
||||
}
|
||||
}
|
||||
}
|
||||
trace.Step(fmt.Sprintf("Decoded %v nodes", len(nodes)))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
func (h *etcdHelper) List(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate, listObj runtime.Object) error {
|
||||
if ctx == nil {
|
||||
glog.Errorf("Context is nil")
|
||||
}
|
||||
trace := utiltrace.New("List " + getTypeName(listObj))
|
||||
defer trace.LogIfLong(400 * time.Millisecond)
|
||||
listPtr, err := meta.GetItemsPtr(listObj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key = path.Join(h.pathPrefix, key)
|
||||
startTime := time.Now()
|
||||
trace.Step("About to list etcd node")
|
||||
nodes, index, err := h.listEtcdNode(ctx, key)
|
||||
trace.Step("Etcd node listed")
|
||||
metrics.RecordEtcdRequestLatency("list", getTypeName(listPtr), startTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := h.decodeNodeList(nodes, pred, listPtr); err != nil {
|
||||
return err
|
||||
}
|
||||
trace.Step("Node list decoded")
|
||||
if err := h.versioner.UpdateList(listObj, index, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *etcdHelper) listEtcdNode(ctx context.Context, key string) ([]*etcd.Node, uint64, error) {
|
||||
if ctx == nil {
|
||||
glog.Errorf("Context is nil")
|
||||
}
|
||||
opts := etcd.GetOptions{
|
||||
Recursive: true,
|
||||
Sort: true,
|
||||
Quorum: h.quorum,
|
||||
}
|
||||
result, err := h.etcdKeysAPI.Get(ctx, key, &opts)
|
||||
if err != nil {
|
||||
var index uint64
|
||||
if etcdError, ok := err.(etcd.Error); ok {
|
||||
index = etcdError.Index
|
||||
}
|
||||
nodes := make([]*etcd.Node, 0)
|
||||
if etcdutil.IsEtcdNotFound(err) {
|
||||
return nodes, index, nil
|
||||
} else {
|
||||
return nodes, index, toStorageErr(err, key, 0)
|
||||
}
|
||||
}
|
||||
return result.Node.Nodes, result.Index, nil
|
||||
}
|
||||
|
||||
// Implements storage.Interface.
|
||||
func (h *etcdHelper) GuaranteedUpdate(
|
||||
ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,
|
||||
preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, _ ...runtime.Object) error {
|
||||
// Ignore the suggestion about current object.
|
||||
if ctx == nil {
|
||||
glog.Errorf("Context is nil")
|
||||
}
|
||||
v, err := conversion.EnforcePtr(ptrToType)
|
||||
if err != nil {
|
||||
// Panic is appropriate, because this is a programming error.
|
||||
panic("need ptr to type")
|
||||
}
|
||||
key = path.Join(h.pathPrefix, key)
|
||||
for {
|
||||
obj := reflect.New(v.Type()).Interface().(runtime.Object)
|
||||
origBody, node, res, stale, err := h.bodyAndExtractObj(ctx, key, obj, ignoreNotFound)
|
||||
if err != nil {
|
||||
return toStorageErr(err, key, 0)
|
||||
}
|
||||
if err := checkPreconditions(key, preconditions, obj); err != nil {
|
||||
return toStorageErr(err, key, 0)
|
||||
}
|
||||
meta := storage.ResponseMeta{}
|
||||
if node != nil {
|
||||
meta.TTL = node.TTL
|
||||
meta.ResourceVersion = node.ModifiedIndex
|
||||
}
|
||||
// Get the object to be written by calling tryUpdate.
|
||||
ret, newTTL, err := tryUpdate(obj, meta)
|
||||
if err != nil {
|
||||
return toStorageErr(err, key, 0)
|
||||
}
|
||||
|
||||
index := uint64(0)
|
||||
ttl := uint64(0)
|
||||
if node != nil {
|
||||
index = node.ModifiedIndex
|
||||
if node.TTL != 0 {
|
||||
ttl = uint64(node.TTL)
|
||||
}
|
||||
if node.Expiration != nil && ttl == 0 {
|
||||
ttl = 1
|
||||
}
|
||||
} else if res != nil {
|
||||
index = res.Index
|
||||
}
|
||||
|
||||
if newTTL != nil {
|
||||
if ttl != 0 && *newTTL == 0 {
|
||||
// TODO: remove this after we have verified this is no longer an issue
|
||||
glog.V(4).Infof("GuaranteedUpdate is clearing TTL for %q, may not be intentional", key)
|
||||
}
|
||||
ttl = *newTTL
|
||||
}
|
||||
|
||||
// Since the updated object may have a resourceVersion set, we need to clear it here.
|
||||
if err := h.versioner.PrepareObjectForStorage(ret); err != nil {
|
||||
return errors.New("resourceVersion cannot be set on objects store in etcd")
|
||||
}
|
||||
|
||||
newBodyData, err := runtime.Encode(h.codec, ret)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newBody := string(newBodyData)
|
||||
data, err := h.transformer.TransformStringToStorage(newBody)
|
||||
if err != nil {
|
||||
return storage.NewInternalError(err.Error())
|
||||
}
|
||||
|
||||
// First time this key has been used, try creating new value.
|
||||
if index == 0 {
|
||||
startTime := time.Now()
|
||||
opts := etcd.SetOptions{
|
||||
TTL: time.Duration(ttl) * time.Second,
|
||||
PrevExist: etcd.PrevNoExist,
|
||||
}
|
||||
response, err := h.etcdKeysAPI.Set(ctx, key, data, &opts)
|
||||
metrics.RecordEtcdRequestLatency("create", getTypeName(ptrToType), startTime)
|
||||
if etcdutil.IsEtcdNodeExist(err) {
|
||||
continue
|
||||
}
|
||||
_, _, _, err = h.extractObj(response, err, ptrToType, false, false)
|
||||
return toStorageErr(err, key, 0)
|
||||
}
|
||||
|
||||
// If we don't send an update, we simply return the currently existing
|
||||
// version of the object. However, the value transformer may indicate that
|
||||
// the on disk representation has changed and that we must commit an update.
|
||||
if newBody == origBody && !stale {
|
||||
_, _, _, err := h.extractObj(res, nil, ptrToType, ignoreNotFound, false)
|
||||
return err
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
// Swap origBody with data, if origBody is the latest etcd data.
|
||||
opts := etcd.SetOptions{
|
||||
PrevIndex: index,
|
||||
TTL: time.Duration(ttl) * time.Second,
|
||||
}
|
||||
response, err := h.etcdKeysAPI.Set(ctx, key, data, &opts)
|
||||
metrics.RecordEtcdRequestLatency("compareAndSwap", getTypeName(ptrToType), startTime)
|
||||
if etcdutil.IsEtcdTestFailed(err) {
|
||||
// Try again.
|
||||
continue
|
||||
}
|
||||
_, _, _, err = h.extractObj(response, err, ptrToType, false, false)
|
||||
return toStorageErr(err, key, int64(index))
|
||||
}
|
||||
}
|
||||
|
||||
func (*etcdHelper) Count(pathPrefix string) (int64, error) {
|
||||
return 0, fmt.Errorf("Count is unimplemented for etcd2!")
|
||||
}
|
||||
|
||||
// etcdCache defines the interface used for caching objects stored in etcd. Objects are keyed by
|
||||
// their Node.ModifiedIndex, which is unique across all types.
|
||||
// All implementations must be thread-safe.
|
||||
type etcdCache interface {
|
||||
getFromCache(index uint64, pred storage.SelectionPredicate) (runtime.Object, bool)
|
||||
addToCache(index uint64, obj runtime.Object)
|
||||
}
|
||||
|
||||
func getTypeName(obj interface{}) string {
|
||||
return reflect.TypeOf(obj).String()
|
||||
}
|
||||
|
||||
func (h *etcdHelper) getFromCache(index uint64, pred storage.SelectionPredicate) (runtime.Object, bool) {
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
metrics.ObserveGetCache(startTime)
|
||||
}()
|
||||
obj, found := h.cache.Get(index)
|
||||
if found {
|
||||
if matched, err := pred.Matches(obj.(runtime.Object)); err != nil || !matched {
|
||||
return nil, true
|
||||
}
|
||||
// We should not return the object itself to avoid polluting the cache if someone
|
||||
// modifies returned values.
|
||||
objCopy := obj.(runtime.Object).DeepCopyObject()
|
||||
metrics.ObserveCacheHit()
|
||||
return objCopy.(runtime.Object), true
|
||||
}
|
||||
metrics.ObserveCacheMiss()
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (h *etcdHelper) addToCache(index uint64, obj runtime.Object) {
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
metrics.ObserveAddCache(startTime)
|
||||
}()
|
||||
objCopy := obj.DeepCopyObject()
|
||||
isOverwrite := h.cache.Add(index, objCopy)
|
||||
if !isOverwrite {
|
||||
metrics.ObserveNewEntry()
|
||||
}
|
||||
}
|
||||
|
||||
func toStorageErr(err error, key string, rv int64) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
switch {
|
||||
case etcdutil.IsEtcdNotFound(err):
|
||||
return storage.NewKeyNotFoundError(key, rv)
|
||||
case etcdutil.IsEtcdNodeExist(err):
|
||||
return storage.NewKeyExistsError(key, rv)
|
||||
case etcdutil.IsEtcdTestFailed(err):
|
||||
return storage.NewResourceVersionConflictsError(key, rv)
|
||||
case etcdutil.IsEtcdUnreachable(err):
|
||||
return storage.NewUnreachableError(key, rv)
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
500
vendor/k8s.io/apiserver/pkg/storage/etcd/etcd_watcher.go
generated
vendored
|
|
@ -1,500 +0,0 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package etcd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/apiserver/pkg/storage"
|
||||
etcdutil "k8s.io/apiserver/pkg/storage/etcd/util"
|
||||
|
||||
etcd "github.com/coreos/etcd/client"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// Etcd watch event actions
|
||||
const (
|
||||
EtcdCreate = "create"
|
||||
EtcdGet = "get"
|
||||
EtcdSet = "set"
|
||||
EtcdCAS = "compareAndSwap"
|
||||
EtcdDelete = "delete"
|
||||
EtcdCAD = "compareAndDelete"
|
||||
EtcdExpire = "expire"
|
||||
)
|
||||
|
||||
// TransformFunc attempts to convert an object to another object for use with a watcher.
|
||||
type TransformFunc func(runtime.Object) (runtime.Object, error)
|
||||
|
||||
// includeFunc returns true if the given key should be considered part of a watch
|
||||
type includeFunc func(key string) bool
|
||||
|
||||
// exceptKey is an includeFunc that returns false when the provided key matches the watched key
|
||||
func exceptKey(except string) includeFunc {
|
||||
return func(key string) bool {
|
||||
return key != except
|
||||
}
|
||||
}
|
||||
|
||||
// etcdWatcher converts a native etcd watch to a watch.Interface.
|
||||
type etcdWatcher struct {
|
||||
// HighWaterMarks for performance debugging.
|
||||
// Important: Since HighWaterMark is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platforms
|
||||
// See: https://golang.org/pkg/sync/atomic/ for more information
|
||||
incomingHWM storage.HighWaterMark
|
||||
outgoingHWM storage.HighWaterMark
|
||||
|
||||
encoding runtime.Codec
|
||||
// Note that versioner is required for etcdWatcher to work correctly.
|
||||
// There is no public constructor of it, so be careful when manipulating
|
||||
// it manually.
|
||||
versioner storage.Versioner
|
||||
transform TransformFunc
|
||||
valueTransformer ValueTransformer
|
||||
|
||||
list bool // If we're doing a recursive watch, should be true.
|
||||
quorum bool // If we enable quorum, should be true
|
||||
include includeFunc
|
||||
pred storage.SelectionPredicate
|
||||
|
||||
etcdIncoming chan *etcd.Response
|
||||
etcdError chan error
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
etcdCallEnded chan struct{}
|
||||
|
||||
outgoing chan watch.Event
|
||||
userStop chan struct{}
|
||||
stopped bool
|
||||
stopLock sync.Mutex
|
||||
// wg is used to avoid calls to etcd after Stop(), and to make sure
|
||||
// that the translate goroutine is not leaked.
|
||||
wg sync.WaitGroup
|
||||
|
||||
// Injectable for testing. Send the event down the outgoing channel.
|
||||
emit func(watch.Event)
|
||||
|
||||
cache etcdCache
|
||||
}
|
||||
|
||||
// watchWaitDuration is the amount of time to wait for an error from watch.
|
||||
const watchWaitDuration = 100 * time.Millisecond
|
||||
|
||||
// newEtcdWatcher returns a new etcdWatcher; if list is true, watch sub-nodes.
|
||||
// The versioner must be able to handle the objects that transform creates.
|
||||
func newEtcdWatcher(list bool, quorum bool, include includeFunc, pred storage.SelectionPredicate,
|
||||
encoding runtime.Codec, versioner storage.Versioner, transform TransformFunc,
|
||||
valueTransformer ValueTransformer, cache etcdCache) *etcdWatcher {
|
||||
w := &etcdWatcher{
|
||||
encoding: encoding,
|
||||
versioner: versioner,
|
||||
transform: transform,
|
||||
valueTransformer: valueTransformer,
|
||||
|
||||
list: list,
|
||||
quorum: quorum,
|
||||
include: include,
|
||||
pred: pred,
|
||||
// Buffer this channel, so that the etcd client is not forced
|
||||
// to context switch with every object it gets, and so that a
|
||||
// long time spent decoding an object won't block the *next*
|
||||
// object. Basically, we see a lot of "401 window exceeded"
|
||||
// errors from etcd, and that's due to the client not streaming
|
||||
// results but rather getting them one at a time. So we really
|
||||
// want to never block the etcd client, if possible. The 100 is
|
||||
// mostly arbitrary--we know it goes as high as 50, though.
|
||||
// There's a V(2) log message that prints the length so we can
|
||||
// monitor how much of this buffer is actually used.
|
||||
etcdIncoming: make(chan *etcd.Response, 100),
|
||||
etcdError: make(chan error, 1),
|
||||
// Similarly to etcdIncoming, we don't want to force a context
|
||||
// switch on every new incoming object.
|
||||
outgoing: make(chan watch.Event, 100),
|
||||
userStop: make(chan struct{}),
|
||||
stopped: false,
|
||||
wg: sync.WaitGroup{},
|
||||
cache: cache,
|
||||
ctx: nil,
|
||||
cancel: nil,
|
||||
}
|
||||
w.emit = func(e watch.Event) {
|
||||
if curLen := int64(len(w.outgoing)); w.outgoingHWM.Update(curLen) {
|
||||
// Monitor if this gets backed up, and how much.
|
||||
glog.V(1).Infof("watch (%v): %v objects queued in outgoing channel.", reflect.TypeOf(e.Object).String(), curLen)
|
||||
}
|
||||
// Give up on user stop; without this we leak a lot of goroutines in tests.
|
||||
select {
|
||||
case w.outgoing <- e:
|
||||
case <-w.userStop:
|
||||
}
|
||||
}
|
||||
// translate will call done. We need to Add() here because otherwise,
|
||||
// if Stop() gets called before translate gets started, there'd be a
|
||||
// problem.
|
||||
w.wg.Add(1)
|
||||
go w.translate()
|
||||
return w
|
||||
}
|
||||
|
||||
// etcdWatch calls etcd's Watch function, and handles any errors. Meant to be called
|
||||
// as a goroutine.
|
||||
func (w *etcdWatcher) etcdWatch(ctx context.Context, client etcd.KeysAPI, key string, resourceVersion uint64) {
|
||||
defer utilruntime.HandleCrash()
|
||||
defer close(w.etcdError)
|
||||
defer close(w.etcdIncoming)
|
||||
|
||||
// All calls to etcd are coming from this function - once it is finished
|
||||
// no other call to etcd should be generated by this watcher.
|
||||
done := func() {}
|
||||
|
||||
// We need to be prepared that Stop() can be called at any time.
|
||||
// It can potentially also be called, even before this function is called.
|
||||
// If that is the case, we simply skip all the code here.
|
||||
// See #18928 for more details.
|
||||
var watcher etcd.Watcher
|
||||
returned := func() bool {
|
||||
w.stopLock.Lock()
|
||||
defer w.stopLock.Unlock()
|
||||
if w.stopped {
|
||||
// Watcher has already been stopped - don't even initiate it here.
|
||||
return true
|
||||
}
|
||||
w.wg.Add(1)
|
||||
done = w.wg.Done
|
||||
// Perform initialization of the watcher under lock - we want to avoid a situation where
|
||||
// Stop() is called in the meantime (which in tests can cause etcd termination and
|
||||
// strange behavior here).
|
||||
if resourceVersion == 0 {
|
||||
latest, err := etcdGetInitialWatchState(ctx, client, key, w.list, w.quorum, w.etcdIncoming)
|
||||
if err != nil {
|
||||
w.etcdError <- err
|
||||
return true
|
||||
}
|
||||
resourceVersion = latest
|
||||
}
|
||||
|
||||
opts := etcd.WatcherOptions{
|
||||
Recursive: w.list,
|
||||
AfterIndex: resourceVersion,
|
||||
}
|
||||
watcher = client.Watcher(key, &opts)
|
||||
w.ctx, w.cancel = context.WithCancel(ctx)
|
||||
return false
|
||||
}()
|
||||
defer done()
|
||||
if returned {
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
resp, err := watcher.Next(w.ctx)
|
||||
if err != nil {
|
||||
w.etcdError <- err
|
||||
return
|
||||
}
|
||||
w.etcdIncoming <- resp
|
||||
}
|
||||
}
|
||||
|
||||
// etcdGetInitialWatchState turns an etcd Get request into a watch equivalent
|
||||
func etcdGetInitialWatchState(ctx context.Context, client etcd.KeysAPI, key string, recursive bool, quorum bool, incoming chan<- *etcd.Response) (resourceVersion uint64, err error) {
|
||||
opts := etcd.GetOptions{
|
||||
Recursive: recursive,
|
||||
Sort: false,
|
||||
Quorum: quorum,
|
||||
}
|
||||
resp, err := client.Get(ctx, key, &opts)
|
||||
if err != nil {
|
||||
if !etcdutil.IsEtcdNotFound(err) {
|
||||
utilruntime.HandleError(fmt.Errorf("watch was unable to retrieve the current index for the provided key (%q): %v", key, err))
|
||||
return resourceVersion, toStorageErr(err, key, 0)
|
||||
}
|
||||
if etcdError, ok := err.(etcd.Error); ok {
|
||||
resourceVersion = etcdError.Index
|
||||
}
|
||||
return resourceVersion, nil
|
||||
}
|
||||
resourceVersion = resp.Index
|
||||
convertRecursiveResponse(resp.Node, resp, incoming)
|
||||
return
|
||||
}
|
||||
|
||||
// convertRecursiveResponse turns a recursive get response from etcd into individual response objects
|
||||
// by copying the original response. This emulates the behavior of a recursive watch.
|
||||
func convertRecursiveResponse(node *etcd.Node, response *etcd.Response, incoming chan<- *etcd.Response) {
|
||||
if node.Dir {
|
||||
for i := range node.Nodes {
|
||||
convertRecursiveResponse(node.Nodes[i], response, incoming)
|
||||
}
|
||||
return
|
||||
}
|
||||
copied := *response
|
||||
copied.Action = "get"
|
||||
copied.Node = node
|
||||
incoming <- &copied
|
||||
}
|
||||
|
||||
// translate pulls responses from etcd, converts them, and pushes them out the outgoing channel. Meant to be
|
||||
// called as a goroutine.
|
||||
func (w *etcdWatcher) translate() {
|
||||
defer w.wg.Done()
|
||||
defer close(w.outgoing)
|
||||
defer utilruntime.HandleCrash()
|
||||
|
||||
for {
|
||||
select {
|
||||
case err := <-w.etcdError:
|
||||
if err != nil {
|
||||
var status *metav1.Status
|
||||
switch {
|
||||
case etcdutil.IsEtcdWatchExpired(err):
|
||||
status = &metav1.Status{
|
||||
Status: metav1.StatusFailure,
|
||||
Message: err.Error(),
|
||||
Code: http.StatusGone, // Gone
|
||||
Reason: metav1.StatusReasonExpired,
|
||||
}
|
||||
// TODO: need to generate errors using api/errors which has a circular dependency on this package
|
||||
// no other way to inject errors
|
||||
// case etcdutil.IsEtcdUnreachable(err):
|
||||
// status = errors.NewServerTimeout(...)
|
||||
default:
|
||||
status = &metav1.Status{
|
||||
Status: metav1.StatusFailure,
|
||||
Message: err.Error(),
|
||||
Code: http.StatusInternalServerError,
|
||||
Reason: metav1.StatusReasonInternalError,
|
||||
}
|
||||
}
|
||||
w.emit(watch.Event{
|
||||
Type: watch.Error,
|
||||
Object: status,
|
||||
})
|
||||
}
|
||||
return
|
||||
case <-w.userStop:
|
||||
return
|
||||
case res, ok := <-w.etcdIncoming:
|
||||
if ok {
|
||||
if curLen := int64(len(w.etcdIncoming)); w.incomingHWM.Update(curLen) {
|
||||
// Monitor if this gets backed up, and how much.
|
||||
glog.V(1).Infof("watch: %v objects queued in incoming channel.", curLen)
|
||||
}
|
||||
w.sendResult(res)
|
||||
}
|
||||
// If !ok, don't return here -- we must wait for the etcdError channel
|
||||
// to give an error or be closed.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// decodeObject extracts an object from the provided etcd node or returns an error.
|
||||
func (w *etcdWatcher) decodeObject(node *etcd.Node) (runtime.Object, error) {
|
||||
if obj, found := w.cache.getFromCache(node.ModifiedIndex, storage.Everything); found {
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
body, _, err := w.valueTransformer.TransformStringFromStorage(node.Value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obj, err := runtime.Decode(w.encoding, []byte(body))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// ensure resource version is set on the object we load from etcd
|
||||
if err := w.versioner.UpdateObject(obj, node.ModifiedIndex); err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", node.ModifiedIndex, obj, err))
|
||||
}
|
||||
|
||||
// perform any necessary transformation
|
||||
if w.transform != nil {
|
||||
obj, err = w.transform(obj)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("failure to transform api object %#v: %v", obj, err))
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if node.ModifiedIndex != 0 {
|
||||
w.cache.addToCache(node.ModifiedIndex, obj)
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func (w *etcdWatcher) sendAdd(res *etcd.Response) {
|
||||
if res.Node == nil {
|
||||
utilruntime.HandleError(fmt.Errorf("unexpected nil node: %#v", res))
|
||||
return
|
||||
}
|
||||
if w.include != nil && !w.include(res.Node.Key) {
|
||||
return
|
||||
}
|
||||
obj, err := w.decodeObject(res.Node)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("failure to decode api object: %v\n'%v' from %#v %#v", err, string(res.Node.Value), res, res.Node))
|
||||
// TODO: expose an error through watch.Interface?
|
||||
// Ignore this value. If we stop the watch on a bad value, a client that uses
|
||||
// the resourceVersion to resume will never be able to get past a bad value.
|
||||
return
|
||||
}
|
||||
if matched, err := w.pred.Matches(obj); err != nil || !matched {
|
||||
return
|
||||
}
|
||||
action := watch.Added
|
||||
w.emit(watch.Event{
|
||||
Type: action,
|
||||
Object: obj,
|
||||
})
|
||||
}
|
||||
|
||||
func (w *etcdWatcher) sendModify(res *etcd.Response) {
|
||||
if res.Node == nil {
|
||||
glog.Errorf("unexpected nil node: %#v", res)
|
||||
return
|
||||
}
|
||||
if w.include != nil && !w.include(res.Node.Key) {
|
||||
return
|
||||
}
|
||||
curObj, err := w.decodeObject(res.Node)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("failure to decode api object: %v\n'%v' from %#v %#v", err, string(res.Node.Value), res, res.Node))
|
||||
// TODO: expose an error through watch.Interface?
|
||||
// Ignore this value. If we stop the watch on a bad value, a client that uses
|
||||
// the resourceVersion to resume will never be able to get past a bad value.
|
||||
return
|
||||
}
|
||||
curObjPasses := false
|
||||
if matched, err := w.pred.Matches(curObj); err == nil && matched {
|
||||
curObjPasses = true
|
||||
}
|
||||
oldObjPasses := false
|
||||
var oldObj runtime.Object
|
||||
if res.PrevNode != nil && res.PrevNode.Value != "" {
|
||||
// Ignore problems reading the old object.
|
||||
if oldObj, err = w.decodeObject(res.PrevNode); err == nil {
|
||||
if err := w.versioner.UpdateObject(oldObj, res.Node.ModifiedIndex); err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("failure to version api object (%d) %#v: %v", res.Node.ModifiedIndex, oldObj, err))
|
||||
}
|
||||
if matched, err := w.pred.Matches(oldObj); err == nil && matched {
|
||||
oldObjPasses = true
|
||||
}
|
||||
}
|
||||
}
|
||||
// Some changes to an object may cause it to start or stop matching a pred.
|
||||
// We need to report those as adds/deletes. So we have to check both the previous
|
||||
// and current value of the object.
|
||||
switch {
|
||||
case curObjPasses && oldObjPasses:
|
||||
w.emit(watch.Event{
|
||||
Type: watch.Modified,
|
||||
Object: curObj,
|
||||
})
|
||||
case curObjPasses && !oldObjPasses:
|
||||
w.emit(watch.Event{
|
||||
Type: watch.Added,
|
||||
Object: curObj,
|
||||
})
|
||||
case !curObjPasses && oldObjPasses:
|
||||
w.emit(watch.Event{
|
||||
Type: watch.Deleted,
|
||||
Object: oldObj,
|
||||
})
|
||||
}
|
||||
// Do nothing if neither new nor old object passed the pred.
|
||||
}
|
||||
|
||||
func (w *etcdWatcher) sendDelete(res *etcd.Response) {
|
||||
if res.PrevNode == nil {
|
||||
utilruntime.HandleError(fmt.Errorf("unexpected nil prev node: %#v", res))
|
||||
return
|
||||
}
|
||||
if w.include != nil && !w.include(res.PrevNode.Key) {
|
||||
return
|
||||
}
|
||||
node := *res.PrevNode
|
||||
if res.Node != nil {
|
||||
// Note that this sends the *old* object with the etcd index for the time at
|
||||
// which it gets deleted. This will allow users to restart the watch at the right
|
||||
// index.
|
||||
node.ModifiedIndex = res.Node.ModifiedIndex
|
||||
}
|
||||
obj, err := w.decodeObject(&node)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("failure to decode api object: %v\nfrom %#v %#v", err, res, res.Node))
|
||||
// TODO: expose an error through watch.Interface?
|
||||
// Ignore this value. If we stop the watch on a bad value, a client that uses
|
||||
// the resourceVersion to resume will never be able to get past a bad value.
|
||||
return
|
||||
}
|
||||
if matched, err := w.pred.Matches(obj); err != nil || !matched {
|
||||
return
|
||||
}
|
||||
w.emit(watch.Event{
|
||||
Type: watch.Deleted,
|
||||
Object: obj,
|
||||
})
|
||||
}
|
||||
|
||||
func (w *etcdWatcher) sendResult(res *etcd.Response) {
|
||||
switch res.Action {
|
||||
case EtcdCreate, EtcdGet:
|
||||
// "Get" will only happen in watch 0 case, where we explicitly want ADDED event
|
||||
// for initial state.
|
||||
w.sendAdd(res)
|
||||
case EtcdSet, EtcdCAS:
|
||||
w.sendModify(res)
|
||||
case EtcdDelete, EtcdExpire, EtcdCAD:
|
||||
w.sendDelete(res)
|
||||
default:
|
||||
utilruntime.HandleError(fmt.Errorf("unknown action: %v", res.Action))
|
||||
}
|
||||
}
|
||||
|
||||
// ResultChan implements watch.Interface.
|
||||
func (w *etcdWatcher) ResultChan() <-chan watch.Event {
|
||||
return w.outgoing
|
||||
}
|
||||
|
||||
// Stop implements watch.Interface.
|
||||
func (w *etcdWatcher) Stop() {
|
||||
w.stopLock.Lock()
|
||||
if w.cancel != nil {
|
||||
w.cancel()
|
||||
w.cancel = nil
|
||||
}
|
||||
if !w.stopped {
|
||||
w.stopped = true
|
||||
close(w.userStop)
|
||||
}
|
||||
w.stopLock.Unlock()
|
||||
|
||||
// Wait until all calls to etcd are finished and no other
|
||||
// will be issued.
|
||||
w.wg.Wait()
|
||||
}
|
||||
105
vendor/k8s.io/apiserver/pkg/storage/etcd/metrics/metrics.go
generated
vendored
|
|
@ -25,37 +25,37 @@ import (
|
|||
|
||||
var (
|
||||
cacheHitCounterOpts = prometheus.CounterOpts{
|
||||
Name: "etcd_helper_cache_hit_count",
|
||||
Name: "etcd_helper_cache_hit_total",
|
||||
Help: "Counter of etcd helper cache hits.",
|
||||
}
|
||||
cacheHitCounter = prometheus.NewCounter(cacheHitCounterOpts)
|
||||
cacheMissCounterOpts = prometheus.CounterOpts{
|
||||
Name: "etcd_helper_cache_miss_count",
|
||||
Name: "etcd_helper_cache_miss_total",
|
||||
Help: "Counter of etcd helper cache miss.",
|
||||
}
|
||||
cacheMissCounter = prometheus.NewCounter(cacheMissCounterOpts)
|
||||
cacheEntryCounterOpts = prometheus.CounterOpts{
|
||||
Name: "etcd_helper_cache_entry_count",
|
||||
Name: "etcd_helper_cache_entry_total",
|
||||
Help: "Counter of etcd helper cache entries. This can be different from etcd_helper_cache_miss_count " +
|
||||
"because two concurrent threads can miss the cache and generate the same entry twice.",
|
||||
}
|
||||
cacheEntryCounter = prometheus.NewCounter(cacheEntryCounterOpts)
|
||||
cacheGetLatency = prometheus.NewSummary(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "etcd_request_cache_get_latencies_summary",
|
||||
Help: "Latency in microseconds of getting an object from etcd cache",
|
||||
cacheGetLatency = prometheus.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "etcd_request_cache_get_duration_seconds",
|
||||
Help: "Latency in seconds of getting an object from etcd cache",
|
||||
},
|
||||
)
|
||||
cacheAddLatency = prometheus.NewSummary(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "etcd_request_cache_add_latencies_summary",
|
||||
Help: "Latency in microseconds of adding an object to etcd cache",
|
||||
cacheAddLatency = prometheus.NewHistogram(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "etcd_request_cache_add_duration_seconds",
|
||||
Help: "Latency in seconds of adding an object to etcd cache",
|
||||
},
|
||||
)
|
||||
etcdRequestLatenciesSummary = prometheus.NewSummaryVec(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "etcd_request_latencies_summary",
|
||||
Help: "Etcd request latency summary in microseconds for each operation and object type.",
|
||||
etcdRequestLatency = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "etcd_request_duration_seconds",
|
||||
Help: "Etcd request latency in seconds for each operation and object type.",
|
||||
},
|
||||
[]string{"operation", "type"},
|
||||
)
|
||||
|
|
@ -66,6 +66,42 @@ var (
|
|||
},
|
||||
[]string{"resource"},
|
||||
)
|
||||
|
||||
deprecatedCacheHitCounterOpts = prometheus.CounterOpts{
|
||||
Name: "etcd_helper_cache_hit_count",
|
||||
Help: "(Deprecated) Counter of etcd helper cache hits.",
|
||||
}
|
||||
deprecatedCacheHitCounter = prometheus.NewCounter(deprecatedCacheHitCounterOpts)
|
||||
deprecatedCacheMissCounterOpts = prometheus.CounterOpts{
|
||||
Name: "etcd_helper_cache_miss_count",
|
||||
Help: "(Deprecated) Counter of etcd helper cache miss.",
|
||||
}
|
||||
deprecatedCacheMissCounter = prometheus.NewCounter(deprecatedCacheMissCounterOpts)
|
||||
deprecatedCacheEntryCounterOpts = prometheus.CounterOpts{
|
||||
Name: "etcd_helper_cache_entry_count",
|
||||
Help: "(Deprecated) Counter of etcd helper cache entries. This can be different from etcd_helper_cache_miss_count " +
|
||||
"because two concurrent threads can miss the cache and generate the same entry twice.",
|
||||
}
|
||||
deprecatedCacheEntryCounter = prometheus.NewCounter(deprecatedCacheEntryCounterOpts)
|
||||
deprecatedCacheGetLatency = prometheus.NewSummary(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "etcd_request_cache_get_latencies_summary",
|
||||
Help: "(Deprecated) Latency in microseconds of getting an object from etcd cache",
|
||||
},
|
||||
)
|
||||
deprecatedCacheAddLatency = prometheus.NewSummary(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "etcd_request_cache_add_latencies_summary",
|
||||
Help: "(Deprecated) Latency in microseconds of adding an object to etcd cache",
|
||||
},
|
||||
)
|
||||
deprecatedEtcdRequestLatenciesSummary = prometheus.NewSummaryVec(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "etcd_request_latencies_summary",
|
||||
Help: "(Deprecated) Etcd request latency summary in microseconds for each operation and object type.",
|
||||
},
|
||||
[]string{"operation", "type"},
|
||||
)
|
||||
)
|
||||
|
||||
var registerMetrics sync.Once
|
||||
|
|
@ -79,8 +115,16 @@ func Register() {
|
|||
prometheus.MustRegister(cacheEntryCounter)
|
||||
prometheus.MustRegister(cacheAddLatency)
|
||||
prometheus.MustRegister(cacheGetLatency)
|
||||
prometheus.MustRegister(etcdRequestLatenciesSummary)
|
||||
prometheus.MustRegister(etcdRequestLatency)
|
||||
prometheus.MustRegister(objectCounts)
|
||||
|
||||
// TODO(danielqsj): Remove the following metrics, they are deprecated
|
||||
prometheus.MustRegister(deprecatedCacheHitCounter)
|
||||
prometheus.MustRegister(deprecatedCacheMissCounter)
|
||||
prometheus.MustRegister(deprecatedCacheEntryCounter)
|
||||
prometheus.MustRegister(deprecatedCacheAddLatency)
|
||||
prometheus.MustRegister(deprecatedCacheGetLatency)
|
||||
prometheus.MustRegister(deprecatedEtcdRequestLatenciesSummary)
|
||||
})
|
||||
}
|
||||
|
||||
|
|
@ -89,27 +133,33 @@ func UpdateObjectCount(resourcePrefix string, count int64) {
|
|||
}
|
||||
|
||||
func RecordEtcdRequestLatency(verb, resource string, startTime time.Time) {
|
||||
etcdRequestLatenciesSummary.WithLabelValues(verb, resource).Observe(float64(time.Since(startTime) / time.Microsecond))
|
||||
etcdRequestLatency.WithLabelValues(verb, resource).Observe(sinceInSeconds(startTime))
|
||||
deprecatedEtcdRequestLatenciesSummary.WithLabelValues(verb, resource).Observe(sinceInMicroseconds(startTime))
|
||||
}
|
||||
|
||||
func ObserveGetCache(startTime time.Time) {
|
||||
cacheGetLatency.Observe(float64(time.Since(startTime) / time.Microsecond))
|
||||
cacheGetLatency.Observe(sinceInSeconds(startTime))
|
||||
deprecatedCacheGetLatency.Observe(sinceInMicroseconds(startTime))
|
||||
}
|
||||
|
||||
func ObserveAddCache(startTime time.Time) {
|
||||
cacheAddLatency.Observe(float64(time.Since(startTime) / time.Microsecond))
|
||||
cacheAddLatency.Observe(sinceInSeconds(startTime))
|
||||
deprecatedCacheAddLatency.Observe(sinceInMicroseconds(startTime))
|
||||
}
|
||||
|
||||
func ObserveCacheHit() {
|
||||
cacheHitCounter.Inc()
|
||||
deprecatedCacheHitCounter.Inc()
|
||||
}
|
||||
|
||||
func ObserveCacheMiss() {
|
||||
cacheMissCounter.Inc()
|
||||
deprecatedCacheMissCounter.Inc()
|
||||
}
|
||||
|
||||
func ObserveNewEntry() {
|
||||
cacheEntryCounter.Inc()
|
||||
deprecatedCacheEntryCounter.Inc()
|
||||
}
|
||||
|
||||
func Reset() {
|
||||
|
|
@ -118,5 +168,20 @@ func Reset() {
|
|||
cacheEntryCounter = prometheus.NewCounter(cacheEntryCounterOpts)
|
||||
// TODO: Reset cacheAddLatency.
|
||||
// TODO: Reset cacheGetLatency.
|
||||
etcdRequestLatenciesSummary.Reset()
|
||||
etcdRequestLatency.Reset()
|
||||
|
||||
deprecatedCacheHitCounter = prometheus.NewCounter(deprecatedCacheHitCounterOpts)
|
||||
deprecatedCacheMissCounter = prometheus.NewCounter(deprecatedCacheMissCounterOpts)
|
||||
deprecatedCacheEntryCounter = prometheus.NewCounter(deprecatedCacheEntryCounterOpts)
|
||||
deprecatedEtcdRequestLatenciesSummary.Reset()
|
||||
}
|
||||
|
||||
// sinceInMicroseconds gets the time since the specified start in microseconds.
|
||||
func sinceInMicroseconds(start time.Time) float64 {
|
||||
return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
|
||||
}
|
||||
|
||||
// sinceInSeconds gets the time since the specified start in seconds.
|
||||
func sinceInSeconds(start time.Time) float64 {
|
||||
return time.Since(start).Seconds()
|
||||
}
|
||||
|
|
|
|||
19
vendor/k8s.io/apiserver/pkg/storage/etcd/util/doc.go
generated
vendored
|
|
@ -1,19 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package util holds generic etcd-related utility functions that any user of etcd might want to
|
||||
// use, without pulling in kubernetes-specific code.
|
||||
package util // import "k8s.io/apiserver/pkg/storage/etcd/util"
|
||||
99
vendor/k8s.io/apiserver/pkg/storage/etcd/util/etcd_util.go
generated
vendored
|
|
@ -1,99 +0,0 @@
|
|||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
|
||||
etcd "github.com/coreos/etcd/client"
|
||||
)
|
||||
|
||||
// IsEtcdNotFound returns true if and only if err is an etcd not found error.
|
||||
func IsEtcdNotFound(err error) bool {
|
||||
return isEtcdErrorNum(err, etcd.ErrorCodeKeyNotFound)
|
||||
}
|
||||
|
||||
// IsEtcdNodeExist returns true if and only if err is an etcd node already exist error.
|
||||
func IsEtcdNodeExist(err error) bool {
|
||||
return isEtcdErrorNum(err, etcd.ErrorCodeNodeExist)
|
||||
}
|
||||
|
||||
// IsEtcdTestFailed returns true if and only if err is an etcd write conflict.
|
||||
func IsEtcdTestFailed(err error) bool {
|
||||
return isEtcdErrorNum(err, etcd.ErrorCodeTestFailed)
|
||||
}
|
||||
|
||||
// IsEtcdWatchExpired returns true if and only if err indicates the watch has expired.
|
||||
func IsEtcdWatchExpired(err error) bool {
|
||||
// NOTE: It is unclear why this is not etcd.ErrorCodeWatcherCleared;
|
||||
// we keep using the previously matched value
|
||||
return isEtcdErrorNum(err, etcd.ErrorCodeEventIndexCleared)
|
||||
}
|
||||
|
||||
// IsEtcdUnreachable returns true if and only if err indicates the server could not be reached.
|
||||
func IsEtcdUnreachable(err error) bool {
|
||||
// NOTE: The logic has changed; the previous error code no longer applies
|
||||
return err == etcd.ErrClusterUnavailable
|
||||
}
|
||||
|
||||
// isEtcdErrorNum returns true if and only if err is an etcd error whose error code matches errorCode
|
||||
func isEtcdErrorNum(err error, errorCode int) bool {
|
||||
if err != nil {
|
||||
if etcdError, ok := err.(etcd.Error); ok {
|
||||
return etcdError.Code == errorCode
|
||||
}
|
||||
// NOTE: There are other error types returned
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetEtcdVersion performs a version check against the provided Etcd server,
|
||||
// returning the string response, and error (if any).
|
||||
func GetEtcdVersion(host string) (string, error) {
|
||||
response, err := http.Get(host + "/version")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer response.Body.Close()
|
||||
if response.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("unsuccessful response from etcd server %q: %v", host, err)
|
||||
}
|
||||
versionBytes, err := ioutil.ReadAll(response.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(versionBytes), nil
|
||||
}
|
||||
|
||||
type etcdHealth struct {
|
||||
// Note this has to be public so the json library can modify it.
|
||||
Health string `json:"health"`
|
||||
}
|
||||
|
||||
func EtcdHealthCheck(data []byte) error {
|
||||
obj := etcdHealth{}
|
||||
if err := json.Unmarshal(data, &obj); err != nil {
|
||||
return err
|
||||
}
|
||||
if obj.Health != "true" {
|
||||
return fmt.Errorf("Unhealthy status: %s", obj.Health)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
2
vendor/k8s.io/apiserver/pkg/storage/etcd3/OWNERS
generated
vendored
Executable file → Normal file
|
|
@ -1,3 +1,5 @@
|
|||
# See the OWNERS docs at https://go.k8s.io/owners
|
||||
|
||||
reviewers:
|
||||
- wojtek-t
|
||||
- timothysc
|
||||
|
|
|
|||
8
vendor/k8s.io/apiserver/pkg/storage/etcd3/compact.go
generated
vendored
|
|
@ -23,7 +23,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
const (
|
||||
|
|
@ -51,7 +51,7 @@ func StartCompactor(ctx context.Context, client *clientv3.Client, compactInterva
|
|||
// Currently we rely on endpoints to differentiate clusters.
|
||||
for _, ep := range client.Endpoints() {
|
||||
if _, ok := endpointsMap[ep]; ok {
|
||||
glog.V(4).Infof("compactor already exists for endpoints %v", client.Endpoints())
|
||||
klog.V(4).Infof("compactor already exists for endpoints %v", client.Endpoints())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
|
@ -121,7 +121,7 @@ func compactor(ctx context.Context, client *clientv3.Client, interval time.Durat
|
|||
|
||||
compactTime, rev, err = compact(ctx, client, compactTime, rev)
|
||||
if err != nil {
|
||||
glog.Errorf("etcd: endpoint (%v) compact failed: %v", client.Endpoints(), err)
|
||||
klog.Errorf("etcd: endpoint (%v) compact failed: %v", client.Endpoints(), err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
|
@ -157,6 +157,6 @@ func compact(ctx context.Context, client *clientv3.Client, t, rev int64) (int64,
|
|||
if _, err = client.Compact(ctx, rev); err != nil {
|
||||
return curTime, curRev, err
|
||||
}
|
||||
glog.V(4).Infof("etcd: compacted rev (%d), endpoints (%v)", rev, client.Endpoints())
|
||||
klog.V(4).Infof("etcd: compacted rev (%d), endpoints (%v)", rev, client.Endpoints())
|
||||
return curTime, curRev, nil
|
||||
}
|
||||
|
|
|
|||
35
vendor/k8s.io/apiserver/pkg/storage/etcd3/errors.go
generated
vendored
|
|
@ -20,6 +20,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
|
||||
etcdrpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
)
|
||||
|
||||
func interpretWatchError(err error) error {
|
||||
|
|
@ -30,13 +31,41 @@ func interpretWatchError(err error) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func interpretListError(err error, paging bool) error {
|
||||
const (
|
||||
expired string = "The resourceVersion for the provided list is too old."
|
||||
continueExpired string = "The provided continue parameter is too old " +
|
||||
"to display a consistent list result. You can start a new list without " +
|
||||
"the continue parameter."
|
||||
inconsistentContinue string = "The provided continue parameter is too old " +
|
||||
"to display a consistent list result. You can start a new list without " +
|
||||
"the continue parameter, or use the continue token in this response to " +
|
||||
"retrieve the remainder of the results. Continuing with the provided " +
|
||||
"token results in an inconsistent list - objects that were created, " +
|
||||
"modified, or deleted between the time the first chunk was returned " +
|
||||
"and now may show up in the list."
|
||||
)
|
||||
|
||||
func interpretListError(err error, paging bool, continueKey, keyPrefix string) error {
|
||||
switch {
|
||||
case err == etcdrpc.ErrCompacted:
|
||||
if paging {
|
||||
return errors.NewResourceExpired("The provided from parameter is too old to display a consistent list result. You must start a new list without the from.")
|
||||
return handleCompactedErrorForPaging(continueKey, keyPrefix)
|
||||
}
|
||||
return errors.NewResourceExpired("The resourceVersion for the provided list is too old.")
|
||||
return errors.NewResourceExpired(expired)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func handleCompactedErrorForPaging(continueKey, keyPrefix string) error {
|
||||
// continueToken.ResourceVersion=-1 means that the apiserver can
|
||||
// continue the list at the latest resource version. We don't use rv=0
|
||||
// for this purpose to distinguish from a bad token that has empty rv.
|
||||
newToken, err := encodeContinue(continueKey, keyPrefix, -1)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
return errors.NewResourceExpired(continueExpired)
|
||||
}
|
||||
statusError := errors.NewResourceExpired(inconsistentContinue)
|
||||
statusError.ErrStatus.ListMeta.Continue = newToken
|
||||
return statusError
|
||||
}
|
||||
|
|
|
|||
2
vendor/k8s.io/apiserver/pkg/storage/etcd3/lease_manager.go
generated
vendored
|
|
@ -51,7 +51,7 @@ func newDefaultLeaseManager(client *clientv3.Client) *leaseManager {
|
|||
// value x means x*100%.
|
||||
func newLeaseManager(client *clientv3.Client, leaseReuseDurationSeconds int64, leaseReuseDurationPercent float64) *leaseManager {
|
||||
return &leaseManager{
|
||||
client: client,
|
||||
client: client,
|
||||
leaseReuseDurationSeconds: leaseReuseDurationSeconds,
|
||||
leaseReuseDurationPercent: leaseReuseDurationPercent,
|
||||
}
|
||||
|
|
|
|||
84
vendor/k8s.io/apiserver/pkg/storage/etcd3/logger.go
generated
vendored
Normal file
|
|
@ -0,0 +1,84 @@
|
|||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package etcd3
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"k8s.io/klog"
|
||||
)
|
||||
|
||||
func init() {
|
||||
clientv3.SetLogger(klogWrapper{})
|
||||
}
|
||||
|
||||
type klogWrapper struct{}
|
||||
|
||||
const klogWrapperDepth = 4
|
||||
|
||||
func (klogWrapper) Info(args ...interface{}) {
|
||||
klog.InfoDepth(klogWrapperDepth, args...)
|
||||
}
|
||||
|
||||
func (klogWrapper) Infoln(args ...interface{}) {
|
||||
klog.InfoDepth(klogWrapperDepth, fmt.Sprintln(args...))
|
||||
}
|
||||
|
||||
func (klogWrapper) Infof(format string, args ...interface{}) {
|
||||
klog.InfoDepth(klogWrapperDepth, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (klogWrapper) Warning(args ...interface{}) {
|
||||
klog.WarningDepth(klogWrapperDepth, args...)
|
||||
}
|
||||
|
||||
func (klogWrapper) Warningln(args ...interface{}) {
|
||||
klog.WarningDepth(klogWrapperDepth, fmt.Sprintln(args...))
|
||||
}
|
||||
|
||||
func (klogWrapper) Warningf(format string, args ...interface{}) {
|
||||
klog.WarningDepth(klogWrapperDepth, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (klogWrapper) Error(args ...interface{}) {
|
||||
klog.ErrorDepth(klogWrapperDepth, args...)
|
||||
}
|
||||
|
||||
func (klogWrapper) Errorln(args ...interface{}) {
|
||||
klog.ErrorDepth(klogWrapperDepth, fmt.Sprintln(args...))
|
||||
}
|
||||
|
||||
func (klogWrapper) Errorf(format string, args ...interface{}) {
|
||||
klog.ErrorDepth(klogWrapperDepth, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (klogWrapper) Fatal(args ...interface{}) {
|
||||
klog.FatalDepth(klogWrapperDepth, args...)
|
||||
}
|
||||
|
||||
func (klogWrapper) Fatalln(args ...interface{}) {
|
||||
klog.FatalDepth(klogWrapperDepth, fmt.Sprintln(args...))
|
||||
}
|
||||
|
||||
func (klogWrapper) Fatalf(format string, args ...interface{}) {
|
||||
klog.FatalDepth(klogWrapperDepth, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (klogWrapper) V(l int) bool {
|
||||
return bool(klog.V(klog.Level(l)))
|
||||
}
|
||||
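The new logger.go routes the etcd client's logger through klog, and every method passes a fixed call depth (klogWrapperDepth = 4) so the reported file:line points at the etcd call site rather than at the wrapper itself. A small stand-alone sketch of why a depth parameter matters when wrapping a logger, using only the standard library (no klog):

package main

import (
	"fmt"
	"runtime"
)

// logAt reports the caller `depth` frames above this function, mimicking
// klog.InfoDepth: depth 0 blames the wrapper's own line, a larger depth
// blames the code that actually called the logging facade.
func logAt(depth int, msg string) {
	_, file, line, ok := runtime.Caller(depth + 1) // +1 skips logAt itself
	if !ok {
		file, line = "???", 0
	}
	fmt.Printf("%s:%d: %s\n", file, line, msg)
}

// wrapper stands in for klogWrapper: it adds one frame between the caller
// and logAt, so it must bump the depth by one to stay transparent.
func wrapper(msg string) {
	logAt(1, msg)
}

func main() {
	logAt(0, "logged directly")   // attributed to this line in main
	wrapper("logged via wrapper") // still attributed to main thanks to depth=1
}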
70
vendor/k8s.io/apiserver/pkg/storage/etcd3/preflight/checks.go
generated
vendored
70
vendor/k8s.io/apiserver/pkg/storage/etcd3/preflight/checks.go
generated
vendored

@ -1,70 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package preflight

import (
	"fmt"
	"math/rand"
	"net"
	"net/url"
	"time"
)

const connectionTimeout = 1 * time.Second

// EtcdConnection holds the Etcd server list
type EtcdConnection struct {
	ServerList []string
}

func (EtcdConnection) serverReachable(connURL *url.URL) bool {
	scheme := connURL.Scheme
	if scheme == "http" || scheme == "https" || scheme == "tcp" {
		scheme = "tcp"
	}
	if conn, err := net.DialTimeout(scheme, connURL.Host, connectionTimeout); err == nil {
		defer conn.Close()
		return true
	}
	return false
}

func parseServerURI(serverURI string) (*url.URL, error) {
	connURL, err := url.Parse(serverURI)
	if err != nil {
		return &url.URL{}, fmt.Errorf("unable to parse etcd url: %v", err)
	}
	return connURL, nil
}

// CheckEtcdServers will attempt to reach all etcd servers once. If any
// can be reached, return true.
func (con EtcdConnection) CheckEtcdServers() (done bool, err error) {
	// Attempt to reach every Etcd server randomly.
	serverNumber := len(con.ServerList)
	serverPerms := rand.Perm(serverNumber)
	for _, index := range serverPerms {
		host, err := parseServerURI(con.ServerList[index])
		if err != nil {
			return false, err
		}
		if con.serverReachable(host) {
			return true, nil
		}
	}
	return false, nil
}
74
vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go
generated
vendored
74
vendor/k8s.io/apiserver/pkg/storage/etcd3/store.go
generated
vendored

@ -29,18 +29,17 @@ import (
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/golang/glog"
	"k8s.io/klog"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/conversion"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/apiserver/pkg/storage"
	"k8s.io/apiserver/pkg/storage/etcd"
	"k8s.io/apiserver/pkg/storage/value"
	utiltrace "k8s.io/apiserver/pkg/util/trace"
	utiltrace "k8s.io/utils/trace"
)

// authenticatedDataString satisfies the value.Context interface. It uses the key to

@ -73,11 +72,6 @@ type store struct {
	leaseManager *leaseManager
}

type elemForDecode struct {
	data []byte
	rev  uint64
}

type objState struct {
	obj  runtime.Object
	meta *storage.ResponseMeta

@ -88,16 +82,10 @@ type objState struct {

// New returns an etcd3 implementation of storage.Interface.
func New(c *clientv3.Client, codec runtime.Codec, prefix string, transformer value.Transformer, pagingEnabled bool) storage.Interface {
	return newStore(c, true, pagingEnabled, codec, prefix, transformer)
	return newStore(c, pagingEnabled, codec, prefix, transformer)
}

// NewWithNoQuorumRead returns etcd3 implementation of storage.Interface
// where Get operations don't require quorum read.
func NewWithNoQuorumRead(c *clientv3.Client, codec runtime.Codec, prefix string, transformer value.Transformer, pagingEnabled bool) storage.Interface {
	return newStore(c, false, pagingEnabled, codec, prefix, transformer)
}

func newStore(c *clientv3.Client, quorumRead, pagingEnabled bool, codec runtime.Codec, prefix string, transformer value.Transformer) *store {
func newStore(c *clientv3.Client, pagingEnabled bool, codec runtime.Codec, prefix string, transformer value.Transformer) *store {
	versioner := etcd.APIObjectVersioner{}
	result := &store{
		client: c,

@ -112,11 +100,6 @@ func newStore(c *clientv3.Client, quorumRead, pagingEnabled bool, codec runtime.
		watcher:      newWatcher(c, codec, versioner, transformer),
		leaseManager: newDefaultLeaseManager(c),
	}
	if !quorumRead {
		// In case of non-quorum reads, we can set WithSerializable()
		// options for all Get operations.
		result.getOps = append(result.getOps, clientv3.WithSerializable())
	}
	return result
}

@ -238,7 +221,7 @@ func (s *store) conditionalDelete(ctx context.Context, key string, out runtime.O
		if err != nil {
			return err
		}
		if err := checkPreconditions(key, preconditions, origState.obj); err != nil {
		if err := preconditions.Check(key, origState.obj); err != nil {
			return err
		}
		txnResp, err := s.client.KV.Txn(ctx).If(

@ -253,7 +236,7 @@ func (s *store) conditionalDelete(ctx context.Context, key string, out runtime.O
		}
		if !txnResp.Succeeded {
			getResp = (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
			glog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key)
			klog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key)
			continue
		}
		return decode(s.codec, s.versioner, origState.data, out, origState.rev)

@ -299,7 +282,7 @@ func (s *store) GuaranteedUpdate(

	transformContext := authenticatedDataString(key)
	for {
		if err := checkPreconditions(key, preconditions, origState.obj); err != nil {
		if err := preconditions.Check(key, origState.obj); err != nil {
			return err
		}

@ -369,7 +352,7 @@ func (s *store) GuaranteedUpdate(
		trace.Step("Transaction committed")
		if !txnResp.Succeeded {
			getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
			glog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key)
			klog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key)
			origState, err = s.getState(getResp, key, v, ignoreNotFound)
			if err != nil {
				return err

@ -513,10 +496,11 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor
		options = append(options, clientv3.WithLimit(pred.Limit))
	}

	var returnedRV int64
	var returnedRV, continueRV int64
	var continueKey string
	switch {
	case s.pagingEnabled && len(pred.Continue) > 0:
		continueKey, continueRV, err := decodeContinue(pred.Continue, keyPrefix)
		continueKey, continueRV, err = decodeContinue(pred.Continue, keyPrefix)
		if err != nil {
			return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err))
		}

@ -529,12 +513,16 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor
		options = append(options, clientv3.WithRange(rangeEnd))
		key = continueKey

		options = append(options, clientv3.WithRev(continueRV))
		returnedRV = continueRV

		// If continueRV > 0, the LIST request needs a specific resource version.
		// continueRV==0 is invalid.
		// If continueRV < 0, the request is for the latest resource version.
		if continueRV > 0 {
			options = append(options, clientv3.WithRev(continueRV))
			returnedRV = continueRV
		}
	case s.pagingEnabled && pred.Limit > 0:
		if len(resourceVersion) > 0 {
			fromRV, err := s.versioner.ParseListResourceVersion(resourceVersion)
			fromRV, err := s.versioner.ParseResourceVersion(resourceVersion)
			if err != nil {
				return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
			}

@ -549,7 +537,7 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor

	default:
		if len(resourceVersion) > 0 {
			fromRV, err := s.versioner.ParseListResourceVersion(resourceVersion)
			fromRV, err := s.versioner.ParseResourceVersion(resourceVersion)
			if err != nil {
				return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
			}

@ -568,7 +556,7 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor
	for {
		getResp, err := s.client.KV.Get(ctx, key, options...)
		if err != nil {
			return interpretListError(err, len(pred.Continue) > 0)
			return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix)
		}
		hasMore = getResp.More

@ -594,8 +582,7 @@ func (s *store) List(ctx context.Context, key, resourceVersion string, pred stor

			data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key))
			if err != nil {
				utilruntime.HandleError(fmt.Errorf("unable to transform key %q: %v", kv.Key, err))
				continue
				return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err)
			}

			if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner); err != nil {

@ -676,7 +663,7 @@ func (s *store) WatchList(ctx context.Context, key string, resourceVersion strin
}

func (s *store) watch(ctx context.Context, key string, rv string, pred storage.SelectionPredicate, recursive bool) (watch.Interface, error) {
	rev, err := s.versioner.ParseWatchResourceVersion(rv)
	rev, err := s.versioner.ParseResourceVersion(rv)
	if err != nil {
		return nil, err
	}

@ -796,21 +783,6 @@ func appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.Selec
	return nil
}

func checkPreconditions(key string, preconditions *storage.Preconditions, out runtime.Object) error {
	if preconditions == nil {
		return nil
	}
	objMeta, err := meta.Accessor(out)
	if err != nil {
		return storage.NewInternalErrorf("can't enforce preconditions %v on un-introspectable object %v, got error: %v", *preconditions, out, err)
	}
	if preconditions.UID != nil && *preconditions.UID != objMeta.GetUID() {
		errMsg := fmt.Sprintf("Precondition failed: UID in precondition: %v, UID in object meta: %v", *preconditions.UID, objMeta.GetUID())
		return storage.NewInvalidObjError(key, errMsg)
	}
	return nil
}

func notFound(key string) clientv3.Cmp {
	return clientv3.Compare(clientv3.ModRevision(key), "=", 0)
}
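Two behavioural changes in store.go stand out: precondition checks move to the new Preconditions.Check method, and the List continue token now distinguishes rv > 0 (pin the read to that revision) from rv < 0 (read at the latest revision), with rv == 0 treated as invalid. A self-contained sketch of that revision-selection rule; listOptions is a hypothetical stand-in for the slice of etcd client options built in List:

package main

import "fmt"

// listOptions stands in for the clientv3 options assembled in store.List.
type listOptions struct {
	rev int64 // 0 means "no WithRev, read at the server's latest revision"
}

// applyContinueRV mirrors the new branch in store.List: only a positive
// resource version from the continue token pins the read; -1 means the token
// was minted after a compaction and asks for the latest revision instead.
func applyContinueRV(opts *listOptions, continueRV int64) (returnedRV int64) {
	if continueRV > 0 {
		opts.rev = continueRV
		returnedRV = continueRV
	}
	// continueRV < 0: leave opts.rev at 0 so the read happens at the latest
	// revision; the list's resourceVersion is then taken from the response.
	return returnedRV
}

func main() {
	for _, rv := range []int64{42, -1} {
		opts := &listOptions{}
		returned := applyContinueRV(opts, rv)
		fmt.Printf("continueRV=%d -> pinned rev=%d, returnedRV=%d\n", rv, opts.rev, returned)
	}
}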
12
vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go
generated
vendored
12
vendor/k8s.io/apiserver/pkg/storage/etcd3/watcher.go
generated
vendored

@ -32,7 +32,7 @@ import (
	"k8s.io/apiserver/pkg/storage/value"

	"github.com/coreos/etcd/clientv3"
	"github.com/golang/glog"
	"k8s.io/klog"
)

const (

@ -191,7 +191,7 @@ func (wc *watchChan) sync() error {
func (wc *watchChan) startWatching(watchClosedCh chan struct{}) {
	if wc.initialRev == 0 {
		if err := wc.sync(); err != nil {
			glog.Errorf("failed to sync with latest state: %v", err)
			klog.Errorf("failed to sync with latest state: %v", err)
			wc.sendError(err)
			return
		}

@ -205,7 +205,7 @@ func (wc *watchChan) startWatching(watchClosedCh chan struct{}) {
		if wres.Err() != nil {
			err := wres.Err()
			// If there is an error on server (e.g. compaction), the channel will return it before closed.
			glog.Errorf("watch chan error: %v", err)
			klog.Errorf("watch chan error: %v", err)
			wc.sendError(err)
			return
		}

@ -232,7 +232,7 @@ func (wc *watchChan) processEvent(wg *sync.WaitGroup) {
				continue
			}
			if len(wc.resultChan) == outgoingBufSize {
				glog.Warningf("Fast watcher, slow processing. Number of buffered events: %d."+
				klog.V(3).Infof("Fast watcher, slow processing. Number of buffered events: %d."+
					"Probably caused by slow dispatching events to watchers", outgoingBufSize)
			}
			// If user couldn't receive results fast enough, we also block incoming events from watcher.

@ -265,7 +265,7 @@ func (wc *watchChan) acceptAll() bool {
func (wc *watchChan) transform(e *event) (res *watch.Event) {
	curObj, oldObj, err := wc.prepareObjs(e)
	if err != nil {
		glog.Errorf("failed to prepare current and previous objects: %v", err)
		klog.Errorf("failed to prepare current and previous objects: %v", err)
		wc.sendError(err)
		return nil
	}

@ -339,7 +339,7 @@ func (wc *watchChan) sendError(err error) {

func (wc *watchChan) sendEvent(e *event) {
	if len(wc.incomingEventChan) == incomingBufSize {
		glog.Warningf("Fast watcher, slow processing. Number of buffered events: %d."+
		klog.V(3).Infof("Fast watcher, slow processing. Number of buffered events: %d."+
			"Probably caused by slow decoding, user not receiving fast, or other processing logic",
			incomingBufSize)
	}
46
vendor/k8s.io/apiserver/pkg/storage/interfaces.go
generated
vendored
46
vendor/k8s.io/apiserver/pkg/storage/interfaces.go
generated
vendored

@ -18,7 +18,9 @@ package storage

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"

@ -49,16 +51,12 @@ type Versioner interface {
	// Should return an error if the specified object does not have a persistable version.
	ObjectResourceVersion(obj runtime.Object) (uint64, error)

	// ParseWatchResourceVersion takes a resource version argument and
	// converts it to the storage backend we should pass to helper.Watch().
	// ParseResourceVersion takes a resource version argument and
	// converts it to the storage backend. For watch we should pass to helper.Watch().
	// Because resourceVersion is an opaque value, the default watch
	// behavior for non-zero watch is to watch the next value (if you pass
	// "1", you will see updates from "2" onwards).
	ParseWatchResourceVersion(resourceVersion string) (uint64, error)
	// ParseListResourceVersion takes a resource version argument and
	// converts it to the storage backend version. Appropriate for
	// everything that's not intended as an argument for watch.
	ParseListResourceVersion(resourceVersion string) (uint64, error)
	ParseResourceVersion(resourceVersion string) (uint64, error)
}

// ResponseMeta contains information about the database metadata that is associated with

@ -88,8 +86,6 @@ type TriggerPublisherFunc func(obj runtime.Object) []MatchValue
var Everything = SelectionPredicate{
	Label: labels.Everything(),
	Field: fields.Everything(),
	// TODO: split this into a new top level constant?
	IncludeUninitialized: true,
}

// Pass an UpdateFunc to Interface.GuaranteedUpdate to make an update

@ -102,6 +98,9 @@ type Preconditions struct {
	// Specifies the target UID.
	// +optional
	UID *types.UID `json:"uid,omitempty"`
	// Specifies the target ResourceVersion
	// +optional
	ResourceVersion *string `json:"resourceVersion,omitempty"`
}

// NewUIDPreconditions returns a Preconditions with UID set.

@ -110,6 +109,35 @@ func NewUIDPreconditions(uid string) *Preconditions {
	return &Preconditions{UID: &u}
}

func (p *Preconditions) Check(key string, obj runtime.Object) error {
	if p == nil {
		return nil
	}
	objMeta, err := meta.Accessor(obj)
	if err != nil {
		return NewInternalErrorf(
			"can't enforce preconditions %v on un-introspectable object %v, got error: %v",
			*p,
			obj,
			err)
	}
	if p.UID != nil && *p.UID != objMeta.GetUID() {
		err := fmt.Sprintf(
			"Precondition failed: UID in precondition: %v, UID in object meta: %v",
			*p.UID,
			objMeta.GetUID())
		return NewInvalidObjError(key, err)
	}
	if p.ResourceVersion != nil && *p.ResourceVersion != objMeta.GetResourceVersion() {
		err := fmt.Sprintf(
			"Precondition failed: ResourceVersion in precondition: %v, ResourceVersion in object meta: %v",
			*p.ResourceVersion,
			objMeta.GetResourceVersion())
		return NewInvalidObjError(key, err)
	}
	return nil
}

// Interface offers a common interface for object marshaling/unmarshaling operations and
// hides all the storage-related operations behind it.
type Interface interface {
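The Preconditions type gains a ResourceVersion field and a Check method, so both the UID and the resource-version guards now live next to the type instead of in a store-local helper. A stand-alone sketch of the same guard applied to a plain struct (no apimachinery accessors, illustrative names only):

package main

import "fmt"

// object stands in for the accessor surface Check reads via meta.Accessor.
type object struct {
	UID             string
	ResourceVersion string
}

// preconditions mirrors storage.Preconditions: nil fields mean "don't care".
type preconditions struct {
	UID             *string
	ResourceVersion *string
}

// check mirrors Preconditions.Check: every non-nil field must match the
// stored object, otherwise the update or delete is rejected.
func (p *preconditions) check(key string, obj object) error {
	if p == nil {
		return nil
	}
	if p.UID != nil && *p.UID != obj.UID {
		return fmt.Errorf("precondition failed for %q: UID %v != %v", key, *p.UID, obj.UID)
	}
	if p.ResourceVersion != nil && *p.ResourceVersion != obj.ResourceVersion {
		return fmt.Errorf("precondition failed for %q: resourceVersion %v != %v", key, *p.ResourceVersion, obj.ResourceVersion)
	}
	return nil
}

func main() {
	rv := "41"
	p := &preconditions{ResourceVersion: &rv}
	if err := p.check("/registry/pods/ns/p", object{UID: "u1", ResourceVersion: "42"}); err != nil {
		fmt.Println(err) // the stored object moved on, so the guard fails
	}
}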
49
vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go
generated
vendored
49
vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go
generated
vendored

@ -25,59 +25,58 @@ import (

// AttrFunc returns label and field sets and the uninitialized flag for List or Watch to match.
// In any failure to parse given object, it returns error.
type AttrFunc func(obj runtime.Object) (labels.Set, fields.Set, bool, error)
type AttrFunc func(obj runtime.Object) (labels.Set, fields.Set, error)

// FieldMutationFunc allows the mutation of the field selection fields. It is mutating to
// avoid the extra allocation on this common path
type FieldMutationFunc func(obj runtime.Object, fieldSet fields.Set) error

func DefaultClusterScopedAttr(obj runtime.Object) (labels.Set, fields.Set, bool, error) {
func DefaultClusterScopedAttr(obj runtime.Object) (labels.Set, fields.Set, error) {
	metadata, err := meta.Accessor(obj)
	if err != nil {
		return nil, nil, false, err
		return nil, nil, err
	}
	fieldSet := fields.Set{
		"metadata.name": metadata.GetName(),
	}

	return labels.Set(metadata.GetLabels()), fieldSet, metadata.GetInitializers() != nil, nil
	return labels.Set(metadata.GetLabels()), fieldSet, nil
}

func DefaultNamespaceScopedAttr(obj runtime.Object) (labels.Set, fields.Set, bool, error) {
func DefaultNamespaceScopedAttr(obj runtime.Object) (labels.Set, fields.Set, error) {
	metadata, err := meta.Accessor(obj)
	if err != nil {
		return nil, nil, false, err
		return nil, nil, err
	}
	fieldSet := fields.Set{
		"metadata.name":      metadata.GetName(),
		"metadata.namespace": metadata.GetNamespace(),
	}

	return labels.Set(metadata.GetLabels()), fieldSet, metadata.GetInitializers() != nil, nil
	return labels.Set(metadata.GetLabels()), fieldSet, nil
}

func (f AttrFunc) WithFieldMutation(fieldMutator FieldMutationFunc) AttrFunc {
	return func(obj runtime.Object) (labels.Set, fields.Set, bool, error) {
		labelSet, fieldSet, initialized, err := f(obj)
	return func(obj runtime.Object) (labels.Set, fields.Set, error) {
		labelSet, fieldSet, err := f(obj)
		if err != nil {
			return nil, nil, false, err
			return nil, nil, err
		}
		if err := fieldMutator(obj, fieldSet); err != nil {
			return nil, nil, false, err
			return nil, nil, err
		}
		return labelSet, fieldSet, initialized, nil
		return labelSet, fieldSet, nil
	}
}

// SelectionPredicate is used to represent the way to select objects from api storage.
type SelectionPredicate struct {
	Label                labels.Selector
	Field                fields.Selector
	IncludeUninitialized bool
	GetAttrs             AttrFunc
	IndexFields          []string
	Limit                int64
	Continue             string
	Label       labels.Selector
	Field       fields.Selector
	GetAttrs    AttrFunc
	IndexFields []string
	Limit       int64
	Continue    string
}

// Matches returns true if the given object's labels and fields (as

@ -87,13 +86,10 @@ func (s *SelectionPredicate) Matches(obj runtime.Object) (bool, error) {
	if s.Empty() {
		return true, nil
	}
	labels, fields, uninitialized, err := s.GetAttrs(obj)
	labels, fields, err := s.GetAttrs(obj)
	if err != nil {
		return false, err
	}
	if !s.IncludeUninitialized && uninitialized {
		return false, nil
	}
	matched := s.Label.Matches(labels)
	if matched && s.Field != nil {
		matched = matched && s.Field.Matches(fields)

@ -103,10 +99,7 @@ func (s *SelectionPredicate) Matches(obj runtime.Object) (bool, error) {

// MatchesObjectAttributes returns true if the given labels and fields
// match s.Label and s.Field.
func (s *SelectionPredicate) MatchesObjectAttributes(l labels.Set, f fields.Set, uninitialized bool) bool {
	if !s.IncludeUninitialized && uninitialized {
		return false
	}
func (s *SelectionPredicate) MatchesObjectAttributes(l labels.Set, f fields.Set) bool {
	if s.Label.Empty() && s.Field.Empty() {
		return true
	}

@ -146,5 +139,5 @@ func (s *SelectionPredicate) MatcherIndex() []MatchValue {

// Empty returns true if the predicate performs no filtering.
func (s *SelectionPredicate) Empty() bool {
	return s.Label.Empty() && s.Field.Empty() && s.IncludeUninitialized
	return s.Label.Empty() && s.Field.Empty()
}
2
vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS
generated
vendored
Executable file → Normal file
2
vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS
generated
vendored
Executable file → Normal file

@ -1,3 +1,5 @@
# See the OWNERS docs at https://go.k8s.io/owners

reviewers:
- lavalamp
- smarterclayton
35
vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go
generated
vendored
35
vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go
generated
vendored

@ -25,24 +25,29 @@ import (

const (
	StorageTypeUnset = ""
	StorageTypeETCD2 = "etcd2"
	StorageTypeETCD3 = "etcd3"

	DefaultCompactInterval = 5 * time.Minute
)

// Config is configuration for creating a storage backend.
type Config struct {
	// Type defines the type of storage backend, e.g. "etcd2", etcd3". Default ("") is "etcd3".
	Type string
	// Prefix is the prefix to all keys passed to storage.Interface methods.
	Prefix string
// TransportConfig holds all connection related info, i.e. equal TransportConfig means equal servers we talk to.
type TransportConfig struct {
	// ServerList is the list of storage servers to connect with.
	ServerList []string
	// TLS credentials
	KeyFile  string
	CertFile string
	CAFile   string
}

// Config is configuration for creating a storage backend.
type Config struct {
	// Type defines the type of storage backend. Default ("") is "etcd3".
	Type string
	// Prefix is the prefix to all keys passed to storage.Interface methods.
	Prefix string
	// Transport holds all connection related info, i.e. equal TransportConfig means equal servers we talk to.
	Transport TransportConfig
	// Quorum indicates that whether read operations should be quorum-level consistent.
	Quorum bool
	// Paging indicates whether the server implementation should allow paging (if it is

@ -50,31 +55,27 @@ type Config struct {
	// resource type not wishing to allow paging, and is not intended for end users to
	// set.
	Paging bool
	// DeserializationCacheSize is the size of cache of deserialized objects.
	// Currently this is only supported in etcd2.
	// We will drop the cache once using protobuf.
	DeserializationCacheSize int

	Codec runtime.Codec
	// EncodeVersioner is the same groupVersioner used to build the
	// storage encoder. Given a list of kinds the input object might belong
	// to, the EncodeVersioner outputs the gvk the object will be
	// converted to before persisted in etcd.
	EncodeVersioner runtime.GroupVersioner
	// Transformer allows the value to be transformed prior to persisting into etcd.
	Transformer value.Transformer

	// CompactionInterval is an interval of requesting compaction from apiserver.
	// If the value is 0, no compaction will be issued.
	CompactionInterval time.Duration

	// CountMetricPollPeriod specifies how often should count metric be updated
	CountMetricPollPeriod time.Duration
}

func NewDefaultConfig(prefix string, codec runtime.Codec) *Config {
	return &Config{
		Prefix: prefix,
		// Default cache size to 0 - if unset, its size will be set based on target
		// memory usage.
		DeserializationCacheSize: 0,
		Prefix:             prefix,
		Codec:              codec,
		CompactionInterval: DefaultCompactInterval,
		Quorum:             true,
	}
}
81
vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd2.go
generated
vendored
81
vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd2.go
generated
vendored

@ -1,81 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package factory

import (
	"net"
	"net/http"
	"time"

	etcd2client "github.com/coreos/etcd/client"
	"github.com/coreos/etcd/pkg/transport"

	utilnet "k8s.io/apimachinery/pkg/util/net"
	"k8s.io/apiserver/pkg/storage"
	"k8s.io/apiserver/pkg/storage/etcd"
	"k8s.io/apiserver/pkg/storage/storagebackend"
)

func newETCD2Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, error) {
	tr, err := newTransportForETCD2(c.CertFile, c.KeyFile, c.CAFile)
	if err != nil {
		return nil, nil, err
	}
	client, err := newETCD2Client(tr, c.ServerList)
	if err != nil {
		return nil, nil, err
	}
	s := etcd.NewEtcdStorage(client, c.Codec, c.Prefix, c.Quorum, c.DeserializationCacheSize, etcd.IdentityTransformer)
	return s, tr.CloseIdleConnections, nil
}

func newETCD2Client(tr *http.Transport, serverList []string) (etcd2client.Client, error) {
	cli, err := etcd2client.New(etcd2client.Config{
		Endpoints: serverList,
		Transport: tr,
	})
	if err != nil {
		return nil, err
	}

	return cli, nil
}

func newTransportForETCD2(certFile, keyFile, caFile string) (*http.Transport, error) {
	info := transport.TLSInfo{
		CertFile: certFile,
		KeyFile:  keyFile,
		CAFile:   caFile,
	}
	cfg, err := info.ClientConfig()
	if err != nil {
		return nil, err
	}
	// Copied from etcd.DefaultTransport declaration.
	// TODO: Determine if transport needs optimization
	tr := utilnet.SetTransportDefaults(&http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
		}).DialContext,
		TLSHandshakeTimeout: 10 * time.Second,
		MaxIdleConnsPerHost: 500,
		TLSClientConfig:     cfg,
	})
	return tr, nil
}
147
vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go
generated
vendored
147
vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/etcd3.go
generated
vendored

@ -18,11 +18,18 @@ package factory

import (
	"context"
	"fmt"
	"path"
	"sync"
	"sync/atomic"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/pkg/transport"
	grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/storage"
	"k8s.io/apiserver/pkg/storage/etcd3"
	"k8s.io/apiserver/pkg/storage/storagebackend"

@ -39,7 +46,43 @@ const keepaliveTimeout = 10 * time.Second
// on heavily loaded arm64 CPUs (issue #64649)
const dialTimeout = 20 * time.Second

func newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, error) {
func newETCD3HealthCheck(c storagebackend.Config) (func() error, error) {
	// constructing the etcd v3 client blocks and times out if etcd is not available.
	// retry in a loop in the background until we successfully create the client, storing the client or error encountered

	clientValue := &atomic.Value{}

	clientErrMsg := &atomic.Value{}
	clientErrMsg.Store("etcd client connection not yet established")

	go wait.PollUntil(time.Second, func() (bool, error) {
		client, err := newETCD3Client(c.Transport)
		if err != nil {
			clientErrMsg.Store(err.Error())
			return false, nil
		}
		clientValue.Store(client)
		clientErrMsg.Store("")
		return true, nil
	}, wait.NeverStop)

	return func() error {
		if errMsg := clientErrMsg.Load().(string); len(errMsg) > 0 {
			return fmt.Errorf(errMsg)
		}
		client := clientValue.Load().(*clientv3.Client)
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		// See https://github.com/etcd-io/etcd/blob/master/etcdctl/ctlv3/command/ep_command.go#L118
		_, err := client.Get(ctx, path.Join(c.Prefix, "health"))
		if err == nil {
			return nil
		}
		return fmt.Errorf("error getting data from etcd: %v", err)
	}, nil
}

func newETCD3Client(c storagebackend.TransportConfig) (*clientv3.Client, error) {
	tlsInfo := transport.TLSInfo{
		CertFile: c.CertFile,
		KeyFile:  c.KeyFile,

@ -47,7 +90,7 @@ func newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, e
	}
	tlsConfig, err := tlsInfo.ClientConfig()
	if err != nil {
		return nil, nil, err
		return nil, err
	}
	// NOTE: Client relies on nil tlsConfig
	// for non-secure connections, update the implicit variable

@ -58,25 +101,103 @@ func newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, e
		DialTimeout:          dialTimeout,
		DialKeepAliveTime:    keepaliveTime,
		DialKeepAliveTimeout: keepaliveTimeout,
		Endpoints:            c.ServerList,
		TLS:                  tlsConfig,
		DialOptions: []grpc.DialOption{
			grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
			grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
		},
		Endpoints: c.ServerList,
		TLS:       tlsConfig,
	}
	client, err := clientv3.New(cfg)

	return clientv3.New(cfg)
}

type runningCompactor struct {
	interval time.Duration
	cancel   context.CancelFunc
	client   *clientv3.Client
	refs     int
}

var (
	lock       sync.Mutex
	compactors = map[string]*runningCompactor{}
)

// startCompactorOnce start one compactor per transport. If the interval get smaller on repeated calls, the
// compactor is replaced. A destroy func is returned. If all destroy funcs with the same transport are called,
// the compactor is stopped.
func startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration) (func(), error) {
	lock.Lock()
	defer lock.Unlock()

	key := fmt.Sprintf("%v", c) // gives: {[server1 server2] keyFile certFile caFile}
	if compactor, foundBefore := compactors[key]; !foundBefore || compactor.interval > interval {
		compactorClient, err := newETCD3Client(c)
		if err != nil {
			return nil, err
		}

		if foundBefore {
			// replace compactor
			compactor.cancel()
			compactor.client.Close()
		} else {
			// start new compactor
			compactor = &runningCompactor{}
			compactors[key] = compactor
		}

		ctx, cancel := context.WithCancel(context.Background())

		compactor.interval = interval
		compactor.cancel = cancel
		compactor.client = compactorClient

		etcd3.StartCompactor(ctx, compactorClient, interval)
	}

	compactors[key].refs++

	return func() {
		lock.Lock()
		defer lock.Unlock()

		compactor := compactors[key]
		compactor.refs--
		if compactor.refs == 0 {
			compactor.cancel()
			compactor.client.Close()
			delete(compactors, key)
		}
	}, nil
}

func newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, error) {
	stopCompactor, err := startCompactorOnce(c.Transport, c.CompactionInterval)
	if err != nil {
		return nil, nil, err
	}
	ctx, cancel := context.WithCancel(context.Background())
	etcd3.StartCompactor(ctx, client, c.CompactionInterval)

	client, err := newETCD3Client(c.Transport)
	if err != nil {
		stopCompactor()
		return nil, nil, err
	}

	var once sync.Once
	destroyFunc := func() {
		cancel()
		client.Close()
		// we know that storage destroy funcs are called multiple times (due to reuse in subresources).
		// Hence, we only destroy once.
		// TODO: fix duplicated storage destroy calls higher level
		once.Do(func() {
			stopCompactor()
			client.Close()
		})
	}
	transformer := c.Transformer
	if transformer == nil {
		transformer = value.IdentityTransformer
	}
	if c.Quorum {
		return etcd3.New(client, c.Codec, c.Prefix, transformer, c.Paging), destroyFunc, nil
	}
	return etcd3.NewWithNoQuorumRead(client, c.Codec, c.Prefix, transformer, c.Paging), destroyFunc, nil
	return etcd3.New(client, c.Codec, c.Prefix, transformer, c.Paging), destroyFunc, nil
}
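The factory now shares one compaction goroutine per transport: startCompactorOnce keys compactors by the transport config, replaces the worker when a caller asks for a shorter interval, and hands back a destroy func that only stops the worker once every user of that transport has released it. A compact stand-alone sketch of that ref-counting pattern, with a generic background loop standing in for the etcd compactor:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type worker struct {
	cancel context.CancelFunc
	refs   int
}

var (
	mu      sync.Mutex
	workers = map[string]*worker{}
)

// acquire starts one background loop per key and ref-counts it; the returned
// release func stops the loop only when the last holder lets go.
func acquire(key string, interval time.Duration) func() {
	mu.Lock()
	defer mu.Unlock()

	w, ok := workers[key]
	if !ok {
		ctx, cancel := context.WithCancel(context.Background())
		w = &worker{cancel: cancel}
		workers[key] = w
		go func() {
			ticker := time.NewTicker(interval)
			defer ticker.Stop()
			for {
				select {
				case <-ctx.Done():
					return
				case <-ticker.C:
					fmt.Println("compact", key) // stand-in for the periodic compaction work
				}
			}
		}()
	}
	w.refs++

	return func() {
		mu.Lock()
		defer mu.Unlock()
		w.refs--
		if w.refs == 0 {
			w.cancel()
			delete(workers, key)
		}
	}
}

func main() {
	release1 := acquire("etcd-cluster-a", 50*time.Millisecond)
	release2 := acquire("etcd-cluster-a", 50*time.Millisecond) // reuses the same worker
	time.Sleep(120 * time.Millisecond)
	release1()
	release2() // last release stops the shared loop
}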
20
vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go
generated
vendored
20
vendor/k8s.io/apiserver/pkg/storage/storagebackend/factory/factory.go
generated
vendored

@ -29,15 +29,23 @@ type DestroyFunc func()
// Create creates a storage backend based on given config.
func Create(c storagebackend.Config) (storage.Interface, DestroyFunc, error) {
	switch c.Type {
	case storagebackend.StorageTypeETCD2:
		return newETCD2Storage(c)
	case "etcd2":
		return nil, nil, fmt.Errorf("%v is no longer a supported storage backend", c.Type)
	case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD3:
		// TODO: We have the following features to implement:
		// - Support secure connection by using key, cert, and CA files.
		// - Honor "https" scheme to support secure connection in gRPC.
		// - Support non-quorum read.
		return newETCD3Storage(c)
	default:
		return nil, nil, fmt.Errorf("unknown storage type: %s", c.Type)
	}
}

// CreateHealthCheck creates a healthcheck function based on given config.
func CreateHealthCheck(c storagebackend.Config) (func() error, error) {
	switch c.Type {
	case "etcd2":
		return nil, fmt.Errorf("%v is no longer a supported storage backend", c.Type)
	case storagebackend.StorageTypeUnset, storagebackend.StorageTypeETCD3:
		return newETCD3HealthCheck(c)
	default:
		return nil, fmt.Errorf("unknown storage type: %s", c.Type)
	}
}
26
vendor/k8s.io/apiserver/pkg/storage/util.go
generated
vendored
26
vendor/k8s.io/apiserver/pkg/storage/util.go
generated
vendored

@ -18,7 +18,6 @@ package storage

import (
	"fmt"
	"strings"
	"sync/atomic"

	"k8s.io/apimachinery/pkg/api/meta"

@ -72,31 +71,6 @@ func NoNamespaceKeyFunc(prefix string, obj runtime.Object) (string, error) {
	return prefix + "/" + name, nil
}

// hasPathPrefix returns true if the string matches pathPrefix exactly, or if is prefixed with pathPrefix at a path segment boundary
func hasPathPrefix(s, pathPrefix string) bool {
	// Short circuit if s doesn't contain the prefix at all
	if !strings.HasPrefix(s, pathPrefix) {
		return false
	}

	pathPrefixLength := len(pathPrefix)

	if len(s) == pathPrefixLength {
		// Exact match
		return true
	}
	if strings.HasSuffix(pathPrefix, "/") {
		// pathPrefix already ensured a path segment boundary
		return true
	}
	if s[pathPrefixLength:pathPrefixLength+1] == "/" {
		// The next character in s is a path segment boundary
		// Check this instead of normalizing pathPrefix to avoid allocating on every call
		return true
	}
	return false
}

// HighWaterMark is a thread-safe object for tracking the maximum value seen
// for some quantity.
type HighWaterMark int64
46
vendor/k8s.io/apiserver/pkg/storage/value/metrics.go
generated
vendored
46
vendor/k8s.io/apiserver/pkg/storage/value/metrics.go
generated
vendored

@ -30,11 +30,23 @@ const (

var (
	transformerLatencies = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "transformation_duration_seconds",
			Help:      "Latencies in seconds of value transformation operations.",
			// In-process transformations (ex. AES CBC) complete on the order of 20 microseconds. However, when
			// external KMS is involved latencies may climb into milliseconds.
			Buckets: prometheus.ExponentialBuckets(5e-6, 2, 14),
		},
		[]string{"transformation_type"},
	)
	deprecatedTransformerLatencies = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "transformation_latencies_microseconds",
			Help:      "Latencies in microseconds of value transformation operations.",
			Help:      "(Deprecated) Latencies in microseconds of value transformation operations.",
			// In-process transformations (ex. AES CBC) complete on the order of 20 microseconds. However, when
			// external KMS is involved latencies may climb into milliseconds.
			Buckets: prometheus.ExponentialBuckets(5, 2, 14),

@ -61,11 +73,20 @@ var (
	)

	dataKeyGenerationLatencies = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "data_key_generation_duration_seconds",
			Help:      "Latencies in seconds of data encryption key(DEK) generation operations.",
			Buckets:   prometheus.ExponentialBuckets(5e-6, 2, 14),
		},
	)
	deprecatedDataKeyGenerationLatencies = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: namespace,
			Subsystem: subsystem,
			Name:      "data_key_generation_latencies_microseconds",
			Help:      "Latencies in microseconds of data encryption key(DEK) generation operations.",
			Help:      "(Deprecated) Latencies in microseconds of data encryption key(DEK) generation operations.",
			Buckets:   prometheus.ExponentialBuckets(5, 2, 14),
		},
	)

@ -84,9 +105,11 @@ var registerMetrics sync.Once
func RegisterMetrics() {
	registerMetrics.Do(func() {
		prometheus.MustRegister(transformerLatencies)
		prometheus.MustRegister(deprecatedTransformerLatencies)
		prometheus.MustRegister(transformerFailuresTotal)
		prometheus.MustRegister(envelopeTransformationCacheMissTotal)
		prometheus.MustRegister(dataKeyGenerationLatencies)
		prometheus.MustRegister(deprecatedDataKeyGenerationLatencies)
		prometheus.MustRegister(dataKeyGenerationFailuresTotal)
	})
}

@ -98,8 +121,8 @@ func RecordTransformation(transformationType string, start time.Time, err error)
		return
	}

	since := sinceInMicroseconds(start)
	transformerLatencies.WithLabelValues(transformationType).Observe(float64(since))
	transformerLatencies.WithLabelValues(transformationType).Observe(sinceInSeconds(start))
	deprecatedTransformerLatencies.WithLabelValues(transformationType).Observe(sinceInMicroseconds(start))
}

// RecordCacheMiss records a miss on Key Encryption Key(KEK) - call to KMS was required to decrypt KEK.

@ -114,11 +137,16 @@ func RecordDataKeyGeneration(start time.Time, err error) {
		return
	}

	since := sinceInMicroseconds(start)
	dataKeyGenerationLatencies.Observe(float64(since))
	dataKeyGenerationLatencies.Observe(sinceInSeconds(start))
	deprecatedDataKeyGenerationLatencies.Observe(sinceInMicroseconds(start))
}

func sinceInMicroseconds(start time.Time) int64 {
	elapsedNanoseconds := time.Since(start).Nanoseconds()
	return elapsedNanoseconds / int64(time.Microsecond)
// sinceInMicroseconds gets the time since the specified start in microseconds.
func sinceInMicroseconds(start time.Time) float64 {
	return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}

// sinceInSeconds gets the time since the specified start in seconds.
func sinceInSeconds(start time.Time) float64 {
	return time.Since(start).Seconds()
}
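metrics.go moves the transformation and DEK-generation histograms from microseconds to seconds (the *_duration_seconds metrics with ExponentialBuckets(5e-6, 2, 14)) while keeping the microsecond histograms as deprecated duplicates. A quick stand-alone sketch of the two helpers' arithmetic to make the unit change concrete:

package main

import (
	"fmt"
	"time"
)

// sinceInSeconds matches the new helper: plain float seconds, the unit
// Prometheus conventions expect for *_duration_seconds histograms.
func sinceInSeconds(start time.Time) float64 {
	return time.Since(start).Seconds()
}

// sinceInMicroseconds matches the deprecated helper kept for the old
// *_latencies_microseconds metrics.
func sinceInMicroseconds(start time.Time) float64 {
	return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}

func main() {
	start := time.Now().Add(-25 * time.Microsecond) // pretend the operation took ~25µs
	fmt.Printf("seconds=%.6g microseconds=%g\n", sinceInSeconds(start), sinceInMicroseconds(start))

	// The seconds-based buckets start at 5e-6 and double, leaving headroom for
	// millisecond-scale external KMS calls; print the first few for reference.
	b := 5e-6
	for i := 0; i < 5; i++ {
		fmt.Printf("bucket[%d] = %gs\n", i, b)
		b *= 2
	}
}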