vendor: revendor metrics-server, custom-metrics-apiserver

Sergiusz Urbaniak 2020-10-28 15:52:52 +01:00
parent 752ce84723
commit 523aa52367
1010 changed files with 91458 additions and 29107 deletions

@ -34,6 +34,6 @@ func (ep *errPicker) String() string {
return ep.p.String()
}
-func (ep *errPicker) Pick(context.Context, balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+func (ep *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
return nil, nil, ep.err
}

@ -52,7 +52,7 @@ type rrBalanced struct {
func (rb *rrBalanced) String() string { return rb.p.String() }
// Pick is called for every client request.
-func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
rb.mu.RLock()
n := len(rb.scs)
rb.mu.RUnlock()

@ -111,7 +111,7 @@ func (e *ResolverGroup) Close() {
}
// Build creates or reuses an etcd resolver for the etcd cluster name identified by the authority part of the target.
-func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
if len(target.Authority) < 1 {
return nil, fmt.Errorf("'etcd' target scheme requires non-empty authority identifying etcd cluster being routed to")
}
@ -179,7 +179,7 @@ func epsToAddrs(eps ...string) (addrs []resolver.Address) {
return addrs
}
-func (*Resolver) ResolveNow(o resolver.ResolveNowOption) {}
+func (*Resolver) ResolveNow(o resolver.ResolveNowOptions) {}
func (r *Resolver) Close() {
es, err := bldr.getResolverGroup(r.endpointID)

@ -37,7 +37,6 @@ import (
"google.golang.org/grpc/codes"
grpccredentials "google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
@ -397,13 +396,6 @@ func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCrede
return creds
}
-// WithRequireLeader requires client requests to only succeed
-// when the cluster has a leader.
-func WithRequireLeader(ctx context.Context) context.Context {
-md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
-return metadata.NewOutgoingContext(ctx, md)
-}
func newClient(cfg *Config) (*Client, error) {
if cfg == nil {
cfg = &Config{}

vendor/go.etcd.io/etcd/clientv3/ctx.go (new vendored file, generated, 64 lines)

@ -0,0 +1,64 @@
// Copyright 2020 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clientv3
import (
"context"
"strings"
"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
"go.etcd.io/etcd/version"
"google.golang.org/grpc/metadata"
)
// WithRequireLeader requires client requests to only succeed
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
md, ok := metadata.FromOutgoingContext(ctx)
if !ok { // no outgoing metadata ctx key, create one
md = metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewOutgoingContext(ctx, md)
}
copied := md.Copy() // avoid racey updates
// overwrite/add 'hasleader' key/value
metadataSet(copied, rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewOutgoingContext(ctx, copied)
}
// embeds client version
func withVersion(ctx context.Context) context.Context {
md, ok := metadata.FromOutgoingContext(ctx)
if !ok { // no outgoing metadata ctx key, create one
md = metadata.Pairs(rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
return metadata.NewOutgoingContext(ctx, md)
}
copied := md.Copy() // avoid racey updates
// overwrite/add version key/value
metadataSet(copied, rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
return metadata.NewOutgoingContext(ctx, copied)
}
func metadataGet(md metadata.MD, k string) []string {
k = strings.ToLower(k)
return md[k]
}
func metadataSet(md metadata.MD, k string, vals ...string) {
if len(vals) == 0 {
return
}
k = strings.ToLower(k)
md[k] = vals
}
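For orientation, a minimal usage sketch of the new WithRequireLeader helper as seen from a clientv3 caller. This example is not part of the diff; the endpoint address and key name are placeholders.

package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // placeholder endpoint
		DialTimeout: 2 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// WithRequireLeader adds the "hasleader" metadata key, so the request
	// only succeeds while the cluster has a leader.
	ctx, cancel := context.WithTimeout(clientv3.WithRequireLeader(context.Background()), time.Second)
	defer cancel()

	resp, err := cli.Get(ctx, "example-key") // placeholder key
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Println("kvs:", resp.Kvs)
}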

@ -20,6 +20,7 @@ import (
"io"
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
"go.uber.org/zap"
"google.golang.org/grpc"
)
@ -68,6 +69,7 @@ type Maintenance interface {
}
type maintenance struct {
+lg *zap.Logger
dial func(endpoint string) (pb.MaintenanceClient, func(), error)
remote pb.MaintenanceClient
callOpts []grpc.CallOption
@ -75,6 +77,7 @@ type maintenance struct {
func NewMaintenance(c *Client) Maintenance {
api := &maintenance{
+lg: c.lg,
dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
conn, err := c.Dial(endpoint)
if err != nil {
@ -93,6 +96,7 @@ func NewMaintenance(c *Client) Maintenance {
func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
api := &maintenance{
+lg: c.lg,
dial: func(string) (pb.MaintenanceClient, func(), error) {
return remote, func() {}, nil
},
@ -193,23 +197,32 @@ func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
return nil, toErr(ctx, err)
}
+m.lg.Info("opened snapshot stream; downloading")
pr, pw := io.Pipe()
go func() {
for {
resp, err := ss.Recv()
if err != nil {
+switch err {
+case io.EOF:
+m.lg.Info("completed snapshot read; closing")
+default:
+m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err))
+}
pw.CloseWithError(err)
return
}
-if resp == nil && err == nil {
-break
-}
+// can "resp == nil && err == nil"
+// before we receive snapshot SHA digest?
+// No, server sends EOF with an empty response
+// after it sends SHA digest at the end
if _, werr := pw.Write(resp.Blob); werr != nil {
pw.CloseWithError(werr)
return
}
}
pw.Close()
}()
return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
}
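The goroutine above feeds an io.Pipe, so Maintenance.Snapshot hands callers a plain io.ReadCloser. A rough usage sketch, not part of this commit; the package name and target path are placeholders.

package backup

import (
	"context"
	"io"
	"os"

	"go.etcd.io/etcd/clientv3"
)

// saveSnapshot streams a snapshot of the cluster into a local file.
func saveSnapshot(ctx context.Context, cli *clientv3.Client, path string) error {
	rc, err := cli.Snapshot(ctx)
	if err != nil {
		return err
	}
	defer rc.Close()

	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	// io.Copy drains the pipe reader until the server closes the stream
	// (io.EOF after the trailing SHA digest message, per the comment above).
	_, err = io.Copy(f, rc)
	return err
}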

@ -38,6 +38,7 @@ import (
func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor {
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ctx = withVersion(ctx)
grpcOpts, retryOpts := filterCallOptions(opts)
callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
// short circuit for simplicity, and avoiding allocations.
@ -103,6 +104,7 @@ func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOpt
func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor {
intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ctx = withVersion(ctx)
grpcOpts, retryOpts := filterCallOptions(opts)
callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
// short circuit for simplicity, and avoiding allocations.
@ -113,10 +115,9 @@ func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOp
return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
}
newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
-logger.Warn("retry stream intercept", zap.Error(err))
if err != nil {
-// TODO(mwitkow): Maybe dial and transport errors should be retriable?
-return nil, err
+logger.Error("streamer failed to create ClientStream", zap.Error(err))
+return nil, err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
}
retryingStreamer := &serverStreamingRetryingStream{
client: c,
@ -185,6 +186,7 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
if !attemptRetry {
return lastErr // success or hard failure
}
// We start off from attempt 1, because zeroth was already made on normal SendMsg().
for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
@ -192,12 +194,13 @@ func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
}
newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
if err != nil {
-// TODO(mwitkow): Maybe dial and transport errors should be retriable?
-return err
+s.client.lg.Error("failed reestablishStreamAndResendBuffer", zap.Error(err))
+return err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
}
s.setStream(newStream)
+s.client.lg.Warn("retrying RecvMsg", zap.Error(lastErr))
attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
//fmt.Printf("Received message and indicate: %v %v\n", attemptRetry, lastErr)
if !attemptRetry {
return lastErr
}
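Both interceptors now call withVersion before issuing the call, so every RPC carries the client's API version in its outgoing gRPC metadata. A small sketch of what that adds; it is not part of the diff and, because the helper is unexported, it would have to live inside package clientv3 (for example in a test).

package clientv3

import (
	"context"
	"fmt"

	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
	"google.golang.org/grpc/metadata"
)

// printVersionMetadata shows the metadata key stamped by withVersion.
func printVersionMetadata() {
	ctx := withVersion(context.Background())
	md, _ := metadata.FromOutgoingContext(ctx)
	// Prints the client API version attached to every call, e.g. [3.4].
	fmt.Println(md.Get(rpctypes.MetadataClientAPIVersionKey))
}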

@ -25,6 +25,7 @@ import (
pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
mvccpb "go.etcd.io/etcd/mvcc/mvccpb"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
@ -140,6 +141,7 @@ type watcher struct {
// streams holds all the active grpc streams keyed by ctx value.
streams map[string]*watchGrpcStream
+lg *zap.Logger
}
// watchGrpcStream tracks all watch resources attached to a single grpc stream.
@ -176,6 +178,8 @@ type watchGrpcStream struct {
resumec chan struct{}
// closeErr is the error that closed the watch stream
closeErr error
+lg *zap.Logger
}
// watchStreamRequest is a union of the supported watch request operation types
@ -242,6 +246,7 @@ func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
}
if c != nil {
w.callOpts = c.callOpts
+w.lg = c.lg
}
return w
}
@ -273,6 +278,7 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
errc: make(chan error, 1),
closingc: make(chan *watcherStream),
resumec: make(chan struct{}),
+lg: w.lg,
}
go wgs.run()
return wgs
@ -544,10 +550,18 @@ func (w *watchGrpcStream) run() {
w.resuming = append(w.resuming, ws)
if len(w.resuming) == 1 {
// head of resume queue, can register a new watcher
-wc.Send(ws.initReq.toPB())
+if err := wc.Send(ws.initReq.toPB()); err != nil {
+if w.lg != nil {
+w.lg.Debug("error when sending request", zap.Error(err))
+}
+}
}
case *progressRequest:
-wc.Send(wreq.toPB())
+if err := wc.Send(wreq.toPB()); err != nil {
+if w.lg != nil {
+w.lg.Debug("error when sending request", zap.Error(err))
+}
+}
}
// new events from the watch client
@ -571,7 +585,11 @@ func (w *watchGrpcStream) run() {
}
if ws := w.nextResume(); ws != nil {
-wc.Send(ws.initReq.toPB())
+if err := wc.Send(ws.initReq.toPB()); err != nil {
+if w.lg != nil {
+w.lg.Debug("error when sending request", zap.Error(err))
+}
+}
}
// reset for next iteration
@ -616,7 +634,14 @@ func (w *watchGrpcStream) run() {
},
}
req := &pb.WatchRequest{RequestUnion: cr}
-wc.Send(req)
+if w.lg != nil {
+w.lg.Debug("sending watch cancel request for failed dispatch", zap.Int64("watch-id", pbresp.WatchId))
+}
+if err := wc.Send(req); err != nil {
+if w.lg != nil {
+w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", pbresp.WatchId), zap.Error(err))
+}
+}
}
// watch client failed on Recv; spawn another if possible
@ -629,7 +654,11 @@ func (w *watchGrpcStream) run() {
return
}
if ws := w.nextResume(); ws != nil {
-wc.Send(ws.initReq.toPB())
+if err := wc.Send(ws.initReq.toPB()); err != nil {
+if w.lg != nil {
+w.lg.Debug("error when sending request", zap.Error(err))
+}
+}
}
cancelSet = make(map[int64]struct{})
@ -637,6 +666,25 @@ func (w *watchGrpcStream) run() {
return
case ws := <-w.closingc:
+if ws.id != -1 {
+// client is closing an established watch; close it on the server proactively instead of waiting
+// to close when the next message arrives
+cancelSet[ws.id] = struct{}{}
+cr := &pb.WatchRequest_CancelRequest{
+CancelRequest: &pb.WatchCancelRequest{
+WatchId: ws.id,
+},
+}
+req := &pb.WatchRequest{RequestUnion: cr}
+if w.lg != nil {
+w.lg.Debug("sending watch cancel request for closed watcher", zap.Int64("watch-id", ws.id))
+}
+if err := wc.Send(req); err != nil {
+if w.lg != nil {
+w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", ws.id), zap.Error(err))
+}
+}
+}
w.closeSubstream(ws)
delete(closing, ws)
// no more watchers on this stream, shutdown
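From the caller's side, the closingc path above is reached simply by cancelling a watch context: the client now tells the server to drop an established watch right away instead of waiting for the next message. A minimal sketch, not part of the diff; the package name and key name are placeholders, and cli is assumed to be an existing client.

package watchexample

import (
	"context"
	"fmt"

	"go.etcd.io/etcd/clientv3"
)

func watchAndCancel(cli *clientv3.Client) {
	ctx, cancel := context.WithCancel(context.Background())
	wch := cli.Watch(ctx, "example-key") // placeholder key

	go func() {
		for wr := range wch {
			for _, ev := range wr.Events {
				fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
			}
		}
	}()

	// Cancelling the context closes the watcher; with this change the client
	// also sends a WatchCancelRequest for the established watch proactively.
	cancel()
}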