Add vendor folder to git

Lucas Käldström 2017-06-26 19:23:05 +03:00
parent 66cf5eaafb
commit 183585f56f
No known key found for this signature in database
GPG key ID: 600FEFBBD0D40D21
6916 changed files with 2629581 additions and 1 deletion

vendor/github.com/coreos/etcd/integration/bridge.go generated vendored Normal file

@@ -0,0 +1,139 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"fmt"
"io"
"net"
"sync"
"github.com/coreos/etcd/pkg/transport"
)
// bridge creates a unix socket bridge to another unix socket, making it possible
// to disconnect grpc network connections without closing the logical grpc connection.
type bridge struct {
inaddr string
outaddr string
l net.Listener
conns map[*bridgeConn]struct{}
stopc chan struct{}
wg sync.WaitGroup
mu sync.Mutex
}
func newBridge(addr string) (*bridge, error) {
b := &bridge{
// bridge "port" is ("%05d%05d0", port, pid) since go1.8 expects the port to be a number
inaddr: addr + "0",
outaddr: addr,
conns: make(map[*bridgeConn]struct{}),
stopc: make(chan struct{}, 1),
}
l, err := transport.NewUnixListener(b.inaddr)
if err != nil {
return nil, fmt.Errorf("listen failed on socket %s (%v)", addr, err)
}
b.l = l
b.wg.Add(1)
go b.serveListen()
return b, nil
}
func (b *bridge) URL() string { return "unix://" + b.inaddr }
func (b *bridge) Close() {
b.l.Close()
select {
case b.stopc <- struct{}{}:
default:
}
b.wg.Wait()
}
func (b *bridge) Reset() {
b.mu.Lock()
defer b.mu.Unlock()
for bc := range b.conns {
bc.Close()
}
b.conns = make(map[*bridgeConn]struct{})
}
func (b *bridge) serveListen() {
defer func() {
b.l.Close()
b.mu.Lock()
for bc := range b.conns {
bc.Close()
}
b.mu.Unlock()
b.wg.Done()
}()
for {
inc, ierr := b.l.Accept()
if ierr != nil {
return
}
outc, oerr := net.Dial("unix", b.outaddr)
if oerr != nil {
inc.Close()
return
}
bc := &bridgeConn{inc, outc}
b.wg.Add(1)
b.mu.Lock()
b.conns[bc] = struct{}{}
go b.serveConn(bc)
b.mu.Unlock()
}
}
func (b *bridge) serveConn(bc *bridgeConn) {
defer func() {
bc.Close()
b.mu.Lock()
delete(b.conns, bc)
b.mu.Unlock()
b.wg.Done()
}()
var wg sync.WaitGroup
wg.Add(2)
go func() {
io.Copy(bc.out, bc.in)
wg.Done()
}()
go func() {
io.Copy(bc.in, bc.out)
wg.Done()
}()
wg.Wait()
}
type bridgeConn struct {
in net.Conn
out net.Conn
}
func (bc *bridgeConn) Close() {
bc.in.Close()
bc.out.Close()
}
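
A minimal sketch of how this bridge is intended to be used from inside the package; the test name and the "test.sock" socket path are illustrative only, not part of the vendored code.

package integration

import (
	"net"
	"testing"
)

func TestBridgeSketch(t *testing.T) {
	// newBridge listens on the given address plus a trailing "0" and pipes
	// every accepted connection to the original address.
	b, err := newBridge("test.sock") // hypothetical socket path
	if err != nil {
		t.Fatal(err)
	}
	defer b.Close()

	// Clients dial the bridge's listen address; payloads are forwarded to
	// whatever is (or would be) serving "test.sock".
	conn, err := net.Dial("unix", "test.sock0")
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()

	// Reset severs every live connection while keeping the bridge itself up,
	// which is what member.DropConnections relies on for gRPC fault injection.
	b.Reset()
}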

vendor/github.com/coreos/etcd/integration/cluster.go generated vendored Normal file

@@ -0,0 +1,898 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"crypto/tls"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/http/httptest"
"os"
"reflect"
"sort"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api"
"github.com/coreos/etcd/etcdserver/api/v2http"
"github.com/coreos/etcd/etcdserver/api/v3rpc"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/rafthttp"
"github.com/coreos/pkg/capnslog"
)
const (
tickDuration = 10 * time.Millisecond
clusterName = "etcd"
requestTimeout = 20 * time.Second
basePort = 21000
UrlScheme = "unix"
UrlSchemeTLS = "unixs"
)
var (
electionTicks = 10
// integration test uses unique ports, counting up, to listen for each
// member, ensuring restarted members can listen on the same port again.
localListenCount int64 = 0
testTLSInfo = transport.TLSInfo{
KeyFile: "./fixtures/server.key.insecure",
CertFile: "./fixtures/server.crt",
TrustedCAFile: "./fixtures/ca.crt",
ClientCertAuth: true,
}
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "integration")
)
type ClusterConfig struct {
Size int
PeerTLS *transport.TLSInfo
ClientTLS *transport.TLSInfo
DiscoveryURL string
UseGRPC bool
QuotaBackendBytes int64
}
type cluster struct {
cfg *ClusterConfig
Members []*member
}
func init() {
// manually enable v3 capability since we know the cluster members all support v3.
api.EnableCapability(api.V3rpcCapability)
}
func schemeFromTLSInfo(tls *transport.TLSInfo) string {
if tls == nil {
return UrlScheme
}
return UrlSchemeTLS
}
func (c *cluster) fillClusterForMembers() error {
if c.cfg.DiscoveryURL != "" {
// cluster will be discovered
return nil
}
addrs := make([]string, 0)
for _, m := range c.Members {
scheme := schemeFromTLSInfo(m.PeerTLSInfo)
for _, l := range m.PeerListeners {
addrs = append(addrs, fmt.Sprintf("%s=%s://%s", m.Name, scheme, l.Addr().String()))
}
}
clusterStr := strings.Join(addrs, ",")
var err error
for _, m := range c.Members {
m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
if err != nil {
return err
}
}
return nil
}
func newCluster(t *testing.T, cfg *ClusterConfig) *cluster {
c := &cluster{cfg: cfg}
ms := make([]*member, cfg.Size)
for i := 0; i < cfg.Size; i++ {
ms[i] = c.mustNewMember(t)
}
c.Members = ms
if err := c.fillClusterForMembers(); err != nil {
t.Fatal(err)
}
return c
}
// NewCluster returns an unlaunched cluster of the given size which has been
// set to use static bootstrap.
func NewCluster(t *testing.T, size int) *cluster {
return newCluster(t, &ClusterConfig{Size: size})
}
// NewClusterByConfig returns an unlaunched cluster defined by a cluster configuration
func NewClusterByConfig(t *testing.T, cfg *ClusterConfig) *cluster {
return newCluster(t, cfg)
}
func (c *cluster) Launch(t *testing.T) {
errc := make(chan error)
for _, m := range c.Members {
// Members are launched in separate goroutines because if they boot
// using discovery url, they have to wait for others to register to continue.
go func(m *member) {
errc <- m.Launch()
}(m)
}
for range c.Members {
if err := <-errc; err != nil {
t.Fatalf("error setting up member: %v", err)
}
}
// wait for the cluster to be stable so it can receive future client requests
c.waitMembersMatch(t, c.HTTPMembers())
c.waitVersion()
}
func (c *cluster) URL(i int) string {
return c.Members[i].ClientURLs[0].String()
}
// URLs returns a list of all active client URLs in the cluster
func (c *cluster) URLs() []string {
urls := make([]string, 0)
for _, m := range c.Members {
select {
case <-m.s.StopNotify():
continue
default:
}
for _, u := range m.ClientURLs {
urls = append(urls, u.String())
}
}
return urls
}
// HTTPMembers returns a list of all active members as client.Members
func (c *cluster) HTTPMembers() []client.Member {
ms := []client.Member{}
for _, m := range c.Members {
pScheme := schemeFromTLSInfo(m.PeerTLSInfo)
cScheme := schemeFromTLSInfo(m.ClientTLSInfo)
cm := client.Member{Name: m.Name}
for _, ln := range m.PeerListeners {
cm.PeerURLs = append(cm.PeerURLs, pScheme+"://"+ln.Addr().String())
}
for _, ln := range m.ClientListeners {
cm.ClientURLs = append(cm.ClientURLs, cScheme+"://"+ln.Addr().String())
}
ms = append(ms, cm)
}
return ms
}
func (c *cluster) mustNewMember(t *testing.T) *member {
m := mustNewMember(t,
memberConfig{
name: c.name(rand.Int()),
peerTLS: c.cfg.PeerTLS,
clientTLS: c.cfg.ClientTLS,
quotaBackendBytes: c.cfg.QuotaBackendBytes,
})
m.DiscoveryURL = c.cfg.DiscoveryURL
if c.cfg.UseGRPC {
if err := m.listenGRPC(); err != nil {
t.Fatal(err)
}
}
return m
}
func (c *cluster) addMember(t *testing.T) {
m := c.mustNewMember(t)
scheme := schemeFromTLSInfo(c.cfg.PeerTLS)
// send add request to the cluster
var err error
for i := 0; i < len(c.Members); i++ {
clientURL := c.URL(i)
peerURL := scheme + "://" + m.PeerListeners[0].Addr().String()
if err = c.addMemberByURL(t, clientURL, peerURL); err == nil {
break
}
}
if err != nil {
t.Fatalf("add member failed on all members error: %v", err)
}
m.InitialPeerURLsMap = types.URLsMap{}
for _, mm := range c.Members {
m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs
}
m.InitialPeerURLsMap[m.Name] = m.PeerURLs
m.NewCluster = false
if err := m.Launch(); err != nil {
t.Fatal(err)
}
c.Members = append(c.Members, m)
// wait for the cluster to be stable so it can receive future client requests
c.waitMembersMatch(t, c.HTTPMembers())
}
func (c *cluster) addMemberByURL(t *testing.T, clientURL, peerURL string) error {
cc := MustNewHTTPClient(t, []string{clientURL}, c.cfg.ClientTLS)
ma := client.NewMembersAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
if _, err := ma.Add(ctx, peerURL); err != nil {
return err
}
cancel()
// wait for the add-node entry to be applied in the cluster
members := append(c.HTTPMembers(), client.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}})
c.waitMembersMatch(t, members)
return nil
}
func (c *cluster) AddMember(t *testing.T) {
c.addMember(t)
}
func (c *cluster) RemoveMember(t *testing.T, id uint64) {
if err := c.removeMember(t, id); err != nil {
t.Fatal(err)
}
}
func (c *cluster) removeMember(t *testing.T, id uint64) error {
// send remove request to the cluster
cc := MustNewHTTPClient(t, c.URLs(), c.cfg.ClientTLS)
ma := client.NewMembersAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
if err := ma.Remove(ctx, types.ID(id).String()); err != nil {
return err
}
cancel()
newMembers := make([]*member, 0)
for _, m := range c.Members {
if uint64(m.s.ID()) != id {
newMembers = append(newMembers, m)
} else {
select {
case <-m.s.StopNotify():
m.Terminate(t)
// 1s stop delay + election timeout + 1s disk and network delay + connection write timeout
// TODO: remove connection write timeout by selecting on http response closeNotifier
// blocking on https://github.com/golang/go/issues/9524
case <-time.After(time.Second + time.Duration(electionTicks)*tickDuration + time.Second + rafthttp.ConnWriteTimeout):
t.Fatalf("failed to remove member %s in time", m.s.ID())
}
}
}
c.Members = newMembers
c.waitMembersMatch(t, c.HTTPMembers())
return nil
}
func (c *cluster) Terminate(t *testing.T) {
for _, m := range c.Members {
m.Terminate(t)
}
}
func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) {
for _, u := range c.URLs() {
cc := MustNewHTTPClient(t, []string{u}, c.cfg.ClientTLS)
ma := client.NewMembersAPI(cc)
for {
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
ms, err := ma.List(ctx)
cancel()
if err == nil && isMembersEqual(ms, membs) {
break
}
time.Sleep(tickDuration)
}
}
return
}
func (c *cluster) WaitLeader(t *testing.T) int { return c.waitLeader(t, c.Members) }
// waitLeader waits until given members agree on the same leader.
func (c *cluster) waitLeader(t *testing.T, membs []*member) int {
possibleLead := make(map[uint64]bool)
var lead uint64
for _, m := range membs {
possibleLead[uint64(m.s.ID())] = true
}
for lead == 0 || !possibleLead[lead] {
lead = 0
for _, m := range membs {
select {
case <-m.s.StopNotify():
continue
default:
}
if lead != 0 && lead != m.s.Lead() {
lead = 0
time.Sleep(10 * tickDuration)
break
}
lead = m.s.Lead()
}
}
for i, m := range membs {
if uint64(m.s.ID()) == lead {
return i
}
}
return -1
}
func (c *cluster) WaitNoLeader(t *testing.T) { c.waitNoLeader(t, c.Members) }
// waitNoLeader waits until given members lose leader.
func (c *cluster) waitNoLeader(t *testing.T, membs []*member) {
noLeader := false
for !noLeader {
noLeader = true
for _, m := range membs {
select {
case <-m.s.StopNotify():
continue
default:
}
if m.s.Lead() != 0 {
noLeader = false
time.Sleep(10 * tickDuration)
break
}
}
}
}
func (c *cluster) waitVersion() {
for _, m := range c.Members {
for {
if m.s.ClusterVersion() != nil {
break
}
time.Sleep(tickDuration)
}
}
}
func (c *cluster) name(i int) string {
return fmt.Sprint(i)
}
// isMembersEqual checks whether the two member lists are equal, ignoring the ID field.
// The given wmembs should always have the ID field set to the empty string.
func isMembersEqual(membs []client.Member, wmembs []client.Member) bool {
sort.Sort(SortableMemberSliceByPeerURLs(membs))
sort.Sort(SortableMemberSliceByPeerURLs(wmembs))
for i := range membs {
membs[i].ID = ""
}
return reflect.DeepEqual(membs, wmembs)
}
func newLocalListener(t *testing.T) net.Listener {
c := atomic.AddInt64(&localListenCount, 1)
// Go 1.8+ allows only numbers in port
addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+basePort, os.Getpid())
return NewListenerWithAddr(t, addr)
}
func NewListenerWithAddr(t *testing.T, addr string) net.Listener {
l, err := transport.NewUnixListener(addr)
if err != nil {
t.Fatal(err)
}
return l
}
type member struct {
etcdserver.ServerConfig
PeerListeners, ClientListeners []net.Listener
grpcListener net.Listener
// PeerTLSInfo enables peer TLS when set
PeerTLSInfo *transport.TLSInfo
// ClientTLSInfo enables client TLS when set
ClientTLSInfo *transport.TLSInfo
raftHandler *testutil.PauseableHandler
s *etcdserver.EtcdServer
hss []*httptest.Server
grpcServer *grpc.Server
grpcAddr string
grpcBridge *bridge
keepDataDirTerminate bool
}
func (m *member) GRPCAddr() string { return m.grpcAddr }
type memberConfig struct {
name string
peerTLS *transport.TLSInfo
clientTLS *transport.TLSInfo
quotaBackendBytes int64
}
// mustNewMember returns an initialized member with the given name. If peerTLS is
// set, it will use the https scheme to communicate between peers.
func mustNewMember(t *testing.T, mcfg memberConfig) *member {
var err error
m := &member{}
peerScheme := schemeFromTLSInfo(mcfg.peerTLS)
clientScheme := schemeFromTLSInfo(mcfg.clientTLS)
pln := newLocalListener(t)
m.PeerListeners = []net.Listener{pln}
m.PeerURLs, err = types.NewURLs([]string{peerScheme + "://" + pln.Addr().String()})
if err != nil {
t.Fatal(err)
}
m.PeerTLSInfo = mcfg.peerTLS
cln := newLocalListener(t)
m.ClientListeners = []net.Listener{cln}
m.ClientURLs, err = types.NewURLs([]string{clientScheme + "://" + cln.Addr().String()})
if err != nil {
t.Fatal(err)
}
m.ClientTLSInfo = mcfg.clientTLS
m.Name = mcfg.name
m.DataDir, err = ioutil.TempDir(os.TempDir(), "etcd")
if err != nil {
t.Fatal(err)
}
clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.name, peerScheme, pln.Addr().String())
m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
if err != nil {
t.Fatal(err)
}
m.InitialClusterToken = clusterName
m.NewCluster = true
m.BootstrapTimeout = 10 * time.Millisecond
if m.PeerTLSInfo != nil {
m.ServerConfig.PeerTLSInfo = *m.PeerTLSInfo
}
m.ElectionTicks = electionTicks
m.TickMs = uint(tickDuration / time.Millisecond)
m.QuotaBackendBytes = mcfg.quotaBackendBytes
return m
}
// listenGRPC starts a grpc server over a unix domain socket on the member
func (m *member) listenGRPC() error {
// prefix with localhost so cert has right domain
m.grpcAddr = "localhost:" + m.Name
l, err := transport.NewUnixListener(m.grpcAddr)
if err != nil {
return fmt.Errorf("listen failed on grpc socket %s (%v)", m.grpcAddr, err)
}
m.grpcBridge, err = newBridge(m.grpcAddr)
if err != nil {
l.Close()
return err
}
m.grpcAddr = m.grpcBridge.URL()
m.grpcListener = l
return nil
}
func (m *member) electionTimeout() time.Duration {
return time.Duration(m.s.Cfg.ElectionTicks) * time.Millisecond
}
func (m *member) DropConnections() { m.grpcBridge.Reset() }
// NewClientV3 creates a new grpc client connection to the member
func NewClientV3(m *member) (*clientv3.Client, error) {
if m.grpcAddr == "" {
return nil, fmt.Errorf("member not configured for grpc")
}
cfg := clientv3.Config{
Endpoints: []string{m.grpcAddr},
DialTimeout: 5 * time.Second,
}
if m.ClientTLSInfo != nil {
tls, err := m.ClientTLSInfo.ClientConfig()
if err != nil {
return nil, err
}
cfg.TLS = tls
}
return newClientV3(cfg)
}
// Clone returns a member with the same server configuration. The returned
// member will not set PeerListeners and ClientListeners.
func (m *member) Clone(t *testing.T) *member {
mm := &member{}
mm.ServerConfig = m.ServerConfig
var err error
clientURLStrs := m.ClientURLs.StringSlice()
mm.ClientURLs, err = types.NewURLs(clientURLStrs)
if err != nil {
// this should never fail
panic(err)
}
peerURLStrs := m.PeerURLs.StringSlice()
mm.PeerURLs, err = types.NewURLs(peerURLStrs)
if err != nil {
// this should never fail
panic(err)
}
clusterStr := m.InitialPeerURLsMap.String()
mm.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
if err != nil {
// this should never fail
panic(err)
}
mm.InitialClusterToken = m.InitialClusterToken
mm.ElectionTicks = m.ElectionTicks
mm.PeerTLSInfo = m.PeerTLSInfo
mm.ClientTLSInfo = m.ClientTLSInfo
return mm
}
// Launch starts a member based on ServerConfig, PeerListeners
// and ClientListeners.
func (m *member) Launch() error {
plog.Printf("launching %s (%s)", m.Name, m.grpcAddr)
var err error
if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil {
return fmt.Errorf("failed to initialize the etcd server: %v", err)
}
m.s.SyncTicker = time.Tick(500 * time.Millisecond)
m.s.Start()
m.raftHandler = &testutil.PauseableHandler{Next: v2http.NewPeerHandler(m.s)}
for _, ln := range m.PeerListeners {
hs := &httptest.Server{
Listener: ln,
Config: &http.Server{Handler: m.raftHandler},
}
if m.PeerTLSInfo == nil {
hs.Start()
} else {
hs.TLS, err = m.PeerTLSInfo.ServerConfig()
if err != nil {
return err
}
hs.StartTLS()
}
m.hss = append(m.hss, hs)
}
for _, ln := range m.ClientListeners {
hs := &httptest.Server{
Listener: ln,
Config: &http.Server{Handler: v2http.NewClientHandler(m.s, m.ServerConfig.ReqTimeout())},
}
if m.ClientTLSInfo == nil {
hs.Start()
} else {
hs.TLS, err = m.ClientTLSInfo.ServerConfig()
if err != nil {
return err
}
hs.StartTLS()
}
m.hss = append(m.hss, hs)
}
if m.grpcListener != nil {
var (
tlscfg *tls.Config
)
if m.ClientTLSInfo != nil && !m.ClientTLSInfo.Empty() {
tlscfg, err = m.ClientTLSInfo.ServerConfig()
if err != nil {
return err
}
}
m.grpcServer = v3rpc.Server(m.s, tlscfg)
go m.grpcServer.Serve(m.grpcListener)
}
plog.Printf("launched %s (%s)", m.Name, m.grpcAddr)
return nil
}
func (m *member) WaitOK(t *testing.T) {
cc := MustNewHTTPClient(t, []string{m.URL()}, m.ClientTLSInfo)
kapi := client.NewKeysAPI(cc)
for {
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
_, err := kapi.Get(ctx, "/", nil)
if err != nil {
time.Sleep(tickDuration)
continue
}
cancel()
break
}
for m.s.Leader() == 0 {
time.Sleep(tickDuration)
}
}
func (m *member) URL() string { return m.ClientURLs[0].String() }
func (m *member) Pause() {
m.raftHandler.Pause()
m.s.PauseSending()
}
func (m *member) Resume() {
m.raftHandler.Resume()
m.s.ResumeSending()
}
// Close stops the member's etcdserver and closes its connections
func (m *member) Close() {
if m.grpcBridge != nil {
m.grpcBridge.Close()
m.grpcBridge = nil
}
if m.grpcServer != nil {
m.grpcServer.Stop()
m.grpcServer = nil
}
m.s.HardStop()
for _, hs := range m.hss {
hs.CloseClientConnections()
hs.Close()
}
}
// Stop stops the member, but the data dir of the member is preserved.
func (m *member) Stop(t *testing.T) {
plog.Printf("stopping %s (%s)", m.Name, m.grpcAddr)
m.Close()
m.hss = nil
plog.Printf("stopped %s (%s)", m.Name, m.grpcAddr)
}
// checkLeaderTransition waits for leader transition, returning the new leader ID.
func checkLeaderTransition(t *testing.T, m *member, oldLead uint64) uint64 {
interval := time.Duration(m.s.Cfg.TickMs) * time.Millisecond
for m.s.Lead() == 0 || (m.s.Lead() == oldLead) {
time.Sleep(interval)
}
return m.s.Lead()
}
// StopNotify unblocks when a member stop completes
func (m *member) StopNotify() <-chan struct{} {
return m.s.StopNotify()
}
// Restart starts the member using the preserved data dir.
func (m *member) Restart(t *testing.T) error {
plog.Printf("restarting %s (%s)", m.Name, m.grpcAddr)
newPeerListeners := make([]net.Listener, 0)
for _, ln := range m.PeerListeners {
newPeerListeners = append(newPeerListeners, NewListenerWithAddr(t, ln.Addr().String()))
}
m.PeerListeners = newPeerListeners
newClientListeners := make([]net.Listener, 0)
for _, ln := range m.ClientListeners {
newClientListeners = append(newClientListeners, NewListenerWithAddr(t, ln.Addr().String()))
}
m.ClientListeners = newClientListeners
if m.grpcListener != nil {
if err := m.listenGRPC(); err != nil {
t.Fatal(err)
}
}
err := m.Launch()
plog.Printf("restarted %s (%s)", m.Name, m.grpcAddr)
return err
}
// Terminate stops the member and removes the data dir.
func (m *member) Terminate(t *testing.T) {
plog.Printf("terminating %s (%s)", m.Name, m.grpcAddr)
m.Close()
if !m.keepDataDirTerminate {
if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil {
t.Fatal(err)
}
}
plog.Printf("terminated %s (%s)", m.Name, m.grpcAddr)
}
// Metric gets the metric value for a member
func (m *member) Metric(metricName string) (string, error) {
cfgtls := transport.TLSInfo{}
tr, err := transport.NewTimeoutTransport(cfgtls, time.Second, time.Second, time.Second)
if err != nil {
return "", err
}
cli := &http.Client{Transport: tr}
resp, err := cli.Get(m.ClientURLs[0].String() + "/metrics")
if err != nil {
return "", err
}
defer resp.Body.Close()
b, rerr := ioutil.ReadAll(resp.Body)
if rerr != nil {
return "", rerr
}
lines := strings.Split(string(b), "\n")
for _, l := range lines {
if strings.HasPrefix(l, metricName) {
return strings.Split(l, " ")[1], nil
}
}
return "", nil
}
// InjectPartition drops connections from m to others, vice versa.
func (m *member) InjectPartition(t *testing.T, others []*member) {
for _, other := range others {
m.s.CutPeer(other.s.ID())
other.s.CutPeer(m.s.ID())
}
}
// RecoverPartition recovers connections from m to others, vice versa.
func (m *member) RecoverPartition(t *testing.T, others []*member) {
for _, other := range others {
m.s.MendPeer(other.s.ID())
other.s.MendPeer(m.s.ID())
}
}
func MustNewHTTPClient(t *testing.T, eps []string, tls *transport.TLSInfo) client.Client {
cfgtls := transport.TLSInfo{}
if tls != nil {
cfgtls = *tls
}
cfg := client.Config{Transport: mustNewTransport(t, cfgtls), Endpoints: eps}
c, err := client.New(cfg)
if err != nil {
t.Fatal(err)
}
return c
}
func mustNewTransport(t *testing.T, tlsInfo transport.TLSInfo) *http.Transport {
// tick in integration test is short, so 1s dial timeout could play well.
tr, err := transport.NewTimeoutTransport(tlsInfo, time.Second, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
if err != nil {
t.Fatal(err)
}
return tr
}
type SortableMemberSliceByPeerURLs []client.Member
func (p SortableMemberSliceByPeerURLs) Len() int { return len(p) }
func (p SortableMemberSliceByPeerURLs) Less(i, j int) bool {
return p[i].PeerURLs[0] < p[j].PeerURLs[0]
}
func (p SortableMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
type ClusterV3 struct {
*cluster
mu sync.Mutex
clients []*clientv3.Client
}
// NewClusterV3 returns a launched cluster with a grpc client connection
// for each cluster member.
func NewClusterV3(t *testing.T, cfg *ClusterConfig) *ClusterV3 {
cfg.UseGRPC = true
clus := &ClusterV3{
cluster: NewClusterByConfig(t, cfg),
}
clus.Launch(t)
for _, m := range clus.Members {
client, err := NewClientV3(m)
if err != nil {
t.Fatalf("cannot create client: %v", err)
}
clus.clients = append(clus.clients, client)
}
return clus
}
func (c *ClusterV3) TakeClient(idx int) {
c.mu.Lock()
c.clients[idx] = nil
c.mu.Unlock()
}
func (c *ClusterV3) Terminate(t *testing.T) {
c.mu.Lock()
for _, client := range c.clients {
if client == nil {
continue
}
if err := client.Close(); err != nil {
t.Error(err)
}
}
c.mu.Unlock()
c.cluster.Terminate(t)
}
func (c *ClusterV3) RandClient() *clientv3.Client {
return c.clients[rand.Intn(len(c.clients))]
}
func (c *ClusterV3) Client(i int) *clientv3.Client {
return c.clients[i]
}
type grpcAPI struct {
// Cluster is the cluster API for the client's connection.
Cluster pb.ClusterClient
// KV is the keyvalue API for the client's connection.
KV pb.KVClient
// Lease is the lease API for the client's connection.
Lease pb.LeaseClient
// Watch is the watch API for the client's connection.
Watch pb.WatchClient
// Maintenance is the maintenance API for the client's connection.
Maintenance pb.MaintenanceClient
// Auth is the authentication API for the client's connection.
Auth pb.AuthClient
}
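
Taken together, the pieces above give each test a disposable cluster plus ready-made clientv3 connections. A minimal sketch of that intended usage (the test name is illustrative):

package integration

import (
	"testing"
	"time"

	"golang.org/x/net/context"

	"github.com/coreos/etcd/pkg/testutil"
)

func TestClusterV3Sketch(t *testing.T) {
	defer testutil.AfterTest(t) // checks for leaked goroutines after the test

	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)

	// RandClient returns one of the per-member clientv3 clients created by NewClusterV3.
	cli := clus.RandClient()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if _, err := cli.Put(ctx, "foo", "bar"); err != nil {
		t.Fatal(err)
	}
}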


@@ -0,0 +1,37 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !cluster_proxy
package integration
import (
"github.com/coreos/etcd/clientv3"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
func toGRPC(c *clientv3.Client) grpcAPI {
return grpcAPI{
pb.NewClusterClient(c.ActiveConnection()),
pb.NewKVClient(c.ActiveConnection()),
pb.NewLeaseClient(c.ActiveConnection()),
pb.NewWatchClient(c.ActiveConnection()),
pb.NewMaintenanceClient(c.ActiveConnection()),
pb.NewAuthClient(c.ActiveConnection()),
}
}
func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) {
return clientv3.New(cfg)
}


@@ -0,0 +1,89 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build cluster_proxy
package integration
import (
"sync"
"github.com/coreos/etcd/clientv3"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/proxy/grpcproxy"
)
var (
pmu sync.Mutex
proxies map[*clientv3.Client]grpcClientProxy = make(map[*clientv3.Client]grpcClientProxy)
)
type grpcClientProxy struct {
grpc grpcAPI
wdonec <-chan struct{}
kvdonec <-chan struct{}
}
func toGRPC(c *clientv3.Client) grpcAPI {
pmu.Lock()
defer pmu.Unlock()
if v, ok := proxies[c]; ok {
return v.grpc
}
wp, wpch := grpcproxy.NewWatchProxy(c)
kvp, kvpch := grpcproxy.NewKvProxy(c)
grpc := grpcAPI{
pb.NewClusterClient(c.ActiveConnection()),
grpcproxy.KvServerToKvClient(kvp),
pb.NewLeaseClient(c.ActiveConnection()),
grpcproxy.WatchServerToWatchClient(wp),
pb.NewMaintenanceClient(c.ActiveConnection()),
pb.NewAuthClient(c.ActiveConnection()),
}
proxies[c] = grpcClientProxy{grpc: grpc, wdonec: wpch, kvdonec: kvpch}
return grpc
}
type proxyCloser struct {
clientv3.Watcher
wdonec <-chan struct{}
kvdonec <-chan struct{}
}
func (pc *proxyCloser) Close() error {
// client ctx is canceled before calling close, so kv will close out
<-pc.kvdonec
err := pc.Watcher.Close()
<-pc.wdonec
return err
}
func newClientV3(cfg clientv3.Config) (*clientv3.Client, error) {
c, err := clientv3.New(cfg)
if err != nil {
return nil, err
}
rpc := toGRPC(c)
c.KV = clientv3.NewKVFromKVClient(rpc.KV)
pmu.Lock()
c.Watcher = &proxyCloser{
Watcher: clientv3.NewWatchFromWatchClient(rpc.Watch),
wdonec: proxies[c].wdonec,
kvdonec: proxies[c].kvdonec,
}
pmu.Unlock()
return c, nil
}
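
Which of the two toGRPC/newClientV3 variants gets compiled is chosen by the cluster_proxy build tag (the file above applies with the tag set, the previous one without it); tests themselves are written against the grpcAPI struct and do not change. An illustrative sketch, with a made-up test name:

package integration

import (
	"testing"
	"time"

	"golang.org/x/net/context"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func TestToGRPCSketch(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 1})
	defer clus.Terminate(t)

	// With the cluster_proxy tag the KV client is backed by grpcproxy;
	// without it, it talks straight to the member. The call is identical.
	kv := toGRPC(clus.RandClient()).KV
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if _, err := kv.Range(ctx, &pb.RangeRequest{Key: []byte("foo")}); err != nil {
		t.Fatal(err)
	}
}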


@@ -0,0 +1,559 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"fmt"
"log"
"math/rand"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/pkg/capnslog"
"golang.org/x/net/context"
)
func init() {
// open microsecond-level time log for integration test debugging
log.SetFlags(log.Ltime | log.Lmicroseconds | log.Lshortfile)
if t := os.Getenv("ETCD_ELECTION_TIMEOUT_TICKS"); t != "" {
if i, err := strconv.ParseInt(t, 10, 64); err == nil {
electionTicks = int(i)
}
}
}
func TestClusterOf1(t *testing.T) { testCluster(t, 1) }
func TestClusterOf3(t *testing.T) { testCluster(t, 3) }
func testCluster(t *testing.T, size int) {
defer testutil.AfterTest(t)
c := NewCluster(t, size)
c.Launch(t)
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}
func TestTLSClusterOf3(t *testing.T) {
defer testutil.AfterTest(t)
c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
c.Launch(t)
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}
func TestClusterOf1UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 1) }
func TestClusterOf3UsingDiscovery(t *testing.T) { testClusterUsingDiscovery(t, 3) }
func testClusterUsingDiscovery(t *testing.T, size int) {
defer testutil.AfterTest(t)
dc := NewCluster(t, 1)
dc.Launch(t)
defer dc.Terminate(t)
// init discovery token space
dcc := MustNewHTTPClient(t, dc.URLs(), nil)
dkapi := client.NewKeysAPI(dcc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", size)); err != nil {
t.Fatal(err)
}
cancel()
c := NewClusterByConfig(
t,
&ClusterConfig{Size: size, DiscoveryURL: dc.URL(0) + "/v2/keys"},
)
c.Launch(t)
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}
func TestTLSClusterOf3UsingDiscovery(t *testing.T) {
defer testutil.AfterTest(t)
dc := NewCluster(t, 1)
dc.Launch(t)
defer dc.Terminate(t)
// init discovery token space
dcc := MustNewHTTPClient(t, dc.URLs(), nil)
dkapi := client.NewKeysAPI(dcc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
if _, err := dkapi.Create(ctx, "/_config/size", fmt.Sprintf("%d", 3)); err != nil {
t.Fatal(err)
}
cancel()
c := NewClusterByConfig(t,
&ClusterConfig{
Size: 3,
PeerTLS: &testTLSInfo,
DiscoveryURL: dc.URL(0) + "/v2/keys"},
)
c.Launch(t)
defer c.Terminate(t)
clusterMustProgress(t, c.Members)
}
func TestDoubleClusterSizeOf1(t *testing.T) { testDoubleClusterSize(t, 1) }
func TestDoubleClusterSizeOf3(t *testing.T) { testDoubleClusterSize(t, 3) }
func testDoubleClusterSize(t *testing.T, size int) {
defer testutil.AfterTest(t)
c := NewCluster(t, size)
c.Launch(t)
defer c.Terminate(t)
for i := 0; i < size; i++ {
c.AddMember(t)
}
clusterMustProgress(t, c.Members)
}
func TestDoubleTLSClusterSizeOf3(t *testing.T) {
defer testutil.AfterTest(t)
c := NewClusterByConfig(t, &ClusterConfig{Size: 3, PeerTLS: &testTLSInfo})
c.Launch(t)
defer c.Terminate(t)
for i := 0; i < 3; i++ {
c.AddMember(t)
}
clusterMustProgress(t, c.Members)
}
func TestDecreaseClusterSizeOf3(t *testing.T) { testDecreaseClusterSize(t, 3) }
func TestDecreaseClusterSizeOf5(t *testing.T) { testDecreaseClusterSize(t, 5) }
func testDecreaseClusterSize(t *testing.T, size int) {
defer testutil.AfterTest(t)
c := NewCluster(t, size)
c.Launch(t)
defer c.Terminate(t)
// TODO: remove the last but one member
for i := 0; i < size-1; i++ {
id := c.Members[len(c.Members)-1].s.ID()
c.RemoveMember(t, uint64(id))
c.waitLeader(t, c.Members)
}
clusterMustProgress(t, c.Members)
}
func TestForceNewCluster(t *testing.T) {
c := NewCluster(t, 3)
c.Launch(t)
cc := MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
resp, err := kapi.Create(ctx, "/foo", "bar")
if err != nil {
t.Fatalf("unexpected create error: %v", err)
}
cancel()
// ensure create has been applied in this machine
ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
if _, err = kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
t.Fatalf("unexpected watch error: %v", err)
}
cancel()
c.Members[0].Stop(t)
c.Members[1].Terminate(t)
c.Members[2].Terminate(t)
c.Members[0].ForceNewCluster = true
err = c.Members[0].Restart(t)
if err != nil {
t.Fatalf("unexpected ForceRestart error: %v", err)
}
defer c.Members[0].Terminate(t)
c.waitLeader(t, c.Members[:1])
// use new http client to init new connection
cc = MustNewHTTPClient(t, []string{c.Members[0].URL()}, nil)
kapi = client.NewKeysAPI(cc)
// ensure force restart keeps the old data, and the new cluster can make progress
ctx, cancel = context.WithTimeout(context.Background(), requestTimeout)
if _, err := kapi.Watcher("/foo", &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(ctx); err != nil {
t.Fatalf("unexpected watch error: %v", err)
}
cancel()
clusterMustProgress(t, c.Members[:1])
}
func TestAddMemberAfterClusterFullRotation(t *testing.T) {
defer testutil.AfterTest(t)
c := NewCluster(t, 3)
c.Launch(t)
defer c.Terminate(t)
// remove all the previous three members and add in three new members.
for i := 0; i < 3; i++ {
c.RemoveMember(t, uint64(c.Members[0].s.ID()))
c.waitLeader(t, c.Members)
c.AddMember(t)
c.waitLeader(t, c.Members)
}
c.AddMember(t)
c.waitLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}
// Ensure we can remove a member then add a new one back immediately.
func TestIssue2681(t *testing.T) {
defer testutil.AfterTest(t)
c := NewCluster(t, 5)
c.Launch(t)
defer c.Terminate(t)
c.RemoveMember(t, uint64(c.Members[4].s.ID()))
c.waitLeader(t, c.Members)
c.AddMember(t)
c.waitLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}
// Ensure we can remove a member after a snapshot then add a new one back.
func TestIssue2746(t *testing.T) { testIssue2746(t, 5) }
// With 3 nodes TestIssue2746 sometimes had a shutdown with an inflight snapshot.
func TestIssue2746WithThree(t *testing.T) { testIssue2746(t, 3) }
func testIssue2746(t *testing.T, members int) {
defer testutil.AfterTest(t)
c := NewCluster(t, members)
for _, m := range c.Members {
m.SnapCount = 10
}
c.Launch(t)
defer c.Terminate(t)
// force a snapshot
for i := 0; i < 20; i++ {
clusterMustProgress(t, c.Members)
}
c.RemoveMember(t, uint64(c.Members[members-1].s.ID()))
c.waitLeader(t, c.Members)
c.AddMember(t)
c.waitLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}
// Ensure etcd will not panic when removing a just started member.
func TestIssue2904(t *testing.T) {
defer testutil.AfterTest(t)
// start 1-member cluster to ensure member 0 is the leader of the cluster.
c := NewCluster(t, 1)
c.Launch(t)
defer c.Terminate(t)
c.AddMember(t)
c.Members[1].Stop(t)
// send remove member-1 request to the cluster.
cc := MustNewHTTPClient(t, c.URLs(), nil)
ma := client.NewMembersAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
// the proposal is not committed because member 1 is stopped, but the
// proposal is appended to leader's raft log.
ma.Remove(ctx, c.Members[1].s.ID().String())
cancel()
// restart member, and expect it to send UpdateAttributes request.
// the log in the leader is like this:
// [..., remove 1, ..., update attr 1, ...]
c.Members[1].Restart(t)
// when the member comes back, it acks the proposal to remove itself
// and applies it.
<-c.Members[1].s.StopNotify()
// terminate removed member
c.Members[1].Terminate(t)
c.Members = c.Members[:1]
// wait for the member to be removed.
c.waitMembersMatch(t, c.HTTPMembers())
}
// TestIssue3699 tests minority failure during cluster configuration; it was
// deadlocking.
func TestIssue3699(t *testing.T) {
// start a cluster of 3 nodes a, b, c
defer testutil.AfterTest(t)
c := NewCluster(t, 3)
c.Launch(t)
defer c.Terminate(t)
// make node a unavailable
c.Members[0].Stop(t)
// add node d
c.AddMember(t)
// electing node d as leader makes node a unable to participate
leaderID := c.waitLeader(t, c.Members)
for leaderID != 3 {
c.Members[leaderID].Stop(t)
<-c.Members[leaderID].s.StopNotify()
// do not restart the killed member immediately.
// the member will advance its election timeout after restart,
// so it will have a better chance to become the leader again.
time.Sleep(time.Duration(electionTicks * int(tickDuration)))
c.Members[leaderID].Restart(t)
leaderID = c.waitLeader(t, c.Members)
}
// bring back node a
// node a will remain useless as long as d is the leader.
if err := c.Members[0].Restart(t); err != nil {
t.Fatal(err)
}
select {
// waiting for ReadyNotify can take several seconds
case <-time.After(10 * time.Second):
t.Fatalf("waited too long for ready notification")
case <-c.Members[0].s.StopNotify():
t.Fatalf("should not be stopped")
case <-c.Members[0].s.ReadyNotify():
}
// must waitLeader so goroutines don't leak on terminate
c.waitLeader(t, c.Members)
// try to participate in cluster
cc := MustNewHTTPClient(t, []string{c.URL(0)}, c.cfg.ClientTLS)
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
if _, err := kapi.Set(ctx, "/foo", "bar", nil); err != nil {
t.Fatalf("unexpected error on Set (%v)", err)
}
cancel()
}
// TestRejectUnhealthyAdd ensures an unhealthy cluster rejects adding members.
func TestRejectUnhealthyAdd(t *testing.T) {
defer testutil.AfterTest(t)
c := NewCluster(t, 3)
for _, m := range c.Members {
m.ServerConfig.StrictReconfigCheck = true
}
c.Launch(t)
defer c.Terminate(t)
// make cluster unhealthy and wait for downed peer
c.Members[0].Stop(t)
c.WaitLeader(t)
// all attempts to add member should fail
for i := 1; i < len(c.Members); i++ {
err := c.addMemberByURL(t, c.URL(i), "unix://foo:12345")
if err == nil {
t.Fatalf("should have failed adding peer")
}
// TODO: client should return descriptive error codes for internal errors
if !strings.Contains(err.Error(), "has no leader") {
t.Errorf("unexpected error (%v)", err)
}
}
// make cluster healthy
c.Members[0].Restart(t)
c.WaitLeader(t)
time.Sleep(2 * etcdserver.HealthInterval)
// add member should succeed now that it's healthy
var err error
for i := 1; i < len(c.Members); i++ {
if err = c.addMemberByURL(t, c.URL(i), "unix://foo:12345"); err == nil {
break
}
}
if err != nil {
t.Fatalf("should have added peer to healthy cluster (%v)", err)
}
}
// TestRejectUnhealthyRemove ensures an unhealthy cluster rejects removing members
// if quorum will be lost.
func TestRejectUnhealthyRemove(t *testing.T) {
defer testutil.AfterTest(t)
c := NewCluster(t, 5)
for _, m := range c.Members {
m.ServerConfig.StrictReconfigCheck = true
}
c.Launch(t)
defer c.Terminate(t)
// make cluster unhealthy and wait for downed peer; (3 up, 2 down)
c.Members[0].Stop(t)
c.Members[1].Stop(t)
c.WaitLeader(t)
// reject remove active member since (3,2)-(1,0) => (2,2) lacks quorum
err := c.removeMember(t, uint64(c.Members[2].s.ID()))
if err == nil {
t.Fatalf("should reject quorum breaking remove")
}
// TODO: client should return more descriptive error codes for internal errors
if !strings.Contains(err.Error(), "has no leader") {
t.Errorf("unexpected error (%v)", err)
}
// member stopped after launch; wait for missing heartbeats
time.Sleep(time.Duration(electionTicks * int(tickDuration)))
// permit remove dead member since (3,2) - (0,1) => (3,1) has quorum
if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
t.Fatalf("should accept removing down member")
}
// bring cluster to (4,1)
c.Members[0].Restart(t)
// restarted member must be connected for a HealthInterval before remove is accepted
time.Sleep((3 * etcdserver.HealthInterval) / 2)
// accept remove member since (4,1)-(1,0) => (3,1) has quorum
if err = c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
t.Fatalf("expected to remove member, got error %v", err)
}
}
// TestRestartRemoved ensures that a removed member that is restarted must exit
// if 'initial-cluster-state' is set to 'new' and the old data directory still exists
// (see https://github.com/coreos/etcd/issues/7512 for more).
func TestRestartRemoved(t *testing.T) {
defer testutil.AfterTest(t)
capnslog.SetGlobalLogLevel(capnslog.INFO)
// 1. start single-member cluster
c := NewCluster(t, 1)
for _, m := range c.Members {
m.ServerConfig.StrictReconfigCheck = true
}
c.Launch(t)
defer c.Terminate(t)
// 2. add a new member
c.AddMember(t)
c.WaitLeader(t)
oldm := c.Members[0]
oldm.keepDataDirTerminate = true
// 3. remove first member, shut down without deleting data
if err := c.removeMember(t, uint64(c.Members[0].s.ID())); err != nil {
t.Fatalf("expected to remove member, got error %v", err)
}
c.WaitLeader(t)
// 4. restart first member with 'initial-cluster-state=new'
// wrong config, expects exit within ReqTimeout
oldm.ServerConfig.NewCluster = false
if err := oldm.Restart(t); err != nil {
t.Fatalf("unexpected ForceRestart error: %v", err)
}
defer func() {
oldm.Close()
os.RemoveAll(oldm.ServerConfig.DataDir)
}()
select {
case <-oldm.s.StopNotify():
case <-time.After(time.Minute):
t.Fatalf("removed member didn't exit within %v", time.Minute)
}
}
// clusterMustProgress ensures that the cluster can make progress. It creates
// a random key first, and checks that the new key can be read from all client URLs
// of the cluster.
func clusterMustProgress(t *testing.T, membs []*member) {
cc := MustNewHTTPClient(t, []string{membs[0].URL()}, nil)
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
key := fmt.Sprintf("foo%d", rand.Int())
resp, err := kapi.Create(ctx, "/"+key, "bar")
if err != nil {
t.Fatalf("create on %s error: %v", membs[0].URL(), err)
}
cancel()
for i, m := range membs {
u := m.URL()
mcc := MustNewHTTPClient(t, []string{u}, nil)
mkapi := client.NewKeysAPI(mcc)
mctx, mcancel := context.WithTimeout(context.Background(), requestTimeout)
if _, err := mkapi.Watcher(key, &client.WatcherOptions{AfterIndex: resp.Node.ModifiedIndex - 1}).Next(mctx); err != nil {
t.Fatalf("#%d: watch on %s error: %v", i, u, err)
}
mcancel()
}
}
func TestTransferLeader(t *testing.T) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
oldLeadIdx := clus.WaitLeader(t)
oldLeadID := uint64(clus.Members[oldLeadIdx].s.ID())
// ensure followers go through a leader transition during leadership transfer
idc := make(chan uint64)
for i := range clus.Members {
if oldLeadIdx != i {
go func(m *member) {
idc <- checkLeaderTransition(t, m, oldLeadID)
}(clus.Members[i])
}
}
err := clus.Members[oldLeadIdx].s.TransferLeadership()
if err != nil {
t.Fatal(err)
}
// wait until leader transitions have happened
var newLeadIDs [2]uint64
for i := range newLeadIDs {
select {
case newLeadIDs[i] = <-idc:
case <-time.After(time.Second):
t.Fatal("timed out waiting for leader transition")
}
}
// remaining members must agree on the same leader
if newLeadIDs[0] != newLeadIDs[1] {
t.Fatalf("expected same new leader %d == %d", newLeadIDs[0], newLeadIDs[1])
}
// new leader must be different than the old leader
if oldLeadID == newLeadIDs[0] {
t.Fatalf("expected old leader %d != new leader %d", oldLeadID, newLeadIDs[0])
}
}

vendor/github.com/coreos/etcd/integration/doc.go generated vendored Normal file

@@ -0,0 +1,25 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package integration implements tests built upon embedded etcd, and focuses on
etcd correctness.
Features/goals of the integration tests:
1. test the whole code base except command-line parsing.
2. check internal data, including raft, store, etc.
3. based on goroutines, which is faster than separate processes.
4. mainly tests user behavior and user-facing API.
*/
package integration

vendor/github.com/coreos/etcd/integration/embed_test.go generated vendored Normal file

@@ -0,0 +1,111 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"testing"
"github.com/coreos/etcd/embed"
)
func TestEmbedEtcd(t *testing.T) {
tests := []struct {
cfg embed.Config
werr string
wpeers int
wclients int
}{
{werr: "multiple discovery"},
{werr: "advertise-client-urls is required"},
{werr: "should be at least"},
{werr: "is too long"},
{wpeers: 1, wclients: 1},
{wpeers: 2, wclients: 1},
{wpeers: 1, wclients: 2},
}
urls := newEmbedURLs(10)
// setup defaults
for i := range tests {
tests[i].cfg = *embed.NewConfig()
}
tests[0].cfg.Durl = "abc"
setupEmbedCfg(&tests[1].cfg, []url.URL{urls[0]}, []url.URL{urls[1]})
tests[1].cfg.ACUrls = nil
tests[2].cfg.TickMs = tests[2].cfg.ElectionMs - 1
tests[3].cfg.ElectionMs = 999999
setupEmbedCfg(&tests[4].cfg, []url.URL{urls[2]}, []url.URL{urls[3]})
setupEmbedCfg(&tests[5].cfg, []url.URL{urls[4]}, []url.URL{urls[5], urls[6]})
setupEmbedCfg(&tests[6].cfg, []url.URL{urls[7], urls[8]}, []url.URL{urls[9]})
dir := filepath.Join(os.TempDir(), fmt.Sprintf("embed-etcd"))
os.RemoveAll(dir)
defer os.RemoveAll(dir)
for i, tt := range tests {
tests[i].cfg.Dir = dir
e, err := embed.StartEtcd(&tests[i].cfg)
if e != nil {
<-e.Server.ReadyNotify() // wait for e.Server to join the cluster
}
if tt.werr != "" {
if err == nil || !strings.Contains(err.Error(), tt.werr) {
t.Errorf("%d: expected error with %q, got %v", i, tt.werr, err)
}
if e != nil {
e.Close()
}
continue
}
if err != nil {
t.Errorf("%d: expected success, got error %v", i, err)
continue
}
if len(e.Peers) != tt.wpeers {
t.Errorf("%d: expected %d peers, got %d", i, tt.wpeers, len(e.Peers))
}
if len(e.Clients) != tt.wclients {
t.Errorf("%d: expected %d peers, got %d", i, tt.wclients, len(e.Clients))
}
e.Close()
}
}
func newEmbedURLs(n int) (urls []url.URL) {
for i := 0; i < n; i++ {
u, _ := url.Parse(fmt.Sprintf("unix://localhost:%d%06d", os.Getpid(), i))
urls = append(urls, *u)
}
return
}
func setupEmbedCfg(cfg *embed.Config, curls []url.URL, purls []url.URL) {
cfg.ClusterState = "new"
cfg.LCUrls, cfg.ACUrls = curls, curls
cfg.LPUrls, cfg.APUrls = purls, purls
cfg.InitialCluster = ""
for i := range purls {
cfg.InitialCluster += ",default=" + purls[i].String()
}
cfg.InitialCluster = cfg.InitialCluster[1:]
}
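
Beyond the table-driven cases above, the embed package can be driven directly; this is a minimal standalone sketch under the assumption that "default.etcd" is an acceptable, writable data directory:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "default.etcd" // any writable data directory
	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()
	select {
	case <-e.Server.ReadyNotify():
		log.Println("embedded etcd is ready")
	case <-time.After(60 * time.Second):
		e.Server.Stop() // trigger a shutdown if startup hangs
		log.Fatal("embedded etcd took too long to start")
	}
	log.Fatal(<-e.Err()) // block until the server stops or errors
}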


@@ -0,0 +1,23 @@
-----BEGIN CERTIFICATE-----
MIID2zCCAsOgAwIBAgIUZXdXtcOe421Geq9VjM35+SRJUS8wDQYJKoZIhvcNAQEL
BQAwdTEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMRAwDgYDVQQKEwdldGNkLWNhMQswCQYDVQQLEwJDQTEZ
MBcGA1UEAxMQQXV0b2dlbmVyYXRlZCBDQTAeFw0xNjA3MDUxOTQ1MDBaFw0yMTA3
MDQxOTQ1MDBaMHUxDDAKBgNVBAYTA1VTQTETMBEGA1UECBMKQ2FsaWZvcm5pYTEW
MBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEQMA4GA1UEChMHZXRjZC1jYTELMAkGA1UE
CxMCQ0ExGTAXBgNVBAMTEEF1dG9nZW5lcmF0ZWQgQ0EwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDBMoRjH0ULs+0cRZWZ8BGJ7Fmf152J9uUE3/NgYV3M
4Ntu6l3IYALXT5QSHQZIz5425HP6827mwAOZ/bk6E3yzq6XR/vHzxPFLzBMzFuq/
elQA4nb7eYHICriEFUdJo2EUg3lSD3m6Deof/NjPMgUHtuvhn1OJMezaALZiMZ0K
9B9/1ktW4Roi6FMVFfJM5rKr9EIz6P2mFUpVHI7KSGbeuHiTPq0FLVv7wFPxRFX5
Ygd/nF6bbSsE2LAx/JdY1j0LQi0WUcA/HaWYVOpFSKohO6FmshP5bX0o//wWSkg2
8CSbtqvSxRF/Ril7raZlX713AAZVn8+B83tpjFqOLH+7AgMBAAGjYzBhMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSlyMYprKNDkzyP
gGA5cYnEEe9Y8DAfBgNVHSMEGDAWgBSlyMYprKNDkzyPgGA5cYnEEe9Y8DANBgkq
hkiG9w0BAQsFAAOCAQEAjjZkuoBl6meveg1frQuUhWtgtN/g9JqIjhEQ7tr4H46/
cHz3ngCuJh/GKSt7MTqafP99kqtm1GBs7BcoFKwsNFxNOo/a2MV2oYe2T5ol5U6/
RnmPv7yXzV1WlSC2IxFdtKEIfM859TFrWFN+NyH7yyYzjx+CzFdu6SHMwrQkETKr
R/PJrb0pV+gbeFpe/VfVyT7tFSxRTkSqwvMFNjQmbSLSiIFDNdZmPBmnWk418zoP
lkUESi3OQc4Eh/yQuldDXKl7L8+Ar8DddAu4nsni9EAJWi1u5wPPaLd+3s5USr1f
zFC3tb8o+WfNf+VSxWWPWyZXlcnB2glT+TWW40Ng1w==
-----END CERTIFICATE-----


@@ -0,0 +1,24 @@
-----BEGIN CERTIFICATE-----
MIID9TCCAt2gAwIBAgIUXtrXPwZLfKUJiGr6ClP3lqhOuKUwDQYJKoZIhvcNAQEL
BQAwdTEMMAoGA1UEBhMDVVNBMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH
Ew1TYW4gRnJhbmNpc2NvMRAwDgYDVQQKEwdldGNkLWNhMQswCQYDVQQLEwJDQTEZ
MBcGA1UEAxMQQXV0b2dlbmVyYXRlZCBDQTAeFw0xNjA3MDUxOTQ1MDBaFw0xNzA3
MDUxOTQ1MDBaMFUxFTATBgNVBAcTDHRoZSBpbnRlcm5ldDEWMBQGA1UEChMNYXV0
b2dlbmVyYXRlZDEVMBMGA1UECxMMZXRjZCBjbHVzdGVyMQ0wCwYDVQQDEwRldGNk
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArGvOLPmy5i+1j4JitG4V
g99w125ncs2ImhqucmJe6YtSnKruaLdOx93X7lzC3k0umnvLgh643R4eYS5PvrDk
vw1dSYB7BHhveFPmmWd7m7n7bXtgbcdkCmUeTbSeqvptPgyMJOQfXzfOGbEHfu7U
0raulR6KtqAatofKpRZhZgzZQpVkhdd0UTsOwqCWdX3Qe0D1MS922kX99c4UlGyD
OTVL6tulvDBBYgHbGErFmhxdgwm4e6dFfdkPUeHczzUWnKo2sIGBvo4R/NwPIp6G
PnebrO0VWvcQfdSqjYk3BmILl8BVL5W1/EBRLtz9mZuQgc/VC62LvsgXusC9pwXC
3QIDAQABo4GcMIGZMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcD
AQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQULLrktzdBK6iLINQ7
hGRjQbMYXKowHwYDVR0jBBgwFoAUpcjGKayjQ5M8j4BgOXGJxBHvWPAwGgYDVR0R
BBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQCI2Tp4pMYk
LFLzGy4e/5pwpA4x/C2zl01Sv/eC79RA5Zz1NtSF/7LCfL+KPNpNkxzPyTxWOaX5
YMuAbD49ZBQYeEyNUxKcwWEpaVlmlIUj3b21fBXQ7Nw25Uea45bNhdZcdMUOTums
J1/BrA2eoEB0guTlh3E8iadbVmSf6elA9TbYLd7QTTgcb3XclYCwhV3eKdm3IEiX
g4q50iM6/LRz1E5C3LlQ0aNqpGroBv/9ahLVfLr06ziSRcecLJ4485MtJOxP4guA
1tc6qPyw2MLmAlLZfOCHKLbK3KboZI8IANmrpNyL590D9bDl9nLnHmJuitBpIVp1
Hw0I8e4ZYhab
-----END CERTIFICATE-----


@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEArGvOLPmy5i+1j4JitG4Vg99w125ncs2ImhqucmJe6YtSnKru
aLdOx93X7lzC3k0umnvLgh643R4eYS5PvrDkvw1dSYB7BHhveFPmmWd7m7n7bXtg
bcdkCmUeTbSeqvptPgyMJOQfXzfOGbEHfu7U0raulR6KtqAatofKpRZhZgzZQpVk
hdd0UTsOwqCWdX3Qe0D1MS922kX99c4UlGyDOTVL6tulvDBBYgHbGErFmhxdgwm4
e6dFfdkPUeHczzUWnKo2sIGBvo4R/NwPIp6GPnebrO0VWvcQfdSqjYk3BmILl8BV
L5W1/EBRLtz9mZuQgc/VC62LvsgXusC9pwXC3QIDAQABAoIBAQCNseKz71hn9tk8
YKiBIt6nix6OzHpTTDlwe3DVK6ZYQ1jWy1o10D773YIwryatzbv41Ld/7YN6o9/P
eWGrkm/J2k/Jsb5nBBqHRlwBwZtBdOv9IyEx1mSObl8i+MZUOI1CKsmZH6fwdkn3
rxY76EYaDGsYvQq93oFVc+7DEMtmMtr03xm2bleEvsUH0VVqLhiAof/PCgOzja/L
mPxhK0FqOmhk94JFo2l0XNMn/b2lpUhrx+xny5RD6/W/k2C1DuzBiFiNZkbPW1r1
n5QccJHpe/S3Y4WZ75yKyQdrcIz6AKSeHNNGw2mYERAOmejpVV+8OIvKY6pzyXi9
EM/BsLaBAoGBAN+XiqHHGilsrvjLKGak2KIaPRxA7EgFKKWBv8DojpXLqgkoloDL
1wS6uG4XE0FeJCiKZk/DpVgPSiKYkQJEFLgU8N3q8OO2cGYW8kfH/TuejWRebtgJ
GC7o5CqAHjFqRbTPJBLLNlSUZP08HVIRhob3t0zkvVRdDjA1rZIM/FlxAoGBAMVp
jTcimGEOhFbOvfLwFeMCFLglTzbxjSnxCLCKF5TbxcBN7iUE2wYRfexBLoP/3+rk
RheyRnMr4PeZ/JPQLHs80TUm9HGg8Phy+jAsIW/rF8BJ4aAExt2T4uLNsj4TXw1y
ckDMBLmZi0OFy4vDtwg4T2wVo55eN/oQfVNFFaotAoGAGLQ8q/08pcENYA3KS/UA
voBZqip+MMLpJ8g7MIxBXMmg4twqLNbYzfv3bqp8BSfqpNQN09hRB3bBASuMMgzl
oSUnK83OicpZht4YLNgq4ZB2HNXWN2Zh1qUCuLNpIpqUUxLj8HOlcBjpQ5WFw9CN
5ZGvHf7T8GNLswXrRIzMwPECgYAC5Q5WDaLQYYcdQsDUTCL2BjTJknp74sTgJZGs
DQpVe3eF316rmkuf5ifDjB0jgGAHMLu6YznXPIB7AP4MKNROJlEnB2A0PljqO71h
cXQ4EOlzP2IYl5lW7HE6RCvl7yDIsLHuM0+qbQ72uYKHlSIc875uZk7U5qrJdu5v
hybPLQKBgAmswE0nM9Fnj4ue9QaDgOvp1p7peZuzywBI4+TTJ/3++5vtUrgRl9Ak
UVzSVvltxhFpFtNfVrZxckDwb6louumRtBrLVWJDlAakvc5eG5tky+SA2u/bdXSr
8tq8c24K19Pg+OLkdZpiJqmKyyV0dVn6NNmiBmiLe2tClNsUHI47
-----END RSA PRIVATE KEY-----


@@ -0,0 +1,21 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import "github.com/coreos/pkg/capnslog"
func init() {
capnslog.SetGlobalLogLevel(capnslog.CRITICAL)
}

vendor/github.com/coreos/etcd/integration/main_test.go generated vendored Normal file

@@ -0,0 +1,20 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package integration
import (
"os"
"testing"
"github.com/coreos/etcd/pkg/testutil"
)
func TestMain(m *testing.M) {
v := m.Run()
if v == 0 && testutil.CheckLeakedGoroutine() {
os.Exit(1)
}
os.Exit(v)
}


@@ -0,0 +1,125 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"fmt"
"io/ioutil"
"os"
"reflect"
"testing"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
)
func TestPauseMember(t *testing.T) {
defer testutil.AfterTest(t)
c := NewCluster(t, 5)
c.Launch(t)
defer c.Terminate(t)
for i := 0; i < 5; i++ {
c.Members[i].Pause()
membs := append([]*member{}, c.Members[:i]...)
membs = append(membs, c.Members[i+1:]...)
c.waitLeader(t, membs)
clusterMustProgress(t, membs)
c.Members[i].Resume()
}
c.waitLeader(t, c.Members)
clusterMustProgress(t, c.Members)
}
func TestRestartMember(t *testing.T) {
defer testutil.AfterTest(t)
c := NewCluster(t, 3)
c.Launch(t)
defer c.Terminate(t)
for i := 0; i < 3; i++ {
c.Members[i].Stop(t)
membs := append([]*member{}, c.Members[:i]...)
membs = append(membs, c.Members[i+1:]...)
c.waitLeader(t, membs)
clusterMustProgress(t, membs)
err := c.Members[i].Restart(t)
if err != nil {
t.Fatal(err)
}
}
clusterMustProgress(t, c.Members)
}
func TestLaunchDuplicateMemberShouldFail(t *testing.T) {
size := 3
c := NewCluster(t, size)
m := c.Members[0].Clone(t)
var err error
m.DataDir, err = ioutil.TempDir(os.TempDir(), "etcd")
if err != nil {
t.Fatal(err)
}
c.Launch(t)
defer c.Terminate(t)
if err := m.Launch(); err == nil {
t.Errorf("unexpected successful launch")
}
}
func TestSnapshotAndRestartMember(t *testing.T) {
defer testutil.AfterTest(t)
m := mustNewMember(t, memberConfig{name: "snapAndRestartTest"})
m.SnapCount = 100
m.Launch()
defer m.Terminate(t)
m.WaitOK(t)
resps := make([]*client.Response, 120)
var err error
for i := 0; i < 120; i++ {
cc := MustNewHTTPClient(t, []string{m.URL()}, nil)
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
key := fmt.Sprintf("foo%d", i)
resps[i], err = kapi.Create(ctx, "/"+key, "bar")
if err != nil {
t.Fatalf("#%d: create on %s error: %v", i, m.URL(), err)
}
cancel()
}
m.Stop(t)
m.Restart(t)
m.WaitOK(t)
for i := 0; i < 120; i++ {
cc := MustNewHTTPClient(t, []string{m.URL()}, nil)
kapi := client.NewKeysAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
key := fmt.Sprintf("foo%d", i)
resp, err := kapi.Get(ctx, "/"+key, nil)
if err != nil {
t.Fatalf("#%d: get on %s error: %v", i, m.URL(), err)
}
cancel()
if !reflect.DeepEqual(resp.Node, resps[i].Node) {
t.Errorf("#%d: node = %v, want %v", i, resp.Node, resps[i].Node)
}
}
}


@ -0,0 +1,144 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"testing"
"time"
"github.com/coreos/etcd/pkg/testutil"
)
func TestNetworkPartition5MembersLeaderInMinority(t *testing.T) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 5})
defer clus.Terminate(t)
leadIndex := clus.WaitLeader(t)
// minority: leader, follower / majority: follower, follower, follower
minority := []int{leadIndex, (leadIndex + 1) % 5}
majority := []int{(leadIndex + 2) % 5, (leadIndex + 3) % 5, (leadIndex + 4) % 5}
minorityMembers := getMembersByIndexSlice(clus.cluster, minority)
majorityMembers := getMembersByIndexSlice(clus.cluster, majority)
// network partition (bi-directional)
injectPartition(t, minorityMembers, majorityMembers)
// minority leader must be lost
clus.waitNoLeader(t, minorityMembers)
// wait extra election timeout
time.Sleep(2 * majorityMembers[0].electionTimeout())
// new leader must be from majority
clus.waitLeader(t, majorityMembers)
// recover network partition (bi-directional)
recoverPartition(t, minorityMembers, majorityMembers)
// write to majority first
clusterMustProgress(t, append(majorityMembers, minorityMembers...))
}
func TestNetworkPartition5MembersLeaderInMajority(t *testing.T) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 5})
defer clus.Terminate(t)
leadIndex := clus.WaitLeader(t)
// majority: leader, follower, follower / minority: follower, follower
majority := []int{leadIndex, (leadIndex + 1) % 5, (leadIndex + 2) % 5}
minority := []int{(leadIndex + 3) % 5, (leadIndex + 4) % 5}
majorityMembers := getMembersByIndexSlice(clus.cluster, majority)
minorityMembers := getMembersByIndexSlice(clus.cluster, minority)
// network partition (bi-directional)
injectPartition(t, majorityMembers, minorityMembers)
// minority leader must be lost
clus.waitNoLeader(t, minorityMembers)
// wait extra election timeout
time.Sleep(2 * majorityMembers[0].electionTimeout())
// leadership must be kept in the majority partition
leadIndex2 := clus.waitLeader(t, majorityMembers)
leadID, leadID2 := clus.Members[leadIndex].s.ID(), majorityMembers[leadIndex2].s.ID()
if leadID != leadID2 {
t.Fatalf("unexpected leader change from %s, got %s", leadID, leadID2)
}
// recover network partition (bi-directional)
recoverPartition(t, majorityMembers, minorityMembers)
// write to majority first
clusterMustProgress(t, append(majorityMembers, minorityMembers...))
}
func TestNetworkPartition4Members(t *testing.T) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 4})
defer clus.Terminate(t)
leadIndex := clus.WaitLeader(t)
// groupA: leader, follower / groupB: follower, follower
groupA := []int{leadIndex, (leadIndex + 1) % 4}
groupB := []int{(leadIndex + 2) % 4, (leadIndex + 3) % 4}
leaderPartition := getMembersByIndexSlice(clus.cluster, groupA)
followerPartition := getMembersByIndexSlice(clus.cluster, groupB)
// network partition (bi-directional)
injectPartition(t, leaderPartition, followerPartition)
// no group has quorum, so leader must be lost in all members
clus.WaitNoLeader(t)
// recover network partition (bi-directional)
recoverPartition(t, leaderPartition, followerPartition)
// need to wait since it recovered with no leader
clus.WaitLeader(t)
clusterMustProgress(t, clus.Members)
}
func getMembersByIndexSlice(clus *cluster, idxs []int) []*member {
ms := make([]*member, len(idxs))
for i, idx := range idxs {
ms[i] = clus.Members[idx]
}
return ms
}
func injectPartition(t *testing.T, src, others []*member) {
for _, m := range src {
m.InjectPartition(t, others)
}
}
func recoverPartition(t *testing.T, src, others []*member) {
for _, m := range src {
m.RecoverPartition(t, others)
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,57 @@
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"testing"
"time"
"golang.org/x/net/context"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/testutil"
)
// TestV3AuthEmptyUserGet ensures that a get with an empty user will return an empty user error.
func TestV3AuthEmptyUserGet(t *testing.T) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
defer cancel()
api := toGRPC(clus.Client(0))
auth := api.Auth
if _, err := auth.UserAdd(ctx, &pb.AuthUserAddRequest{Name: "root", Password: "123"}); err != nil {
t.Fatal(err)
}
if _, err := auth.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: "root"}); err != nil {
t.Fatal(err)
}
if _, err := auth.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: "root", Role: "root"}); err != nil {
t.Fatal(err)
}
if _, err := auth.AuthEnable(ctx, &pb.AuthEnableRequest{}); err != nil {
t.Fatal(err)
}
_, err := api.KV.Range(ctx, &pb.RangeRequest{Key: []byte("abc")})
if !eqErrGRPC(err, rpctypes.ErrUserEmpty) {
t.Fatalf("got %v, expected %v", err, rpctypes.ErrUserEmpty)
}
}
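// Editorial sketch (not part of the vendored file): the same auth bootstrap as the
// test above, performed through the clientv3 API instead of raw gRPC. The endpoint,
// password, and the clientv3/time imports are editorial assumptions not used by this file.
func exampleEnableAuth() error {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}, DialTimeout: 5 * time.Second})
	if err != nil {
		return err
	}
	defer cli.Close()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	// the root user with the root role must exist before auth can be enabled
	if _, err := cli.UserAdd(ctx, "root", "rootpw"); err != nil {
		return err
	}
	if _, err := cli.RoleAdd(ctx, "root"); err != nil {
		return err
	}
	if _, err := cli.UserGrantRole(ctx, "root", "root"); err != nil {
		return err
	}
	_, err = cli.AuthEnable(ctx)
	return err
}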

View file

@ -0,0 +1,78 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"testing"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/contrib/recipes"
"github.com/coreos/etcd/pkg/testutil"
)
func TestBarrierSingleNode(t *testing.T) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
testBarrier(t, 5, func() *clientv3.Client { return clus.clients[0] })
}
func TestBarrierMultiNode(t *testing.T) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
testBarrier(t, 5, func() *clientv3.Client { return clus.RandClient() })
}
func testBarrier(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
b := recipe.NewBarrier(chooseClient(), "test-barrier")
if err := b.Hold(); err != nil {
t.Fatalf("could not hold barrier (%v)", err)
}
if err := b.Hold(); err == nil {
t.Fatalf("able to double-hold barrier")
}
donec := make(chan struct{})
for i := 0; i < waiters; i++ {
go func() {
br := recipe.NewBarrier(chooseClient(), "test-barrier")
if err := br.Wait(); err != nil {
t.Fatalf("could not wait on barrier (%v)", err)
}
donec <- struct{}{}
}()
}
select {
case <-donec:
t.Fatalf("barrier did not wait")
default:
}
if err := b.Release(); err != nil {
t.Fatalf("could not release barrier (%v)", err)
}
timerC := time.After(time.Duration(waiters*100) * time.Millisecond)
for i := 0; i < waiters; i++ {
select {
case <-timerC:
t.Fatalf("barrier timed out")
case <-donec:
}
}
}
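// Editorial sketch (not part of the vendored file): the minimal happy-path use of the
// barrier recipe exercised by testBarrier above. "cli" is assumed to be a connected
// *clientv3.Client; the key name is a placeholder.
func exampleBarrier(cli *clientv3.Client) error {
	b := recipe.NewBarrier(cli, "example-barrier")
	if err := b.Hold(); err != nil { // create the barrier key; waiters now block
		return err
	}
	waitErr := make(chan error, 1)
	go func() {
		w := recipe.NewBarrier(cli, "example-barrier")
		waitErr <- w.Wait() // blocks until the holder releases
	}()
	if err := b.Release(); err != nil { // delete the key; all waiters unblock
		return err
	}
	return <-waitErr
}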

View file

@ -0,0 +1,155 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"testing"
"time"
"github.com/coreos/etcd/clientv3/concurrency"
"github.com/coreos/etcd/contrib/recipes"
)
func TestDoubleBarrier(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
waiters := 10
session, err := concurrency.NewSession(clus.RandClient())
if err != nil {
t.Error(err)
}
defer session.Orphan()
b := recipe.NewDoubleBarrier(session, "test-barrier", waiters)
donec := make(chan struct{})
for i := 0; i < waiters-1; i++ {
go func() {
session, err := concurrency.NewSession(clus.RandClient())
if err != nil {
t.Error(err)
}
defer session.Orphan()
bb := recipe.NewDoubleBarrier(session, "test-barrier", waiters)
if err := bb.Enter(); err != nil {
t.Fatalf("could not enter on barrier (%v)", err)
}
donec <- struct{}{}
if err := bb.Leave(); err != nil {
t.Fatalf("could not leave on barrier (%v)", err)
}
donec <- struct{}{}
}()
}
time.Sleep(10 * time.Millisecond)
select {
case <-donec:
t.Fatalf("barrier did not enter-wait")
default:
}
if err := b.Enter(); err != nil {
t.Fatalf("could not enter last barrier (%v)", err)
}
timerC := time.After(time.Duration(waiters*100) * time.Millisecond)
for i := 0; i < waiters-1; i++ {
select {
case <-timerC:
t.Fatalf("barrier enter timed out")
case <-donec:
}
}
time.Sleep(10 * time.Millisecond)
select {
case <-donec:
t.Fatalf("barrier did not leave-wait")
default:
}
b.Leave()
timerC = time.After(time.Duration(waiters*100) * time.Millisecond)
for i := 0; i < waiters-1; i++ {
select {
case <-timerC:
t.Fatalf("barrier leave timed out")
case <-donec:
}
}
}
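// Editorial sketch (not part of the vendored file): the double-barrier pattern used
// above, reduced to two participants. The sessions and key name are placeholders.
func exampleDoubleBarrier(s1, s2 *concurrency.Session) error {
	const participants = 2
	done := make(chan error, 1)
	go func() {
		b := recipe.NewDoubleBarrier(s2, "example-dbarrier", participants)
		if err := b.Enter(); err != nil { // blocks until both participants have entered
			done <- err
			return
		}
		done <- b.Leave() // blocks until both participants have left
	}()
	b := recipe.NewDoubleBarrier(s1, "example-dbarrier", participants)
	if err := b.Enter(); err != nil {
		return err
	}
	if err := b.Leave(); err != nil {
		return err
	}
	return <-done
}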
func TestDoubleBarrierFailover(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
waiters := 10
donec := make(chan struct{})
s0, err := concurrency.NewSession(clus.clients[0])
if err != nil {
t.Error(err)
}
defer s0.Orphan()
s1, err := concurrency.NewSession(clus.clients[0])
if err != nil {
t.Error(err)
}
defer s1.Orphan()
// sacrificial barrier holder; lease will be revoked
go func() {
b := recipe.NewDoubleBarrier(s0, "test-barrier", waiters)
if berr := b.Enter(); berr != nil {
t.Fatalf("could not enter on barrier (%v)", berr)
}
donec <- struct{}{}
}()
for i := 0; i < waiters-1; i++ {
go func() {
b := recipe.NewDoubleBarrier(s1, "test-barrier", waiters)
if berr := b.Enter(); berr != nil {
t.Fatalf("could not enter on barrier (%v)", berr)
}
donec <- struct{}{}
b.Leave()
donec <- struct{}{}
}()
}
// wait for barrier enter to unblock
for i := 0; i < waiters; i++ {
select {
case <-donec:
case <-time.After(10 * time.Second):
t.Fatalf("timed out waiting for enter, %d", i)
}
}
if err = s0.Close(); err != nil {
t.Fatal(err)
}
// join on rest of waiters
for i := 0; i < waiters-1; i++ {
select {
case <-donec:
case <-time.After(10 * time.Second):
t.Fatalf("timed out waiting for leave, %d", i)
}
}
}

View file

@ -0,0 +1,227 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"fmt"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/concurrency"
"golang.org/x/net/context"
)
// TestElectionWait tests if followers can correctly wait for elections.
func TestElectionWait(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
leaders := 3
followers := 3
var clients []*clientv3.Client
newClient := makeMultiNodeClients(t, clus.cluster, &clients)
electedc := make(chan string)
nextc := []chan struct{}{}
// wait for all elections
donec := make(chan struct{})
for i := 0; i < followers; i++ {
nextc = append(nextc, make(chan struct{}))
go func(ch chan struct{}) {
for j := 0; j < leaders; j++ {
session, err := concurrency.NewSession(newClient())
if err != nil {
t.Error(err)
}
b := concurrency.NewElection(session, "test-election")
cctx, cancel := context.WithCancel(context.TODO())
defer cancel()
s, ok := <-b.Observe(cctx)
if !ok {
t.Fatalf("could not observe election; channel closed")
}
electedc <- string(s.Kvs[0].Value)
// wait for next election round
<-ch
session.Orphan()
}
donec <- struct{}{}
}(nextc[i])
}
// elect some leaders
for i := 0; i < leaders; i++ {
go func() {
session, err := concurrency.NewSession(newClient())
if err != nil {
t.Error(err)
}
defer session.Orphan()
e := concurrency.NewElection(session, "test-election")
ev := fmt.Sprintf("electval-%v", time.Now().UnixNano())
if err := e.Campaign(context.TODO(), ev); err != nil {
t.Fatalf("failed volunteer (%v)", err)
}
// wait for followers to accept leadership
for j := 0; j < followers; j++ {
s := <-electedc
if s != ev {
t.Errorf("wrong election value got %s, wanted %s", s, ev)
}
}
// let next leader take over
if err := e.Resign(context.TODO()); err != nil {
t.Fatalf("failed resign (%v)", err)
}
// tell followers to start listening for next leader
for j := 0; j < followers; j++ {
nextc[j] <- struct{}{}
}
}()
}
// wait on followers
for i := 0; i < followers; i++ {
<-donec
}
closeClients(t, clients)
}
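// Editorial sketch (not part of the vendored file): the campaign/observe/resign cycle
// that TestElectionWait above exercises, with a single candidate and a single observer.
// The session and election prefix are placeholders.
func exampleElection(s *concurrency.Session) error {
	e := concurrency.NewElection(s, "example-election")
	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()
	if err := e.Campaign(ctx, "candidate-value"); err != nil {
		return err
	}
	// Observe streams the current leader's value; the first receive reflects the Campaign above.
	resp, ok := <-e.Observe(ctx)
	if !ok || len(resp.Kvs) == 0 || string(resp.Kvs[0].Value) != "candidate-value" {
		return fmt.Errorf("unexpected observation: %+v", resp)
	}
	return e.Resign(ctx) // give up leadership so the next campaigner can win
}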
// TestElectionFailover tests that an election will fail over to another candidate when the current leader's session is closed.
func TestElectionFailover(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
cctx, cancel := context.WithCancel(context.TODO())
defer cancel()
ss := make([]*concurrency.Session, 3, 3)
for i := 0; i < 3; i++ {
var err error
ss[i], err = concurrency.NewSession(clus.clients[i])
if err != nil {
t.Error(err)
}
defer ss[i].Orphan()
}
// first leader (elected)
e := concurrency.NewElection(ss[0], "test-election")
if err := e.Campaign(context.TODO(), "foo"); err != nil {
t.Fatalf("failed volunteer (%v)", err)
}
// check first leader
resp, ok := <-e.Observe(cctx)
if !ok {
t.Fatalf("could not wait for first election; channel closed")
}
s := string(resp.Kvs[0].Value)
if s != "foo" {
t.Fatalf("wrong election result. got %s, wanted foo", s)
}
// next leader
electedc := make(chan struct{})
go func() {
ee := concurrency.NewElection(ss[1], "test-election")
if eer := ee.Campaign(context.TODO(), "bar"); eer != nil {
t.Fatal(eer)
}
electedc <- struct{}{}
}()
// invoke leader failover
if err := ss[0].Close(); err != nil {
t.Fatal(err)
}
// check new leader
e = concurrency.NewElection(ss[2], "test-election")
resp, ok = <-e.Observe(cctx)
if !ok {
t.Fatalf("could not wait for second election; channel closed")
}
s = string(resp.Kvs[0].Value)
if s != "bar" {
t.Fatalf("wrong election result. got %s, wanted bar", s)
}
// leader must ack election (otherwise, Campaign may see closed conn)
<-electedc
}
// TestElectionSessionRecampaign ensures that campaigning twice on the same election
// with the same lock will Proclaim instead of deadlocking.
func TestElectionSessionRecampaign(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.RandClient()
session, err := concurrency.NewSession(cli)
if err != nil {
t.Error(err)
}
defer session.Orphan()
e := concurrency.NewElection(session, "test-elect")
if err := e.Campaign(context.TODO(), "abc"); err != nil {
t.Fatal(err)
}
e2 := concurrency.NewElection(session, "test-elect")
if err := e2.Campaign(context.TODO(), "def"); err != nil {
t.Fatal(err)
}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
if resp := <-e.Observe(ctx); len(resp.Kvs) == 0 || string(resp.Kvs[0].Value) != "def" {
t.Fatalf("expected value=%q, got response %v", "def", resp)
}
}
// TestElectionOnPrefixOfExistingKey checks that a single
// candidate can be elected on a new key that is a prefix
// of an existing key. To wit, check for regression
// of bug #6278. https://github.com/coreos/etcd/issues/6278
//
func TestElectionOnPrefixOfExistingKey(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.RandClient()
if _, err := cli.Put(context.TODO(), "testa", "value"); err != nil {
t.Fatal(err)
}
s, serr := concurrency.NewSession(cli)
if serr != nil {
t.Fatal(serr)
}
e := concurrency.NewElection(s, "test")
ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
err := e.Campaign(ctx, "abc")
cancel()
if err != nil {
// after 5 seconds, deadlock results in
// 'context deadline exceeded' here.
t.Fatal(err)
}
}

1196
vendor/github.com/coreos/etcd/integration/v3_grpc_test.go generated vendored Normal file

File diff suppressed because it is too large

View file

@ -0,0 +1,774 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"fmt"
"testing"
"time"
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/mvcc/mvccpb"
"github.com/coreos/etcd/pkg/testutil"
)
// TestV3LeasePromote ensures the newly elected leader can promote itself
// to the primary lessor, refresh the leases and start to manage leases.
// TODO: use customized clock to make this test go faster?
func TestV3LeasePromote(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
// create lease
lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 5})
if err != nil {
t.Fatal(err)
}
if lresp.Error != "" {
t.Fatal(lresp.Error)
}
// wait until the lease is going to expire.
time.Sleep(time.Duration(lresp.TTL-1) * time.Second)
// kill the current leader, all leases should be refreshed.
toStop := clus.waitLeader(t, clus.Members)
clus.Members[toStop].Stop(t)
var toWait []*member
for i, m := range clus.Members {
if i != toStop {
toWait = append(toWait, m)
}
}
clus.waitLeader(t, toWait)
clus.Members[toStop].Restart(t)
clus.waitLeader(t, clus.Members)
// ensure lease is refreshed by waiting for a "long" time.
// it was going to expire anyway.
time.Sleep(3 * time.Second)
if !leaseExist(t, clus, lresp.ID) {
t.Error("unexpected lease not exists")
}
// let the lease expire. the total lease TTL is 5 seconds and we already
// waited 3 seconds, so 3 more seconds is enough.
time.Sleep(3 * time.Second)
if leaseExist(t, clus, lresp.ID) {
t.Error("unexpected lease exists")
}
}
// TestV3LeaseRevoke ensures a key is deleted once its lease is revoked.
func TestV3LeaseRevoke(t *testing.T) {
defer testutil.AfterTest(t)
testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
lc := toGRPC(clus.RandClient()).Lease
_, err := lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
return err
})
}
// TestV3LeaseGrantByID ensures leases may be created with a given ID.
func TestV3LeaseGrantByID(t *testing.T) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
// create fixed lease
lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(
context.TODO(),
&pb.LeaseGrantRequest{ID: 1, TTL: 1})
if err != nil {
t.Errorf("could not create lease 1 (%v)", err)
}
if lresp.ID != 1 {
t.Errorf("got id %v, wanted id %v", lresp.ID, 1)
}
// create duplicate fixed lease
lresp, err = toGRPC(clus.RandClient()).Lease.LeaseGrant(
context.TODO(),
&pb.LeaseGrantRequest{ID: 1, TTL: 1})
if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseExist) {
t.Error(err)
}
// create fresh fixed lease
lresp, err = toGRPC(clus.RandClient()).Lease.LeaseGrant(
context.TODO(),
&pb.LeaseGrantRequest{ID: 2, TTL: 1})
if err != nil {
t.Errorf("could not create lease 2 (%v)", err)
}
if lresp.ID != 2 {
t.Errorf("got id %v, wanted id %v", lresp.ID, 2)
}
}
// TestV3LeaseExpire ensures a key is deleted once its lease expires.
func TestV3LeaseExpire(t *testing.T) {
defer testutil.AfterTest(t)
testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
// let lease lapse; wait for deleted key
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
wStream, err := toGRPC(clus.RandClient()).Watch.Watch(ctx)
if err != nil {
return err
}
wreq := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
CreateRequest: &pb.WatchCreateRequest{
Key: []byte("foo"), StartRevision: 1}}}
if err := wStream.Send(wreq); err != nil {
return err
}
if _, err := wStream.Recv(); err != nil {
// the 'created' message
return err
}
if _, err := wStream.Recv(); err != nil {
// the 'put' message
return err
}
errc := make(chan error, 1)
go func() {
resp, err := wStream.Recv()
switch {
case err != nil:
errc <- err
case len(resp.Events) != 1:
fallthrough
case resp.Events[0].Type != mvccpb.DELETE:
errc <- fmt.Errorf("expected key delete, got %v", resp)
default:
errc <- nil
}
}()
select {
case <-time.After(15 * time.Second):
return fmt.Errorf("lease expiration too slow")
case err := <-errc:
return err
}
})
}
// TestV3LeaseKeepAlive ensures keepalive keeps the lease alive.
func TestV3LeaseKeepAlive(t *testing.T) {
defer testutil.AfterTest(t)
testLeaseRemoveLeasedKey(t, func(clus *ClusterV3, leaseID int64) error {
lc := toGRPC(clus.RandClient()).Lease
lreq := &pb.LeaseKeepAliveRequest{ID: leaseID}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
lac, err := lc.LeaseKeepAlive(ctx)
if err != nil {
return err
}
defer lac.CloseSend()
// renew long enough so lease would've expired otherwise
for i := 0; i < 3; i++ {
if err = lac.Send(lreq); err != nil {
return err
}
lresp, rxerr := lac.Recv()
if rxerr != nil {
return rxerr
}
if lresp.ID != leaseID {
return fmt.Errorf("expected lease ID %v, got %v", leaseID, lresp.ID)
}
time.Sleep(time.Duration(lresp.TTL/2) * time.Second)
}
_, err = lc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseID})
return err
})
}
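// Editorial sketch (not part of the vendored file): a single lease renewal through the
// clientv3 API, equivalent to one Send/Recv round of the keepalive stream used above.
// Assumes a connected *clientv3.Client and the clientv3 import, which this file does not use.
func exampleKeepAliveOnce(cli *clientv3.Client, id clientv3.LeaseID) (int64, error) {
	resp, err := cli.KeepAliveOnce(context.TODO(), id)
	if err != nil {
		return 0, err
	}
	return resp.TTL, nil // remaining TTL after the renewal
}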
// TestV3LeaseExists creates a lease on a random client and confirms it exists in the cluster.
func TestV3LeaseExists(t *testing.T) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
// create lease
ctx0, cancel0 := context.WithCancel(context.Background())
defer cancel0()
lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(
ctx0,
&pb.LeaseGrantRequest{TTL: 30})
if err != nil {
t.Fatal(err)
}
if lresp.Error != "" {
t.Fatal(lresp.Error)
}
if !leaseExist(t, clus, lresp.ID) {
t.Error("unexpected lease not exists")
}
}
// TestV3LeaseRenewStress keeps creating leases and renewing them immediately to ensure the renewals go through.
// it was observed that an immediate lease renewal after granting a lease through a follower resulted in a lease-not-found error.
// related issue https://github.com/coreos/etcd/issues/6978
func TestV3LeaseRenewStress(t *testing.T) {
testLeaseStress(t, stressLeaseRenew)
}
// TestV3LeaseTimeToLiveStress keeps creating leases and retrieving them immediately to ensure the leases can be retrieved.
// it was observed that an immediate lease retrieval after granting a lease through a follower resulted in a lease-not-found error.
// related issue https://github.com/coreos/etcd/issues/6978
func TestV3LeaseTimeToLiveStress(t *testing.T) {
testLeaseStress(t, stressLeaseTimeToLive)
}
func testLeaseStress(t *testing.T, stresser func(context.Context, pb.LeaseClient) error) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
errc := make(chan error)
for i := 0; i < 30; i++ {
for j := 0; j < 3; j++ {
go func(i int) { errc <- stresser(ctx, toGRPC(clus.Client(i)).Lease) }(j)
}
}
for i := 0; i < 90; i++ {
if err := <-errc; err != nil {
t.Fatal(err)
}
}
}
func stressLeaseRenew(tctx context.Context, lc pb.LeaseClient) (reterr error) {
defer func() {
if tctx.Err() != nil {
reterr = nil
}
}()
lac, err := lc.LeaseKeepAlive(tctx)
if err != nil {
return err
}
for tctx.Err() == nil {
resp, gerr := lc.LeaseGrant(tctx, &pb.LeaseGrantRequest{TTL: 60})
if gerr != nil {
continue
}
err = lac.Send(&pb.LeaseKeepAliveRequest{ID: resp.ID})
if err != nil {
continue
}
rresp, rxerr := lac.Recv()
if rxerr != nil {
continue
}
if rresp.TTL == 0 {
return fmt.Errorf("TTL shouldn't be 0 so soon")
}
}
return nil
}
func stressLeaseTimeToLive(tctx context.Context, lc pb.LeaseClient) (reterr error) {
defer func() {
if tctx.Err() != nil {
reterr = nil
}
}()
for tctx.Err() == nil {
resp, gerr := lc.LeaseGrant(tctx, &pb.LeaseGrantRequest{TTL: 60})
if gerr != nil {
continue
}
_, kerr := lc.LeaseTimeToLive(tctx, &pb.LeaseTimeToLiveRequest{ID: resp.ID})
if rpctypes.Error(kerr) == rpctypes.ErrLeaseNotFound {
return kerr
}
}
return nil
}
func TestV3PutOnNonExistLease(t *testing.T) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
badLeaseID := int64(0x12345678)
putr := &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: badLeaseID}
_, err := toGRPC(clus.RandClient()).KV.Put(ctx, putr)
if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseNotFound) {
t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCLeaseNotFound)
}
}
// TestV3GetNonExistLease tests that a non-existent lease is reported as a lease-not-found error by LeaseTimeToLive().
// A bug was found where a non-leader etcd server returned nil instead of a lease-not-found error, which caused the server to crash.
// related issue https://github.com/coreos/etcd/issues/6537
func TestV3GetNonExistLease(t *testing.T) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
leaseTTLr := &pb.LeaseTimeToLiveRequest{
ID: 123,
Keys: true,
}
for _, client := range clus.clients {
_, err := toGRPC(client).Lease.LeaseTimeToLive(ctx, leaseTTLr)
if !eqErrGRPC(err, rpctypes.ErrGRPCLeaseNotFound) {
t.Errorf("err = %v, want %v", err, rpctypes.ErrGRPCLeaseNotFound)
}
}
}
// TestV3LeaseSwitch tests a key can be switched from one lease to another.
func TestV3LeaseSwitch(t *testing.T) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
key := "foo"
// create lease
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
lresp1, err1 := toGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30})
if err1 != nil {
t.Fatal(err1)
}
lresp2, err2 := toGRPC(clus.RandClient()).Lease.LeaseGrant(ctx, &pb.LeaseGrantRequest{TTL: 30})
if err2 != nil {
t.Fatal(err2)
}
// attach key on lease1 then switch it to lease2
put1 := &pb.PutRequest{Key: []byte(key), Lease: lresp1.ID}
_, err := toGRPC(clus.RandClient()).KV.Put(ctx, put1)
if err != nil {
t.Fatal(err)
}
put2 := &pb.PutRequest{Key: []byte(key), Lease: lresp2.ID}
_, err = toGRPC(clus.RandClient()).KV.Put(ctx, put2)
if err != nil {
t.Fatal(err)
}
// revoke lease1 should not remove key
_, err = toGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp1.ID})
if err != nil {
t.Fatal(err)
}
rreq := &pb.RangeRequest{Key: []byte("foo")}
rresp, err := toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
if err != nil {
t.Fatal(err)
}
if len(rresp.Kvs) != 1 {
t.Fatalf("unexpect removal of key")
}
// revoke lease2 should remove key
_, err = toGRPC(clus.RandClient()).Lease.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lresp2.ID})
if err != nil {
t.Fatal(err)
}
rresp, err = toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
if err != nil {
t.Fatal(err)
}
if len(rresp.Kvs) != 0 {
t.Fatalf("lease removed but key remains")
}
}
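// Editorial sketch (not part of the vendored file): the same lease-attach and lease-switch
// flow through the clientv3 API. Assumes a connected *clientv3.Client and the clientv3
// import, which this file does not use.
func exampleLeaseSwitch(cli *clientv3.Client) error {
	ctx := context.TODO()
	l1, err := cli.Grant(ctx, 30)
	if err != nil {
		return err
	}
	l2, err := cli.Grant(ctx, 30)
	if err != nil {
		return err
	}
	// attach "foo" to lease 1, then re-put it under lease 2
	if _, err := cli.Put(ctx, "foo", "bar", clientv3.WithLease(l1.ID)); err != nil {
		return err
	}
	if _, err := cli.Put(ctx, "foo", "bar", clientv3.WithLease(l2.ID)); err != nil {
		return err
	}
	// revoking lease 1 must not delete the key; revoking lease 2 must
	if _, err := cli.Revoke(ctx, l1.ID); err != nil {
		return err
	}
	_, err = cli.Revoke(ctx, l2.ID)
	return err
}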
// TestV3LeaseFailover ensures the old leader drops lease keepalive requests within
// the election timeout after it loses its quorum, and that the new leader extends the
// TTL of the lease to at least TTL + election timeout.
func TestV3LeaseFailover(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
toIsolate := clus.waitLeader(t, clus.Members)
lc := toGRPC(clus.Client(toIsolate)).Lease
// create lease
lresp, err := lc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: 5})
if err != nil {
t.Fatal(err)
}
if lresp.Error != "" {
t.Fatal(lresp.Error)
}
// isolate the current leader with its followers.
clus.Members[toIsolate].Pause()
lreq := &pb.LeaseKeepAliveRequest{ID: lresp.ID}
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
mctx := metadata.NewContext(context.Background(), md)
ctx, cancel := context.WithCancel(mctx)
defer cancel()
lac, err := lc.LeaseKeepAlive(ctx)
if err != nil {
t.Fatal(err)
}
defer lac.CloseSend()
// send keep alive to old leader until the old leader starts
// to drop lease request.
var expectedExp time.Time
for {
if err = lac.Send(lreq); err != nil {
break
}
lkresp, rxerr := lac.Recv()
if rxerr != nil {
break
}
expectedExp = time.Now().Add(time.Duration(lkresp.TTL) * time.Second)
time.Sleep(time.Duration(lkresp.TTL/2) * time.Second)
}
clus.Members[toIsolate].Resume()
clus.waitLeader(t, clus.Members)
// lease should not have expired before the last received expiration deadline.
time.Sleep(expectedExp.Sub(time.Now()) - 500*time.Millisecond)
if !leaseExist(t, clus, lresp.ID) {
t.Error("unexpected lease not exists")
}
}
const fiveMinTTL int64 = 300
// TestV3LeaseRecoverAndRevoke ensures that revoking a lease after restart deletes the attached key.
func TestV3LeaseRecoverAndRevoke(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
kvc := toGRPC(clus.Client(0)).KV
lsc := toGRPC(clus.Client(0)).Lease
lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL})
if err != nil {
t.Fatal(err)
}
if lresp.Error != "" {
t.Fatal(lresp.Error)
}
_, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID})
if err != nil {
t.Fatal(err)
}
// restart server and ensure lease still exists
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
clus.waitLeader(t, clus.Members)
// overwrite old client with newly dialed connection
// otherwise, error with "grpc: RPC failed fast due to transport failure"
nc, err := NewClientV3(clus.Members[0])
if err != nil {
t.Fatal(err)
}
kvc = toGRPC(nc).KV
lsc = toGRPC(nc).Lease
defer nc.Close()
// revoke should delete the key
_, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID})
if err != nil {
t.Fatal(err)
}
rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
if err != nil {
t.Fatal(err)
}
if len(rresp.Kvs) != 0 {
t.Fatalf("lease removed but key remains")
}
}
// TestV3LeaseRevokeAndRecover ensures that revoked key stays deleted after restart.
func TestV3LeaseRevokeAndRecover(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
kvc := toGRPC(clus.Client(0)).KV
lsc := toGRPC(clus.Client(0)).Lease
lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL})
if err != nil {
t.Fatal(err)
}
if lresp.Error != "" {
t.Fatal(lresp.Error)
}
_, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID})
if err != nil {
t.Fatal(err)
}
// revoke should delete the key
_, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID})
if err != nil {
t.Fatal(err)
}
// restart server and ensure revoked key doesn't exist
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
clus.waitLeader(t, clus.Members)
// overwrite old client with newly dialed connection
// otherwise, error with "grpc: RPC failed fast due to transport failure"
nc, err := NewClientV3(clus.Members[0])
if err != nil {
t.Fatal(err)
}
kvc = toGRPC(nc).KV
defer nc.Close()
rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
if err != nil {
t.Fatal(err)
}
if len(rresp.Kvs) != 0 {
t.Fatalf("lease removed but key remains")
}
}
// TestV3LeaseRecoverKeyWithDetachedLease ensures that revoking a detached lease after restart
// does not delete the key.
func TestV3LeaseRecoverKeyWithDetachedLease(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
kvc := toGRPC(clus.Client(0)).KV
lsc := toGRPC(clus.Client(0)).Lease
lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL})
if err != nil {
t.Fatal(err)
}
if lresp.Error != "" {
t.Fatal(lresp.Error)
}
_, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID})
if err != nil {
t.Fatal(err)
}
// overwrite lease with none
_, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")})
if err != nil {
t.Fatal(err)
}
// restart server and ensure lease still exists
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
clus.waitLeader(t, clus.Members)
// overwrite old client with newly dialed connection
// otherwise, error with "grpc: RPC failed fast due to transport failure"
nc, err := NewClientV3(clus.Members[0])
if err != nil {
t.Fatal(err)
}
kvc = toGRPC(nc).KV
lsc = toGRPC(nc).Lease
defer nc.Close()
// revoke the detached lease
_, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: lresp.ID})
if err != nil {
t.Fatal(err)
}
rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
if err != nil {
t.Fatal(err)
}
if len(rresp.Kvs) != 1 {
t.Fatalf("only detached lease removed, key should remain")
}
}
func TestV3LeaseRecoverKeyWithMultipleLease(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
kvc := toGRPC(clus.Client(0)).KV
lsc := toGRPC(clus.Client(0)).Lease
var leaseIDs []int64
for i := 0; i < 2; i++ {
lresp, err := lsc.LeaseGrant(context.TODO(), &pb.LeaseGrantRequest{TTL: fiveMinTTL})
if err != nil {
t.Fatal(err)
}
if lresp.Error != "" {
t.Fatal(lresp.Error)
}
leaseIDs = append(leaseIDs, lresp.ID)
_, err = kvc.Put(context.TODO(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: lresp.ID})
if err != nil {
t.Fatal(err)
}
}
// restart server and ensure lease still exists
clus.Members[0].Stop(t)
clus.Members[0].Restart(t)
clus.waitLeader(t, clus.Members)
for i, leaseID := range leaseIDs {
if !leaseExist(t, clus, leaseID) {
t.Errorf("#%d: unexpected lease not exists", i)
}
}
// overwrite old client with newly dialed connection
// otherwise, error with "grpc: RPC failed fast due to transport failure"
nc, err := NewClientV3(clus.Members[0])
if err != nil {
t.Fatal(err)
}
kvc = toGRPC(nc).KV
lsc = toGRPC(nc).Lease
defer nc.Close()
// revoke the old lease
_, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseIDs[0]})
if err != nil {
t.Fatal(err)
}
// key should still exist
rresp, err := kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
if err != nil {
t.Fatal(err)
}
if len(rresp.Kvs) != 1 {
t.Fatalf("only detached lease removed, key should remain")
}
// revoke the latest lease
_, err = lsc.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: leaseIDs[1]})
if err != nil {
t.Fatal(err)
}
rresp, err = kvc.Range(context.TODO(), &pb.RangeRequest{Key: []byte("foo")})
if err != nil {
t.Fatal(err)
}
if len(rresp.Kvs) != 0 {
t.Fatalf("lease removed but key remains")
}
}
// acquireLeaseAndKey creates a new lease and attaches a key to it.
func acquireLeaseAndKey(clus *ClusterV3, key string) (int64, error) {
// create lease
lresp, err := toGRPC(clus.RandClient()).Lease.LeaseGrant(
context.TODO(),
&pb.LeaseGrantRequest{TTL: 1})
if err != nil {
return 0, err
}
if lresp.Error != "" {
return 0, fmt.Errorf(lresp.Error)
}
// attach to key
put := &pb.PutRequest{Key: []byte(key), Lease: lresp.ID}
if _, err := toGRPC(clus.RandClient()).KV.Put(context.TODO(), put); err != nil {
return 0, err
}
return lresp.ID, nil
}
// testLeaseRemoveLeasedKey performs some action while holding a lease with an
// attached key "foo", then confirms the key is gone.
func testLeaseRemoveLeasedKey(t *testing.T, act func(*ClusterV3, int64) error) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
leaseID, err := acquireLeaseAndKey(clus, "foo")
if err != nil {
t.Fatal(err)
}
if err = act(clus, leaseID); err != nil {
t.Fatal(err)
}
// confirm no key
rreq := &pb.RangeRequest{Key: []byte("foo")}
rresp, err := toGRPC(clus.RandClient()).KV.Range(context.TODO(), rreq)
if err != nil {
t.Fatal(err)
}
if len(rresp.Kvs) != 0 {
t.Fatalf("lease removed but key remains")
}
}
func leaseExist(t *testing.T, clus *ClusterV3, leaseID int64) bool {
l := toGRPC(clus.RandClient()).Lease
_, err := l.LeaseGrant(context.Background(), &pb.LeaseGrantRequest{ID: leaseID, TTL: 5})
if err == nil {
_, err = l.LeaseRevoke(context.Background(), &pb.LeaseRevokeRequest{ID: leaseID})
if err != nil {
t.Fatalf("failed to check lease %v", err)
}
return false
}
if eqErrGRPC(err, rpctypes.ErrGRPCLeaseExist) {
return true
}
t.Fatalf("unexpecter error %v", err)
return true
}
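// Editorial sketch (not part of the vendored file): an alternative existence check using
// LeaseTimeToLive through clientv3 instead of the grant/revoke probe above. Assumes a
// connected *clientv3.Client, the clientv3 import (not used by this file), and that a
// missing lease is reported with TTL == -1, as the v3 lease API documents.
func leaseExistsViaTTL(cli *clientv3.Client, id clientv3.LeaseID) (bool, error) {
	resp, err := cli.TimeToLive(context.Background(), id)
	if err != nil {
		return false, err
	}
	return resp.TTL != -1, nil
}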

View file

@ -0,0 +1,210 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"math/rand"
"sync"
"testing"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/concurrency"
"github.com/coreos/etcd/contrib/recipes"
"golang.org/x/net/context"
)
func TestMutexSingleNode(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
var clients []*clientv3.Client
testMutex(t, 5, makeSingleNodeClients(t, clus.cluster, &clients))
closeClients(t, clients)
}
func TestMutexMultiNode(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
var clients []*clientv3.Client
testMutex(t, 5, makeMultiNodeClients(t, clus.cluster, &clients))
closeClients(t, clients)
}
func testMutex(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
// stream lock acquisitions
lockedC := make(chan *concurrency.Mutex)
for i := 0; i < waiters; i++ {
go func() {
session, err := concurrency.NewSession(chooseClient())
if err != nil {
t.Error(err)
}
m := concurrency.NewMutex(session, "test-mutex")
if err := m.Lock(context.TODO()); err != nil {
t.Fatalf("could not wait on lock (%v)", err)
}
lockedC <- m
}()
}
// unlock locked mutexes
timerC := time.After(time.Duration(waiters) * time.Second)
for i := 0; i < waiters; i++ {
select {
case <-timerC:
t.Fatalf("timed out waiting for lock %d", i)
case m := <-lockedC:
// lock acquired with m
select {
case <-lockedC:
t.Fatalf("lock %d followers did not wait", i)
default:
}
if err := m.Unlock(context.TODO()); err != nil {
t.Fatalf("could not release lock (%v)", err)
}
}
}
}
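// Editorial sketch (not part of the vendored file): the lock/unlock cycle the helper above
// stresses, reduced to a single owner. The session and key prefix are placeholders.
func exampleMutex(s *concurrency.Session) error {
	m := concurrency.NewMutex(s, "example-mutex")
	if err := m.Lock(context.TODO()); err != nil { // blocks until the lock is acquired
		return err
	}
	// ... critical section ...
	return m.Unlock(context.TODO()) // deletes the owner key so waiters can proceed
}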
// TestMutexSessionRelock ensures that acquiring the same lock with the same
// session will not result in deadlock.
func TestMutexSessionRelock(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
session, err := concurrency.NewSession(clus.RandClient())
if err != nil {
t.Error(err)
}
m := concurrency.NewMutex(session, "test-mutex")
if err := m.Lock(context.TODO()); err != nil {
t.Fatal(err)
}
m2 := concurrency.NewMutex(session, "test-mutex")
if err := m2.Lock(context.TODO()); err != nil {
t.Fatal(err)
}
}
func BenchmarkMutex4Waiters(b *testing.B) {
// XXX switch tests to use TB interface
clus := NewClusterV3(nil, &ClusterConfig{Size: 3})
defer clus.Terminate(nil)
for i := 0; i < b.N; i++ {
testMutex(nil, 4, func() *clientv3.Client { return clus.RandClient() })
}
}
func TestRWMutexSingleNode(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
testRWMutex(t, 5, func() *clientv3.Client { return clus.clients[0] })
}
func TestRWMutexMultiNode(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
testRWMutex(t, 5, func() *clientv3.Client { return clus.RandClient() })
}
func testRWMutex(t *testing.T, waiters int, chooseClient func() *clientv3.Client) {
// stream rwlock acquisitions
rlockedC := make(chan *recipe.RWMutex, 1)
wlockedC := make(chan *recipe.RWMutex, 1)
for i := 0; i < waiters; i++ {
go func() {
session, err := concurrency.NewSession(chooseClient())
if err != nil {
t.Error(err)
}
rwm := recipe.NewRWMutex(session, "test-rwmutex")
if rand.Intn(2) == 0 {
if err := rwm.RLock(); err != nil {
t.Fatalf("could not rlock (%v)", err)
}
rlockedC <- rwm
} else {
if err := rwm.Lock(); err != nil {
t.Fatalf("could not lock (%v)", err)
}
wlockedC <- rwm
}
}()
}
// unlock locked rwmutexes
timerC := time.After(time.Duration(waiters) * time.Second)
for i := 0; i < waiters; i++ {
select {
case <-timerC:
t.Fatalf("timed out waiting for lock %d", i)
case wl := <-wlockedC:
select {
case <-rlockedC:
t.Fatalf("rlock %d readers did not wait", i)
default:
}
if err := wl.Unlock(); err != nil {
t.Fatalf("could not release lock (%v)", err)
}
case rl := <-rlockedC:
select {
case <-wlockedC:
t.Fatalf("rlock %d writers did not wait", i)
default:
}
if err := rl.RUnlock(); err != nil {
t.Fatalf("could not release rlock (%v)", err)
}
}
}
}
func makeClients(t *testing.T, clients *[]*clientv3.Client, choose func() *member) func() *clientv3.Client {
var mu sync.Mutex
*clients = nil
return func() *clientv3.Client {
cli, err := NewClientV3(choose())
if err != nil {
t.Fatalf("cannot create client: %v", err)
}
mu.Lock()
*clients = append(*clients, cli)
mu.Unlock()
return cli
}
}
func makeSingleNodeClients(t *testing.T, clus *cluster, clients *[]*clientv3.Client) func() *clientv3.Client {
return makeClients(t, clients, func() *member {
return clus.Members[0]
})
}
func makeMultiNodeClients(t *testing.T, clus *cluster, clients *[]*clientv3.Client) func() *clientv3.Client {
return makeClients(t, clients, func() *member {
return clus.Members[rand.Intn(len(clus.Members))]
})
}
func closeClients(t *testing.T, clients []*clientv3.Client) {
for _, cli := range clients {
if err := cli.Close(); err != nil {
t.Fatal(err)
}
}
}

View file

@ -0,0 +1,49 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"testing"
"time"
"google.golang.org/grpc"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/testutil"
"golang.org/x/net/context"
)
// TestV3MaintenanceHashInflight ensures that an inflight Hash call
// to an embedded EtcdServer that is being stopped does not trigger a panic.
func TestV3MaintenanceHashInflight(t *testing.T) {
defer testutil.AfterTest(t)
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
cli := clus.RandClient()
mvc := toGRPC(cli).Maintenance
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
donec := make(chan struct{})
go func() {
defer close(donec)
mvc.Hash(ctx, &pb.HashRequest{}, grpc.FailFast(false))
}()
clus.Members[0].s.HardStop()
cancel()
<-donec
}

View file

@ -0,0 +1,225 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"fmt"
"math/rand"
"sync/atomic"
"testing"
"github.com/coreos/etcd/contrib/recipes"
)
const (
manyQueueClients = 3
queueItemsPerClient = 2
)
// TestQueueOneReaderOneWriter confirms the queue is FIFO
func TestQueueOneReaderOneWriter(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
done := make(chan struct{})
go func() {
defer func() {
done <- struct{}{}
}()
etcdc := clus.RandClient()
q := recipe.NewQueue(etcdc, "testq")
for i := 0; i < 5; i++ {
if err := q.Enqueue(fmt.Sprintf("%d", i)); err != nil {
t.Fatalf("error enqueuing (%v)", err)
}
}
}()
etcdc := clus.RandClient()
q := recipe.NewQueue(etcdc, "testq")
for i := 0; i < 5; i++ {
s, err := q.Dequeue()
if err != nil {
t.Fatalf("error dequeueing (%v)", err)
}
if s != fmt.Sprintf("%d", i) {
t.Fatalf("expected dequeue value %v, got %v", s, i)
}
}
<-done
}
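// Editorial sketch (not part of the vendored file): the enqueue/dequeue pair exercised
// above. "cli" is assumed to be a connected *clientv3.Client and the clientv3 import is
// an editorial assumption; the queue prefix is a placeholder. Dequeue blocks until an
// item is available.
func exampleQueue(cli *clientv3.Client) (string, error) {
	q := recipe.NewQueue(cli, "example-queue")
	if err := q.Enqueue("item-0"); err != nil {
		return "", err
	}
	return q.Dequeue() // returns "item-0" in FIFO order
}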
func TestQueueManyReaderOneWriter(t *testing.T) {
testQueueNReaderMWriter(t, manyQueueClients, 1)
}
func TestQueueOneReaderManyWriter(t *testing.T) {
testQueueNReaderMWriter(t, 1, manyQueueClients)
}
func TestQueueManyReaderManyWriter(t *testing.T) {
testQueueNReaderMWriter(t, manyQueueClients, manyQueueClients)
}
// BenchmarkQueue benchmarks Queues using many/many readers/writers
func BenchmarkQueue(b *testing.B) {
// XXX switch tests to use TB interface
clus := NewClusterV3(nil, &ClusterConfig{Size: 3})
defer clus.Terminate(nil)
for i := 0; i < b.N; i++ {
testQueueNReaderMWriter(nil, manyQueueClients, manyQueueClients)
}
}
// TestPrQueueOneReaderOneWriter tests whether priority queues respect priorities.
func TestPrQueueOneReaderOneWriter(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
// write out five items with random priority
etcdc := clus.RandClient()
q := recipe.NewPriorityQueue(etcdc, "testprq")
for i := 0; i < 5; i++ {
// use priorities in [0, 2] so priority collisions exercise the sequence keys
pr := uint16(rand.Intn(3))
if err := q.Enqueue(fmt.Sprintf("%d", pr), pr); err != nil {
t.Fatalf("error enqueuing (%v)", err)
}
}
// read back items; confirm priority order is respected
lastPr := -1
for i := 0; i < 5; i++ {
s, err := q.Dequeue()
if err != nil {
t.Fatalf("error dequeueing (%v)", err)
}
curPr := 0
if _, err := fmt.Sscanf(s, "%d", &curPr); err != nil {
t.Fatalf(`error parsing item "%s" (%v)`, s, err)
}
if lastPr > curPr {
t.Fatalf("expected priority %v > %v", curPr, lastPr)
}
}
}
func TestPrQueueManyReaderManyWriter(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
rqs := newPriorityQueues(clus, manyQueueClients)
wqs := newPriorityQueues(clus, manyQueueClients)
testReadersWriters(t, rqs, wqs)
}
// BenchmarkPrQueueOneReaderOneWriter benchmarks priority queues using one reader and one writer
func BenchmarkPrQueueOneReaderOneWriter(b *testing.B) {
// XXX switch tests to use TB interface
clus := NewClusterV3(nil, &ClusterConfig{Size: 3})
defer clus.Terminate(nil)
rqs := newPriorityQueues(clus, 1)
wqs := newPriorityQueues(clus, 1)
for i := 0; i < b.N; i++ {
testReadersWriters(nil, rqs, wqs)
}
}
func testQueueNReaderMWriter(t *testing.T, n int, m int) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
testReadersWriters(t, newQueues(clus, n), newQueues(clus, m))
}
func newQueues(clus *ClusterV3, n int) (qs []testQueue) {
for i := 0; i < n; i++ {
etcdc := clus.RandClient()
qs = append(qs, recipe.NewQueue(etcdc, "q"))
}
return qs
}
func newPriorityQueues(clus *ClusterV3, n int) (qs []testQueue) {
for i := 0; i < n; i++ {
etcdc := clus.RandClient()
q := &flatPriorityQueue{recipe.NewPriorityQueue(etcdc, "prq")}
qs = append(qs, q)
}
return qs
}
func testReadersWriters(t *testing.T, rqs []testQueue, wqs []testQueue) {
rerrc := make(chan error)
werrc := make(chan error)
manyWriters(wqs, queueItemsPerClient, werrc)
manyReaders(rqs, len(wqs)*queueItemsPerClient, rerrc)
for range wqs {
if err := <-werrc; err != nil {
t.Errorf("error writing (%v)", err)
}
}
for range rqs {
if err := <-rerrc; err != nil {
t.Errorf("error reading (%v)", err)
}
}
}
func manyReaders(qs []testQueue, totalReads int, errc chan<- error) {
var rxReads int32
for _, q := range qs {
go func(q testQueue) {
for {
total := atomic.AddInt32(&rxReads, 1)
if int(total) > totalReads {
break
}
if _, err := q.Dequeue(); err != nil {
errc <- err
return
}
}
errc <- nil
}(q)
}
}
func manyWriters(qs []testQueue, writesEach int, errc chan<- error) {
for _, q := range qs {
go func(q testQueue) {
for j := 0; j < writesEach; j++ {
if err := q.Enqueue("foo"); err != nil {
errc <- err
return
}
}
errc <- nil
}(q)
}
}
type testQueue interface {
Enqueue(val string) error
Dequeue() (string, error)
}
type flatPriorityQueue struct{ *recipe.PriorityQueue }
func (q *flatPriorityQueue) Enqueue(val string) error {
// randomized to stress dequeuing logic; order isn't important
return q.PriorityQueue.Enqueue(val, uint16(rand.Intn(2)))
}
func (q *flatPriorityQueue) Dequeue() (string, error) {
return q.PriorityQueue.Dequeue()
}

View file

@ -0,0 +1,246 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"fmt"
"math/rand"
"strconv"
"testing"
v3 "github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/concurrency"
"golang.org/x/net/context"
)
// TestSTMConflict tests that conflicts are retried.
func TestSTMConflict(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
etcdc := clus.RandClient()
keys := make([]string, 5)
for i := 0; i < len(keys); i++ {
keys[i] = fmt.Sprintf("foo-%d", i)
if _, err := etcdc.Put(context.TODO(), keys[i], "100"); err != nil {
t.Fatalf("could not make key (%v)", err)
}
}
errc := make(chan error)
for i := range keys {
curEtcdc := clus.RandClient()
srcKey := keys[i]
applyf := func(stm concurrency.STM) error {
src := stm.Get(srcKey)
// must be different key to avoid double-adding
dstKey := srcKey
for dstKey == srcKey {
dstKey = keys[rand.Intn(len(keys))]
}
dst := stm.Get(dstKey)
srcV, _ := strconv.ParseInt(src, 10, 64)
dstV, _ := strconv.ParseInt(dst, 10, 64)
if srcV == 0 {
// can't rand.Intn on 0, so skip this transaction
return nil
}
xfer := int64(rand.Intn(int(srcV)) / 2)
stm.Put(srcKey, fmt.Sprintf("%d", srcV-xfer))
stm.Put(dstKey, fmt.Sprintf("%d", dstV+xfer))
return nil
}
go func() {
_, err := concurrency.NewSTMRepeatable(context.TODO(), curEtcdc, applyf)
errc <- err
}()
}
// wait for txns
for range keys {
if err := <-errc; err != nil {
t.Fatalf("apply failed (%v)", err)
}
}
// ensure sum matches initial sum
sum := 0
for _, oldkey := range keys {
rk, err := etcdc.Get(context.TODO(), oldkey)
if err != nil {
t.Fatalf("couldn't fetch key %s (%v)", oldkey, err)
}
v, _ := strconv.ParseInt(string(rk.Kvs[0].Value), 10, 64)
sum += int(v)
}
if sum != len(keys)*100 {
t.Fatalf("bad sum. got %d, expected %d", sum, len(keys)*100)
}
}
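// Editorial sketch (not part of the vendored file): the smallest useful STM transaction of
// the kind stressed above: read a counter, write it back incremented, and let the STM
// runtime retry on conflict. "cli" and "key" are placeholders.
func exampleSTMIncrement(cli *v3.Client, key string) error {
	apply := func(stm concurrency.STM) error {
		cur, _ := strconv.ParseInt(stm.Get(key), 10, 64) // a missing key reads as "" and parses to 0
		stm.Put(key, fmt.Sprintf("%d", cur+1))
		return nil
	}
	_, err := concurrency.NewSTMRepeatable(context.TODO(), cli, apply)
	return err
}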
// TestSTMPutNewKey confirms a STM put on a new key is visible after commit.
func TestSTMPutNewKey(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
etcdc := clus.RandClient()
applyf := func(stm concurrency.STM) error {
stm.Put("foo", "bar")
return nil
}
if _, err := concurrency.NewSTMRepeatable(context.TODO(), etcdc, applyf); err != nil {
t.Fatalf("error on stm txn (%v)", err)
}
resp, err := etcdc.Get(context.TODO(), "foo")
if err != nil {
t.Fatalf("error fetching key (%v)", err)
}
if string(resp.Kvs[0].Value) != "bar" {
t.Fatalf("bad value. got %+v, expected 'bar' value", resp)
}
}
// TestSTMAbort tests that an aborted txn does not modify any keys.
func TestSTMAbort(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
etcdc := clus.RandClient()
ctx, cancel := context.WithCancel(context.TODO())
applyf := func(stm concurrency.STM) error {
stm.Put("foo", "baz")
cancel()
stm.Put("foo", "bap")
return nil
}
if _, err := concurrency.NewSTMRepeatable(ctx, etcdc, applyf); err == nil {
t.Fatalf("no error on stm txn")
}
resp, err := etcdc.Get(context.TODO(), "foo")
if err != nil {
t.Fatalf("error fetching key (%v)", err)
}
if len(resp.Kvs) != 0 {
t.Fatalf("bad value. got %+v, expected nothing", resp)
}
}
// TestSTMSerialize tests that serializable isolation is honored by the serializable STM.
func TestSTMSerialize(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 3})
defer clus.Terminate(t)
etcdc := clus.RandClient()
// set up initial keys
keys := make([]string, 5)
for i := 0; i < len(keys); i++ {
keys[i] = fmt.Sprintf("foo-%d", i)
}
// update keys in full batches
updatec := make(chan struct{})
go func() {
defer close(updatec)
for i := 0; i < 5; i++ {
s := fmt.Sprintf("%d", i)
ops := []v3.Op{}
for _, k := range keys {
ops = append(ops, v3.OpPut(k, s))
}
if _, err := etcdc.Txn(context.TODO()).Then(ops...).Commit(); err != nil {
t.Fatalf("couldn't put keys (%v)", err)
}
updatec <- struct{}{}
}
}()
// read all keys in txn, make sure all values match
errc := make(chan error)
for range updatec {
curEtcdc := clus.RandClient()
applyf := func(stm concurrency.STM) error {
vs := []string{}
for i := range keys {
vs = append(vs, stm.Get(keys[i]))
}
for i := range vs {
if vs[0] != vs[i] {
return fmt.Errorf("got vs[%d] = %v, want %v", i, vs[i], vs[0])
}
}
return nil
}
go func() {
_, err := concurrency.NewSTMSerializable(context.TODO(), curEtcdc, applyf)
errc <- err
}()
}
for i := 0; i < 5; i++ {
if err := <-errc; err != nil {
t.Error(err)
}
}
}
// TestSTMApplyOnConcurrentDeletion ensures that a concurrent key deletion
// fails the first GET revision comparison within the STM and triggers a retry.
func TestSTMApplyOnConcurrentDeletion(t *testing.T) {
clus := NewClusterV3(t, &ClusterConfig{Size: 1})
defer clus.Terminate(t)
etcdc := clus.RandClient()
if _, err := etcdc.Put(context.TODO(), "foo", "bar"); err != nil {
t.Fatal(err)
}
donec, readyc := make(chan struct{}), make(chan struct{})
go func() {
<-readyc
if _, err := etcdc.Delete(context.TODO(), "foo"); err != nil {
t.Fatal(err)
}
close(donec)
}()
try := 0
applyf := func(stm concurrency.STM) error {
try++
stm.Get("foo")
if try == 1 {
// trigger delete to make GET rev comparison outdated
close(readyc)
<-donec
}
stm.Put("foo2", "bar2")
return nil
}
if _, err := concurrency.NewSTMRepeatable(context.TODO(), etcdc, applyf); err != nil {
t.Fatalf("error on stm txn (%v)", err)
}
if try != 2 {
t.Fatalf("STM apply expected to run twice, got %d", try)
}
resp, err := etcdc.Get(context.TODO(), "foo2")
if err != nil {
t.Fatalf("error fetching key (%v)", err)
}
if string(resp.Kvs[0].Value) != "bar2" {
t.Fatalf("bad value. got %+v, expected 'bar2' value", resp)
}
}

File diff suppressed because it is too large