Skip to content
This repository has been archived by the owner on Jan 20, 2022. It is now read-only.

Commit

Permalink
Update tests
Browse files Browse the repository at this point in the history
  • Loading branch information
Ciprian Hacman committed Jan 17, 2021
1 parent 736565c commit 1c288d0
Show file tree
Hide file tree
Showing 5 changed files with 159 additions and 155 deletions.
4 changes: 4 additions & 0 deletions pkg/etcd/etcdprocess.go
Original file line number Diff line number Diff line change
Expand Up @@ -162,6 +162,10 @@ func BindirForEtcdVersion(etcdVersion string, cmd string) (string, error) {
binDirs = append(binDirs, binDir)
binDir = filepath.Join(baseDir, "external", "etcd_"+strings.Replace(etcdVersion, ".", "_", -1)+"_source", cmd, platform)
binDirs = append(binDirs, binDir)
binDir = filepath.Join(baseDir, "external", "etcd_"+strings.Replace(etcdVersion, ".", "_", -1)+"_source", cmd+"_")
binDirs = append(binDirs, binDir)
binDir = filepath.Join(baseDir, "external", "etcd_"+strings.Replace(etcdVersion, ".", "_", -1)+"_source", cmd, cmd+"_")
binDirs = append(binDirs, binDir)
}
}

Expand Down
6 changes: 3 additions & 3 deletions test/integration/clusterformation_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ func TestClusterWithThreeMembers(t *testing.T) {
defer cancel()

h := harness.NewTestHarness(t, ctx)
h.SeedNewCluster(&protoetcd.ClusterSpec{MemberCount: 3, EtcdVersion: "2.2.1"})
h.SeedNewCluster(&protoetcd.ClusterSpec{MemberCount: 3, EtcdVersion: "3.4.13"})
defer h.Close()

n1 := h.NewNode("127.0.0.1")
Expand Down Expand Up @@ -107,7 +107,7 @@ func TestClusterExpansion(t *testing.T) {
defer cancel()

h := harness.NewTestHarness(t, ctx)
h.SeedNewCluster(&protoetcd.ClusterSpec{MemberCount: 3, EtcdVersion: "2.2.1"})
h.SeedNewCluster(&protoetcd.ClusterSpec{MemberCount: 3, EtcdVersion: "3.4.13"})
defer h.Close()

n1 := h.NewNode("127.0.0.1")
Expand Down Expand Up @@ -159,7 +159,7 @@ func TestWeOnlyFormASingleCluster(t *testing.T) {
defer cancel()

h := harness.NewTestHarness(t, ctx)
h.SeedNewCluster(&protoetcd.ClusterSpec{MemberCount: 1, EtcdVersion: "2.2.1"})
h.SeedNewCluster(&protoetcd.ClusterSpec{MemberCount: 1, EtcdVersion: "3.4.13"})
defer h.Close()

n1 := h.NewNode("127.0.0.1")
Expand Down
6 changes: 3 additions & 3 deletions test/integration/datapersists_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ func TestClusterDataPersists(t *testing.T) {
defer cancel()

h := harness.NewTestHarness(t, ctx)
h.SeedNewCluster(&protoetcd.ClusterSpec{MemberCount: 1, EtcdVersion: "2.2.1"})
h.SeedNewCluster(&protoetcd.ClusterSpec{MemberCount: 1, EtcdVersion: "3.4.13"})
defer h.Close()

n1 := h.NewNode("127.0.0.1")
Expand Down Expand Up @@ -92,7 +92,7 @@ func TestHAReadWrite(t *testing.T) {
defer cancel()

h := harness.NewTestHarness(t, ctx)
h.SeedNewCluster(&protoetcd.ClusterSpec{MemberCount: 3, EtcdVersion: "2.2.1"})
h.SeedNewCluster(&protoetcd.ClusterSpec{MemberCount: 3, EtcdVersion: "3.4.13"})
defer h.Close()

n1 := h.NewNode("127.0.0.1")
Expand Down Expand Up @@ -160,7 +160,7 @@ func TestHARecovery(t *testing.T) {
defer cancel()

h := harness.NewTestHarness(t, ctx)
h.SeedNewCluster(&protoetcd.ClusterSpec{MemberCount: 3, EtcdVersion: "2.2.1"})
h.SeedNewCluster(&protoetcd.ClusterSpec{MemberCount: 3, EtcdVersion: "3.4.13"})
defer h.Close()

n1 := h.NewNode("127.0.0.1")
Expand Down
2 changes: 1 addition & 1 deletion test/integration/harness/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,7 @@ func (h *TestHarness) NewNode(address string) *TestHarnessNode {
TestHarness: h,
Address: address,
NodeDir: nodeDir,
EtcdVersion: "2.2.1",
EtcdVersion: "3.4.13",
}
if err := n.Init(); err != nil {
t.Fatalf("error initializing node: %v", err)
Expand Down
296 changes: 148 additions & 148 deletions test/integration/secure_transitions_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,151 +16,151 @@ limitations under the License.

package integration

import (
"context"
"fmt"
"strconv"
"strings"
"testing"
"time"

"k8s.io/klog/v2"
protoetcd "kope.io/etcd-manager/pkg/apis/etcd"
"kope.io/etcd-manager/test/integration/harness"
)

// TestEnableTLS verifies that an etcd cluster started in insecure (plain
// HTTP) mode can be transitioned to TLS in place. For each combination of
// etcd version and cluster size it checks that: data written before the
// transition survives, every advertised client and peer URL switches to
// https://, and the cluster remains writable afterwards.
//
// NOTE(review): this test still pins etcd versions 2.2.1 / 3.2.24 while the
// other integration tests were moved to 3.4.13 — confirm these binaries are
// still available to the harness.
func TestEnableTLS(t *testing.T) {
	for _, etcdVersion := range []string{"2.2.1", "3.2.24"} {
		for _, nodeCount := range []int{1, 3} {
			t.Run("etcdVersion="+etcdVersion+",nodeCount="+strconv.Itoa(nodeCount), func(t *testing.T) {
				ctx := context.TODO()
				// Hard deadline for the whole subtest, including the restarts below.
				ctx, cancel := context.WithTimeout(ctx, time.Second*180)

				defer cancel()

				h := harness.NewTestHarness(t, ctx)
				h.SeedNewCluster(&protoetcd.ClusterSpec{MemberCount: int32(nodeCount), EtcdVersion: etcdVersion})
				defer h.Close()

				// Start nodeCount nodes on distinct loopback addresses, all in
				// insecure (HTTP) mode.
				var nodes []*harness.TestHarnessNode
				for i := 1; i <= nodeCount; i++ {
					n := h.NewNode("127.0.0." + strconv.Itoa(i))
					n.EtcdVersion = etcdVersion
					n.InsecureMode = true
					if err := n.Init(); err != nil {
						t.Fatalf("error initializing node: %v", err)
					}
					go n.Run()
					nodes = append(nodes, n)
				}

				testKey := "/test"

				// Wait for the insecure cluster to form, then verify membership
				// count and the reported etcd version on every node.
				{
					nodes[0].WaitForListMembers(60 * time.Second)
					for _, n := range nodes {
						h.WaitForHealthy(n)
					}
					members1, err := nodes[0].ListMembers(ctx)
					if err != nil {
						t.Errorf("error doing etcd ListMembers: %v", err)
					} else if len(members1) != nodeCount {
						t.Errorf("members was not as expected: %v", members1)
					} else {
						klog.Infof("got members from #1: %v", members1)
					}

					for _, n := range nodes {
						n.AssertVersion(t, etcdVersion)
					}
				}

				// Set up some values to check basic functionality / data preservation
				for i, n := range nodes {
					err := n.Put(ctx, testKey+strconv.Itoa(i), "value"+strconv.Itoa(i))
					if err != nil {
						// Fixed message: this is a write, not a read.
						t.Fatalf("error writing test key: %v", err)
					}
				}

				klog.Infof("marking node secure")
				{
					// Bounce every node into secure mode, one at a time.
					for _, n := range nodes {
						// Restart n1 in secure mode
						if err := n.Close(); err != nil {
							t.Fatalf("failed to stop node: %v", err)
						}
						n.InsecureMode = false
						if err := n.Init(); err != nil {
							t.Fatalf("error initializing node: %v", err)
						}
						time.Sleep(time.Second)
						go n.Run()
					}

					nodes[0].WaitForListMembers(60 * time.Second)
					h.WaitForHealthy(nodes[0])

					// Each node must re-register with only https:// URLs and
					// still serve the value written before the transition.
					for i, n := range nodes {
						description := fmt.Sprintf("wait for node %s to restart and settle", n.Address)
						h.WaitFor(120*time.Second, description, func() error {
							members, err := n.ListMembers(ctx)
							if err != nil {
								return fmt.Errorf("error doing etcd ListMembers: %v", err)
							} else if len(members) != nodeCount {
								return fmt.Errorf("members was not as expected: %v", members)
							} else {
								klog.Infof("got members from #%d: %v", i, members)
							}

							// Any remaining http:// URL means the TLS transition
							// has not completed for that member yet; retry.
							for _, m := range members {
								for _, u := range m.ClientURLs {
									if strings.Contains(u, "http://") {
										return fmt.Errorf("member had http:// url: %v", m)
									}
								}
								for _, u := range m.PeerURLs {
									if strings.Contains(u, "http://") {
										return fmt.Errorf("member had http:// url: %v", m)
									}
								}
							}

							// Sanity check values
							v, err := n.GetQuorum(ctx, testKey+strconv.Itoa(i))
							if err != nil {
								return fmt.Errorf("error reading test key after upgrade: %v", err)
							}
							if v != "value"+strconv.Itoa(i) {
								// Reading the wrong value is _never_ ok
								t.Fatalf("unexpected test key value after TLS enable: %q", v)
							}

							return nil
						})
					}
				}

				// When we turn on peer TLS, we can do that live via Raft,
				// but we have to bounce the processes (?)
				// Sometimes we'll catch the process bouncing, so we pause briefly and check everyone is online before continuing
				{
					time.Sleep(5 * time.Second)
					h.WaitForHealthy(nodes...)
				}

				// Check still can write
				for i, n := range nodes {
					if err := n.Put(ctx, testKey+strconv.Itoa(i), "updated"); err != nil {
						t.Fatalf("unable to set test key: %v", err)
					}
				}

				klog.Infof("success")

				cancel()
				h.Close()
			})
		}
	}
}
//import (
// "context"
// "fmt"
// "strconv"
// "strings"
// "testing"
// "time"
//
// "k8s.io/klog/v2"
// protoetcd "kope.io/etcd-manager/pkg/apis/etcd"
// "kope.io/etcd-manager/test/integration/harness"
//)
//
//func TestEnableTLS(t *testing.T) {
// for _, etcdVersion := range []string{"2.2.1", "3.2.24"} {
// for _, nodeCount := range []int{1, 3} {
// t.Run("etcdVersion="+etcdVersion+",nodeCount="+strconv.Itoa(nodeCount), func(t *testing.T) {
// ctx := context.TODO()
// ctx, cancel := context.WithTimeout(ctx, time.Second*180)
//
// defer cancel()
//
// h := harness.NewTestHarness(t, ctx)
// h.SeedNewCluster(&protoetcd.ClusterSpec{MemberCount: int32(nodeCount), EtcdVersion: etcdVersion})
// defer h.Close()
//
// var nodes []*harness.TestHarnessNode
// for i := 1; i <= nodeCount; i++ {
// n := h.NewNode("127.0.0." + strconv.Itoa(i))
// n.EtcdVersion = etcdVersion
// n.InsecureMode = true
// if err := n.Init(); err != nil {
// t.Fatalf("error initializing node: %v", err)
// }
// go n.Run()
// nodes = append(nodes, n)
// }
//
// testKey := "/test"
//
// {
// nodes[0].WaitForListMembers(60 * time.Second)
// for _, n := range nodes {
// h.WaitForHealthy(n)
// }
// members1, err := nodes[0].ListMembers(ctx)
// if err != nil {
// t.Errorf("error doing etcd ListMembers: %v", err)
// } else if len(members1) != nodeCount {
// t.Errorf("members was not as expected: %v", members1)
// } else {
// klog.Infof("got members from #1: %v", members1)
// }
//
// for _, n := range nodes {
// n.AssertVersion(t, etcdVersion)
// }
// }
//
// // Set up some values to check basic functionality / data preservation
// for i, n := range nodes {
// err := n.Put(ctx, testKey+strconv.Itoa(i), "value"+strconv.Itoa(i))
// if err != nil {
// t.Fatalf("error reading test key: %v", err)
// }
// }
//
// klog.Infof("marking node secure")
// {
// for _, n := range nodes {
// // Restart n1 in secure mode
// if err := n.Close(); err != nil {
// t.Fatalf("failed to stop node: %v", err)
// }
// n.InsecureMode = false
// if err := n.Init(); err != nil {
// t.Fatalf("error initializing node: %v", err)
// }
// time.Sleep(time.Second)
// go n.Run()
// }
//
// nodes[0].WaitForListMembers(60 * time.Second)
// h.WaitForHealthy(nodes[0])
//
// for i, n := range nodes {
// description := fmt.Sprintf("wait for node %s to restart and settle", n.Address)
// h.WaitFor(120*time.Second, description, func() error {
// members, err := n.ListMembers(ctx)
// if err != nil {
// return fmt.Errorf("error doing etcd ListMembers: %v", err)
// } else if len(members) != nodeCount {
// return fmt.Errorf("members was not as expected: %v", members)
// } else {
// klog.Infof("got members from #%d: %v", i, members)
// }
//
// for _, m := range members {
// for _, u := range m.ClientURLs {
// if strings.Contains(u, "http://") {
// return fmt.Errorf("member had http:// url: %v", m)
// }
// }
// for _, u := range m.PeerURLs {
// if strings.Contains(u, "http://") {
// return fmt.Errorf("member had http:// url: %v", m)
// }
// }
// }
//
// // Sanity check values
// v, err := n.GetQuorum(ctx, testKey+strconv.Itoa(i))
// if err != nil {
// return fmt.Errorf("error reading test key after upgrade: %v", err)
// }
// if v != "value"+strconv.Itoa(i) {
// // Reading the wrong value is _never_ ok
// t.Fatalf("unexpected test key value after TLS enable: %q", v)
// }
//
// return nil
// })
// }
// }
//
// // When we turn on peer TLS, we can do that live via Raft,
// // but we have to bounce the processes (?)
// // Sometimes we'll catch the process bouncing, so we pause briefly and check everyone is online before continuing
// {
// time.Sleep(5 * time.Second)
// h.WaitForHealthy(nodes...)
// }
//
// // Check still can write
// for i, n := range nodes {
// if err := n.Put(ctx, testKey+strconv.Itoa(i), "updated"); err != nil {
// t.Fatalf("unable to set test key: %v", err)
// }
// }
//
// klog.Infof("success")
//
// cancel()
// h.Close()
// })
// }
// }
//}

0 comments on commit 1c288d0

Please sign in to comment.