From 023b94612dbc4106427930b6a7400c92187b6001 Mon Sep 17 00:00:00 2001 From: Quique Llorente Date: Thu, 12 Sep 2019 15:29:51 +0200 Subject: [PATCH] Test that connections persist across a reboot It will create a vlan interface with a policy and reboot the nodes preventing k8s from starting up; the vlan connection has to be there since NM is the one that configured it and it is persistent. The teardown code puts back the k8s configuration and ensures that k8s is in good shape. Closes #143 Signed-off-by: Quique Llorente --- test/e2e/default_bridged_network_test.go | 36 ------------- test/e2e/reboot_test.go | 69 ++++++++++++++++++++++++ test/e2e/states.go | 3 +- test/e2e/utils.go | 36 +++++++++++++ 4 files changed, 106 insertions(+), 38 deletions(-) create mode 100644 test/e2e/reboot_test.go diff --git a/test/e2e/default_bridged_network_test.go b/test/e2e/default_bridged_network_test.go index 1b5bd1949f..6c8fd2d598 100644 --- a/test/e2e/default_bridged_network_test.go +++ b/test/e2e/default_bridged_network_test.go @@ -1,7 +1,6 @@ package e2e import ( - "context" "fmt" "time" @@ -10,11 +9,6 @@ import ( "github.com/tidwall/gjson" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - - framework "github.com/operator-framework/operator-sdk/pkg/test" - nmstatev1alpha1 "github.com/nmstate/kubernetes-nmstate/pkg/apis/nmstate/v1alpha1" ) @@ -129,33 +123,3 @@ func defaultRouteNextHopInterface(node string) AsyncAssertion { return gjson.ParseBytes(currentStateJSON(node)).Get(path).String() }, 15*time.Second, 1*time.Second) } - -func nodeReadyConditionStatus(nodeName string) (corev1.ConditionStatus, error) { - key := types.NamespacedName{Name: nodeName} - node := corev1.Node{} - // We use a special context here to ensure that Client.Get does not - // get stuck and honor the Eventually timeout and interval values. - // It will return a timeout error in case of .Get takes more time than - // expected so Eventually will retry after expected interval value. 
- oneSecondTimeoutCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - err := framework.Global.Client.Get(oneSecondTimeoutCtx, key, &node) - if err != nil { - return "", err - } - for _, condition := range node.Status.Conditions { - if condition.Type == corev1.NodeReady { - return condition.Status, nil - } - } - return corev1.ConditionUnknown, nil -} - -func waitForNodesReady() { - time.Sleep(5 * time.Second) - for _, node := range nodes { - EventuallyWithOffset(1, func() (corev1.ConditionStatus, error) { - return nodeReadyConditionStatus(node) - }, 5*time.Minute, 10*time.Second).Should(Equal(corev1.ConditionTrue)) - } -} diff --git a/test/e2e/reboot_test.go b/test/e2e/reboot_test.go new file mode 100644 index 0000000000..d86e350987 --- /dev/null +++ b/test/e2e/reboot_test.go @@ -0,0 +1,69 @@ +package e2e + +import ( + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Nmstate network connnections persistency", func() { + Context("after rebooting a node with applied configuration", func() { + var ( + macByNode = map[string]string{} + ) + BeforeEach(func() { + By("Apply policy") + setDesiredStateWithPolicy("create-linux-bridge", linuxBrUp(bridge1)) + + for _, node := range nodes { + interfacesNameForNodeEventually(node).Should(ContainElement(bridge1)) + macByNode[node] = macAddress(node, bridge1) + Expect(macByNode[node]).To(Equal(macAddress(node, *firstSecondaryNic))) + } + + By("Move kubernetes configuration to prevent start up") + _, errs := runAtNodes("sudo", "mv", "/etc/kubernetes", "/etc/kubernetes.bak") + Expect(errs).ToNot(ContainElement(HaveOccurred())) + + By("Reboot the nodes") + runAtNodes("sudo", "reboot") + + By("Wait for nodes to come back") + _, errs = runAtNodes("true") + Expect(errs).ToNot(ContainElement(HaveOccurred())) + }) + + AfterEach(func() { + By("Move kubernetes configuration back") + _, errs := runAtNodes("sudo", "mv", "/etc/kubernetes.bak", 
"/etc/kubernetes") + Expect(errs).ToNot(ContainElement(HaveOccurred())) + + By("Wait for k8s to be ready") + waitForNodesReady() + + setDesiredStateWithPolicy("delete-linux-bridge", linuxBrAbsent(bridge1)) + for _, node := range nodes { + interfacesNameForNodeEventually(node).ShouldNot(ContainElement(bridge1)) + } + deletePolicy("create-linux-bridge") + deletePolicy("delete-linux-bridge") + resetDesiredStateForNodes() + }) + It("should have nmstate connections before kubelet starts", func() { + + By("Check that kubelet is down") + Consistently(func() []error { + _, errs := runAtNodes("/usr/sbin/pidof", "kubelet") + return errs + }).ShouldNot(ContainElement(Succeed())) + + By("Linux bridge interface is there with proper mac") + for _, node := range nodes { + output, err := runAtNode(node, "sudo", "ip", "link", "show", bridge1) + Expect(err).ToNot(HaveOccurred()) + Expect(strings.ToLower(output)).To(ContainSubstring(macByNode[node])) + } + }) + }) +}) diff --git a/test/e2e/states.go b/test/e2e/states.go index cbf61de7d6..fed8485087 100644 --- a/test/e2e/states.go +++ b/test/e2e/states.go @@ -22,8 +22,7 @@ func linuxBrUp(bridgeName string) nmstatev1alpha1.State { bridge: port: - name: %s - - name: %s -`, bridgeName, *firstSecondaryNic, *secondSecondaryNic)) +`, bridgeName, *firstSecondaryNic)) } func linuxBrAbsent(bridgeName string) nmstatev1alpha1.State { diff --git a/test/e2e/utils.go b/test/e2e/utils.go index 03ff37c883..5df68b2f06 100644 --- a/test/e2e/utils.go +++ b/test/e2e/utils.go @@ -504,3 +504,39 @@ func ipv4Address(node string, name string) string { path := fmt.Sprintf("interfaces.#(name==\"%s\").ipv4.address.0.ip", name) return gjson.ParseBytes(currentStateJSON(node)).Get(path).String() } + +func macAddress(node string, name string) string { + path := fmt.Sprintf("interfaces.#(name==\"%s\").mac-address", name) + mac := gjson.ParseBytes(currentStateJSON(node)).Get(path).String() + return strings.ToLower(mac) +} + +func nodeReadyConditionStatus(nodeName 
string) (corev1.ConditionStatus, error) { + key := types.NamespacedName{Name: nodeName} + node := corev1.Node{} + // We use a special context here to ensure that Client.Get does not + // get stuck and honor the Eventually timeout and interval values. + // It will return a timeout error in case of .Get takes more time than + // expected so Eventually will retry after expected interval value. + oneSecondTimeoutCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + err := framework.Global.Client.Get(oneSecondTimeoutCtx, key, &node) + if err != nil { + return "", err + } + for _, condition := range node.Status.Conditions { + if condition.Type == corev1.NodeReady { + return condition.Status, nil + } + } + return corev1.ConditionUnknown, nil +} + +func waitForNodesReady() { + time.Sleep(5 * time.Second) + for _, node := range nodes { + EventuallyWithOffset(1, func() (corev1.ConditionStatus, error) { + return nodeReadyConditionStatus(node) + }, 5*time.Minute, 10*time.Second).Should(Equal(corev1.ConditionTrue)) + } +}