Skip to content

Commit

Permalink
Test that connections persist a reboot
Browse files Browse the repository at this point in the history
It creates a vlan interface with a policy and reboots the nodes while
preventing k8s from starting up; the vlan connection has to still be there,
since it is NM that configured it and NM connections are persistent.

The teardown code puts back the k8s configuration and ensures that
k8s is in good shape.

Closes #143

Signed-off-by: Quique Llorente <[email protected]>
  • Loading branch information
qinqon committed Oct 22, 2019
1 parent 2bf7c02 commit 023b946
Show file tree
Hide file tree
Showing 4 changed files with 106 additions and 38 deletions.
36 changes: 0 additions & 36 deletions test/e2e/default_bridged_network_test.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
package e2e

import (
"context"
"fmt"
"time"

Expand All @@ -10,11 +9,6 @@ import (

"github.com/tidwall/gjson"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"

framework "github.com/operator-framework/operator-sdk/pkg/test"

nmstatev1alpha1 "github.com/nmstate/kubernetes-nmstate/pkg/apis/nmstate/v1alpha1"
)

Expand Down Expand Up @@ -129,33 +123,3 @@ func defaultRouteNextHopInterface(node string) AsyncAssertion {
return gjson.ParseBytes(currentStateJSON(node)).Get(path).String()
}, 15*time.Second, 1*time.Second)
}

// nodeReadyConditionStatus fetches the node named nodeName and reports the
// status of its Ready condition. It returns ConditionUnknown when the node
// exposes no Ready condition, or an error when the node cannot be retrieved.
func nodeReadyConditionStatus(nodeName string) (corev1.ConditionStatus, error) {
	var node corev1.Node
	// Bound Client.Get to one second so it honors the Eventually timeout and
	// interval values: a slow call returns a timeout error and the caller's
	// Eventually simply retries after its configured interval.
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()
	if err := framework.Global.Client.Get(ctx, types.NamespacedName{Name: nodeName}, &node); err != nil {
		return "", err
	}
	for _, cond := range node.Status.Conditions {
		if cond.Type != corev1.NodeReady {
			continue
		}
		return cond.Status, nil
	}
	return corev1.ConditionUnknown, nil
}

// waitForNodesReady blocks until every node in the cluster reports a Ready
// condition of True, polling each node for up to five minutes.
func waitForNodesReady() {
	// Give the cluster a moment to settle before starting to poll.
	time.Sleep(5 * time.Second)
	for _, nodeName := range nodes {
		check := func() (corev1.ConditionStatus, error) {
			return nodeReadyConditionStatus(nodeName)
		}
		EventuallyWithOffset(1, check, 5*time.Minute, 10*time.Second).
			Should(Equal(corev1.ConditionTrue))
	}
}
69 changes: 69 additions & 0 deletions test/e2e/reboot_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
package e2e

import (
"strings"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)

// Verifies that NetworkManager keeps nmstate-applied connections across a
// node reboot: the bridge must already be configured before kubelet starts,
// which is checked by blocking kubelet startup during the reboot.
var _ = Describe("Nmstate network connections persistency", func() {
	Context("after rebooting a node with applied configuration", func() {
		var (
			// macByNode records the bridge MAC observed on each node before
			// the reboot so it can be compared after the node comes back.
			macByNode = map[string]string{}
		)
		BeforeEach(func() {
			By("Apply policy")
			setDesiredStateWithPolicy("create-linux-bridge", linuxBrUp(bridge1))

			for _, node := range nodes {
				interfacesNameForNodeEventually(node).Should(ContainElement(bridge1))
				macByNode[node] = macAddress(node, bridge1)
				// The bridge is expected to take over the MAC of its first port.
				Expect(macByNode[node]).To(Equal(macAddress(node, *firstSecondaryNic)))
			}

			By("Move kubernetes configuration to prevent start up")
			_, errs := runAtNodes("sudo", "mv", "/etc/kubernetes", "/etc/kubernetes.bak")
			Expect(errs).ToNot(ContainElement(HaveOccurred()))

			By("Reboot the nodes")
			runAtNodes("sudo", "reboot")

			By("Wait for nodes to come back")
			// A no-op remote command succeeds only once ssh is reachable again.
			_, errs = runAtNodes("true")
			Expect(errs).ToNot(ContainElement(HaveOccurred()))
		})

		AfterEach(func() {
			By("Move kubernetes configuration back")
			_, errs := runAtNodes("sudo", "mv", "/etc/kubernetes.bak", "/etc/kubernetes")
			Expect(errs).ToNot(ContainElement(HaveOccurred()))

			By("Wait for k8s to be ready")
			waitForNodesReady()

			// Remove the test bridge and clean up both policies so later
			// specs start from a pristine desired state.
			setDesiredStateWithPolicy("delete-linux-bridge", linuxBrAbsent(bridge1))
			for _, node := range nodes {
				interfacesNameForNodeEventually(node).ShouldNot(ContainElement(bridge1))
			}
			deletePolicy("create-linux-bridge")
			deletePolicy("delete-linux-bridge")
			resetDesiredStateForNodes()
		})
		It("should have nmstate connections before kubelet starts", func() {

			By("Check that kubelet is down")
			// pidof must keep failing on every node while kubelet is blocked.
			Consistently(func() []error {
				_, errs := runAtNodes("/usr/sbin/pidof", "kubelet")
				return errs
			}).ShouldNot(ContainElement(Succeed()))

			By("Linux bridge interface is there with proper mac")
			for _, node := range nodes {
				output, err := runAtNode(node, "sudo", "ip", "link", "show", bridge1)
				Expect(err).ToNot(HaveOccurred())
				Expect(strings.ToLower(output)).To(ContainSubstring(macByNode[node]))
			}
		})
	})
})
3 changes: 1 addition & 2 deletions test/e2e/states.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,8 +22,7 @@ func linuxBrUp(bridgeName string) nmstatev1alpha1.State {
bridge:
port:
- name: %s
- name: %s
`, bridgeName, *firstSecondaryNic, *secondSecondaryNic))
`, bridgeName, *firstSecondaryNic))
}

func linuxBrAbsent(bridgeName string) nmstatev1alpha1.State {
Expand Down
36 changes: 36 additions & 0 deletions test/e2e/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -504,3 +504,39 @@ func ipv4Address(node string, name string) string {
path := fmt.Sprintf("interfaces.#(name==\"%s\").ipv4.address.0.ip", name)
return gjson.ParseBytes(currentStateJSON(node)).Get(path).String()
}

// macAddress returns the lower-cased MAC address that nmstate reports for
// interface name on the given node (empty string when the interface or the
// mac-address field is missing from the reported state).
func macAddress(node string, name string) string {
	query := fmt.Sprintf(`interfaces.#(name=="%s").mac-address`, name)
	state := gjson.ParseBytes(currentStateJSON(node))
	return strings.ToLower(state.Get(query).String())
}

// nodeReadyConditionStatus returns the status of the Ready condition of the
// node named nodeName, ConditionUnknown if the node has no Ready condition,
// or an error if the node object cannot be fetched.
func nodeReadyConditionStatus(nodeName string) (corev1.ConditionStatus, error) {
	node := corev1.Node{}
	// Use a one-second context so Client.Get cannot get stuck past the
	// Eventually timeout and interval: a slow call yields a timeout error
	// and Eventually retries after the expected interval value.
	shortCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()
	err := framework.Global.Client.Get(shortCtx, types.NamespacedName{Name: nodeName}, &node)
	if err != nil {
		return "", err
	}
	for i := range node.Status.Conditions {
		if node.Status.Conditions[i].Type == corev1.NodeReady {
			return node.Status.Conditions[i].Status, nil
		}
	}
	return corev1.ConditionUnknown, nil
}

// waitForNodesReady waits until each cluster node reports Ready == True,
// allowing every node up to five minutes with a ten-second polling interval.
func waitForNodesReady() {
	// Short grace period so freshly restarted components can register.
	time.Sleep(5 * time.Second)
	for i := range nodes {
		nodeName := nodes[i]
		EventuallyWithOffset(1, func() (corev1.ConditionStatus, error) {
			return nodeReadyConditionStatus(nodeName)
		}, 5*time.Minute, 10*time.Second).Should(Equal(corev1.ConditionTrue))
	}
}

0 comments on commit 023b946

Please sign in to comment.