diff --git a/pkg/apis/machineconfiguration.openshift.io/v1/types.go b/pkg/apis/machineconfiguration.openshift.io/v1/types.go
index 6e2e1d3bb8..b403144bab 100644
--- a/pkg/apis/machineconfiguration.openshift.io/v1/types.go
+++ b/pkg/apis/machineconfiguration.openshift.io/v1/types.go
@@ -299,6 +299,9 @@ type MachineConfigPoolSpec struct {
 	// MaxUnavailable specifies the percentage or constant number of machines that can be updating at any given time.
 	// default is 1.
 	MaxUnavailable *intstr.IntOrString `json:"maxUnavailable"`
+
+	// The targeted MachineConfig object for the machine config pool.
+	Configuration MachineConfigPoolStatusConfiguration `json:"configuration"`
 }
 
 // MachineConfigPoolStatus is the status for MachineConfigPool resource.
diff --git a/pkg/apis/machineconfiguration.openshift.io/v1/zz_generated.deepcopy.go b/pkg/apis/machineconfiguration.openshift.io/v1/zz_generated.deepcopy.go
index aae25481d7..54a352f0b3 100644
--- a/pkg/apis/machineconfiguration.openshift.io/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/machineconfiguration.openshift.io/v1/zz_generated.deepcopy.go
@@ -633,6 +633,7 @@ func (in *MachineConfigPoolSpec) DeepCopyInto(out *MachineConfigPoolSpec) {
 		*out = new(intstr.IntOrString)
 		**out = **in
 	}
+	in.Configuration.DeepCopyInto(&out.Configuration)
 	return
 }
 
diff --git a/pkg/controller/container-runtime-config/container_runtime_config_controller_test.go b/pkg/controller/container-runtime-config/container_runtime_config_controller_test.go
index cf0a343847..570013be31 100644
--- a/pkg/controller/container-runtime-config/container_runtime_config_controller_test.go
+++ b/pkg/controller/container-runtime-config/container_runtime_config_controller_test.go
@@ -118,6 +118,7 @@ func newMachineConfigPool(name string, labels map[string]string, selector *metav
 		ObjectMeta: metav1.ObjectMeta{Name: name, Labels: labels, UID: types.UID(utilrand.String(5))},
 		Spec: mcfgv1.MachineConfigPoolSpec{
 			MachineConfigSelector: selector,
+			Configuration: mcfgv1.MachineConfigPoolStatusConfiguration{ObjectReference: corev1.ObjectReference{Name: currentMachineConfig}},
 		},
 		Status: mcfgv1.MachineConfigPoolStatus{
 			Configuration: mcfgv1.MachineConfigPoolStatusConfiguration{ObjectReference: corev1.ObjectReference{Name: currentMachineConfig}},
diff --git a/pkg/controller/kubelet-config/kubelet_config_controller_test.go b/pkg/controller/kubelet-config/kubelet_config_controller_test.go
index 310453b0f1..54f39bb6d7 100644
--- a/pkg/controller/kubelet-config/kubelet_config_controller_test.go
+++ b/pkg/controller/kubelet-config/kubelet_config_controller_test.go
@@ -128,6 +128,7 @@ func newMachineConfigPool(name string, labels map[string]string, selector *metav
 		ObjectMeta: metav1.ObjectMeta{Name: name, Labels: labels, UID: types.UID(utilrand.String(5))},
 		Spec: mcfgv1.MachineConfigPoolSpec{
 			MachineConfigSelector: selector,
+			Configuration: mcfgv1.MachineConfigPoolStatusConfiguration{ObjectReference: corev1.ObjectReference{Name: currentMachineConfig}},
 		},
 		Status: mcfgv1.MachineConfigPoolStatus{
 			Configuration: mcfgv1.MachineConfigPoolStatusConfiguration{ObjectReference: corev1.ObjectReference{Name: currentMachineConfig}},
diff --git a/pkg/controller/node/node_controller.go b/pkg/controller/node/node_controller.go
index 4a44d67d09..a25b2d074c 100644
--- a/pkg/controller/node/node_controller.go
+++ b/pkg/controller/node/node_controller.go
@@ -432,7 +432,7 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error {
 		return err
 	}
 
-	if machineconfigpool.Status.Configuration.Name == "" {
+	if machineconfigpool.Spec.Configuration.Name == "" {
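+		// Note: spec.configuration is owned by the render controller (see the
+		// render_controller.go changes below); an empty name here just means it
+		// has not rendered a config for this pool yet.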
 		// Previously we spammed the logs about empty pools.
 		// Let's just pause for a bit here to let the renderer
 		// initialize them.
@@ -475,7 +475,7 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error {
 
 	candidates := getCandidateMachines(pool, nodes, maxunavail)
 	for _, node := range candidates {
-		if err := ctrl.setDesiredMachineConfigAnnotation(node.Name, pool.Status.Configuration.Name); err != nil {
+		if err := ctrl.setDesiredMachineConfigAnnotation(node.Name, pool.Spec.Configuration.Name); err != nil {
 			return err
 		}
 	}
@@ -517,7 +517,7 @@ func (ctrl *Controller) setDesiredMachineConfigAnnotation(nodeName, currentConfi
 }
 
 func getCandidateMachines(pool *mcfgv1.MachineConfigPool, nodesInPool []*corev1.Node, maxUnavailable int) []*corev1.Node {
-	targetConfig := pool.Status.Configuration.Name
+	targetConfig := pool.Spec.Configuration.Name
 
 	unavail := getUnavailableMachines(nodesInPool)
 	// If we're at capacity, there's nothing to do.
diff --git a/pkg/controller/node/node_controller_test.go b/pkg/controller/node/node_controller_test.go
index 0a77237952..97d6166cfe 100644
--- a/pkg/controller/node/node_controller_test.go
+++ b/pkg/controller/node/node_controller_test.go
@@ -63,6 +63,7 @@ func newMachineConfigPool(name string, selector *metav1.LabelSelector, maxUnavai
 		Spec: mcfgv1.MachineConfigPoolSpec{
 			NodeSelector:   selector,
 			MaxUnavailable: maxUnavail,
+			Configuration:  mcfgv1.MachineConfigPoolStatusConfiguration{ObjectReference: corev1.ObjectReference{Name: currentMachineConfig}},
 		},
 		Status: mcfgv1.MachineConfigPoolStatus{
 			Configuration: mcfgv1.MachineConfigPoolStatusConfiguration{ObjectReference: corev1.ObjectReference{Name: currentMachineConfig}},
@@ -502,7 +503,7 @@ func TestGetCandidateMachines(t *testing.T) {
 	for idx, test := range tests {
 		t.Run(fmt.Sprintf("case#%d", idx), func(t *testing.T) {
 			pool := &mcfgv1.MachineConfigPool{
-				Status: mcfgv1.MachineConfigPoolStatus{
+				Spec: mcfgv1.MachineConfigPoolSpec{
 					Configuration: mcfgv1.MachineConfigPoolStatusConfiguration{ObjectReference: corev1.ObjectReference{Name: "v1"}},
 				},
 			}
diff --git a/pkg/controller/node/status.go b/pkg/controller/node/status.go
index e4f070fca6..e233071bc0 100644
--- a/pkg/controller/node/status.go
+++ b/pkg/controller/node/status.go
@@ -34,14 +34,12 @@ func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error {
 }
 
 func calculateStatus(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node) mcfgv1.MachineConfigPoolStatus {
-	previousUpdated := mcfgv1.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolUpdated)
-
 	machineCount := int32(len(nodes))
 
-	updatedMachines := getUpdatedMachines(pool.Status.Configuration.Name, nodes)
+	updatedMachines := getUpdatedMachines(pool.Spec.Configuration.Name, nodes)
 	updatedMachineCount := int32(len(updatedMachines))
 
-	readyMachines := getReadyMachines(pool.Status.Configuration.Name, nodes)
+	readyMachines := getReadyMachines(pool.Spec.Configuration.Name, nodes)
 	readyMachineCount := int32(len(readyMachines))
 
 	unavailableMachines := getUnavailableMachines(nodes)
@@ -73,22 +71,26 @@ func calculateStatus(pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node) mcfgv
 		status.Conditions = append(status.Conditions, conditions[i])
 	}
 
-	if updatedMachineCount == machineCount &&
+	allUpdated := updatedMachineCount == machineCount &&
 		readyMachineCount == machineCount &&
-		unavailableMachineCount == 0 {
+		unavailableMachineCount == 0
+
+	if allUpdated {
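+		// status.Configuration is only advanced further below, once every
+		// machine in the pool has converged on the spec configuration.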
 		//TODO: update api to only have one condition regarding status of update.
-		updatedMsg := fmt.Sprintf("All nodes are updated with %s", pool.Status.Configuration.Name)
+		updatedMsg := fmt.Sprintf("All nodes are updated with %s", pool.Spec.Configuration.Name)
 		supdated := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdated, corev1.ConditionTrue, updatedMsg, "")
 		mcfgv1.SetMachineConfigPoolCondition(&status, *supdated)
-		if !previousUpdated {
-			glog.Infof("Pool %s: %s", pool.Name, updatedMsg)
-		}
+
 		supdating := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdating, corev1.ConditionFalse, "", "")
 		mcfgv1.SetMachineConfigPoolCondition(&status, *supdating)
+		if status.Configuration.Name != pool.Spec.Configuration.Name {
+			glog.Infof("Pool %s: %s", pool.Name, updatedMsg)
+			status.Configuration = pool.Spec.Configuration
+		}
 	} else {
 		supdated := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdated, corev1.ConditionFalse, "", "")
 		mcfgv1.SetMachineConfigPoolCondition(&status, *supdated)
-		supdating := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdating, corev1.ConditionTrue, fmt.Sprintf("All nodes are updating to %s", pool.Status.Configuration.Name), "")
+		supdating := mcfgv1.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolUpdating, corev1.ConditionTrue, fmt.Sprintf("All nodes are updating to %s", pool.Spec.Configuration.Name), "")
 		mcfgv1.SetMachineConfigPoolCondition(&status, *supdating)
 	}
diff --git a/pkg/controller/node/status_test.go b/pkg/controller/node/status_test.go
index 8175bcbec8..fe0629819f 100644
--- a/pkg/controller/node/status_test.go
+++ b/pkg/controller/node/status_test.go
@@ -598,7 +598,7 @@ func TestCalculateStatus(t *testing.T) {
 	for idx, test := range tests {
 		t.Run(fmt.Sprintf("case#%d", idx), func(t *testing.T) {
 			pool := &mcfgv1.MachineConfigPool{
-				Status: mcfgv1.MachineConfigPoolStatus{
+				Spec: mcfgv1.MachineConfigPoolSpec{
 					Configuration: mcfgv1.MachineConfigPoolStatusConfiguration{ObjectReference: corev1.ObjectReference{Name: test.currentConfig}},
 				},
 			}
diff --git a/pkg/controller/render/render_controller.go b/pkg/controller/render/render_controller.go
index a41a4c4766..ac3cb9ee23 100644
--- a/pkg/controller/render/render_controller.go
+++ b/pkg/controller/render/render_controller.go
@@ -500,18 +500,20 @@ func (ctrl *Controller) syncGeneratedMachineConfig(pool *mcfgv1.MachineConfigPoo
 		return err
 	}
 
-	if pool.Status.Configuration.Name == generated.Name {
+	if pool.Spec.Configuration.Name == generated.Name {
 		_, _, err = resourceapply.ApplyMachineConfig(ctrl.client.MachineconfigurationV1(), generated)
 		return err
 	}
 
-	glog.V(2).Infof("Pool %s is now targeting: %s", pool.Name, generated.Name)
-	pool.Status.Configuration.Name = generated.Name
-	pool.Status.Configuration.Source = source
-	_, err = ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(pool)
+	newPool := pool.DeepCopy()
+	newPool.Spec.Configuration.Name = generated.Name
+	newPool.Spec.Configuration.Source = source
+	// TODO(walters) Use subresource or JSON patch, but the latter isn't supported by the unit test mocks
+	pool, err = ctrl.client.MachineconfigurationV1().MachineConfigPools().Update(newPool)
 	if err != nil {
 		return err
 	}
+	glog.V(2).Infof("Pool %s: now targeting: %s", pool.Name, generated.Name)
 
 	if err := ctrl.garbageCollectRenderedConfigs(pool); err != nil {
 		return err
@@ -565,6 +567,7 @@ func RunBootstrap(pools []*mcfgv1.MachineConfigPool, configs []*mcfgv1.MachineCo
 			return nil, nil, err
 		}
 
+		pool.Spec.Configuration.Name = generated.Name
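+		// Bootstrap rendering runs offline with no controllers to reconcile,
+		// so status can be seeded to match spec immediately.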
 		pool.Status.Configuration.Name = generated.Name
 		opools = append(opools, pool)
 		oconfigs = append(oconfigs, generated)
diff --git a/pkg/controller/render/render_controller_test.go b/pkg/controller/render/render_controller_test.go
index cb30b6c040..ddf981f1f9 100644
--- a/pkg/controller/render/render_controller_test.go
+++ b/pkg/controller/render/render_controller_test.go
@@ -59,6 +59,7 @@ func newMachineConfigPool(name string, selector *metav1.LabelSelector, currentMa
 		ObjectMeta: metav1.ObjectMeta{Name: name, UID: types.UID(utilrand.String(5))},
 		Spec: mcfgv1.MachineConfigPoolSpec{
 			MachineConfigSelector: selector,
+			Configuration: mcfgv1.MachineConfigPoolStatusConfiguration{ObjectReference: corev1.ObjectReference{Name: currentMachineConfig}},
 		},
 		Status: mcfgv1.MachineConfigPoolStatus{
 			Configuration: mcfgv1.MachineConfigPoolStatusConfiguration{ObjectReference: corev1.ObjectReference{Name: currentMachineConfig}},
@@ -235,6 +236,10 @@ func (f *fixture) expectUpdateMachineConfigAction(config *mcfgv1.MachineConfig)
 	f.actions = append(f.actions, core.NewRootUpdateAction(schema.GroupVersionResource{Resource: "machineconfigs"}, config))
 }
 
+func (f *fixture) expectUpdateMachineConfigPoolSpec(pool *mcfgv1.MachineConfigPool) {
+	f.actions = append(f.actions, core.NewRootUpdateSubresourceAction(schema.GroupVersionResource{Resource: "machineconfigpools"}, "spec", pool))
+}
+
 func (f *fixture) expectUpdateMachineConfigPoolStatus(pool *mcfgv1.MachineConfigPool) {
 	f.actions = append(f.actions, core.NewRootUpdateSubresourceAction(schema.GroupVersionResource{Resource: "machineconfigpools"}, "status", pool))
 }
@@ -339,6 +344,7 @@ func TestUpdatesGeneratedMachineConfig(t *testing.T) {
 		t.Fatal(err)
 	}
 	gmc.Spec.OSImageURL = "why-did-you-change-it"
+	mcp.Spec.Configuration.Name = gmc.Name
 	mcp.Status.Configuration.Name = gmc.Name
 
 	f.ccLister = append(f.ccLister, cc)
@@ -401,6 +407,7 @@ func TestDoNothing(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
+	mcp.Spec.Configuration.Name = gmc.Name
 	mcp.Status.Configuration.Name = gmc.Name
 
 	f.ccLister = append(f.ccLister, cc)
diff --git a/pkg/operator/status.go b/pkg/operator/status.go
index 73701dc3d7..ec89f98e97 100644
--- a/pkg/operator/status.go
+++ b/pkg/operator/status.go
@@ -258,8 +258,11 @@ func (optr *Operator) allMachineConfigPoolStatus() (map[string]string, error) {
 // isMachineConfigPoolConfigurationValid returns nil error when the configuration of a `pool` is created by the controller at version `version`.
 func isMachineConfigPoolConfigurationValid(pool *mcfgv1.MachineConfigPool, version string, machineConfigGetter func(string) (*mcfgv1.MachineConfig, error)) error {
-	// both .status.configuration.name and .status.configuration.source must be set.
+	// .spec.configuration.name, .status.configuration.name, and .status.configuration.source must all be set.
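+	// Spec is validated first: the render controller fills it in before the
+	// node controller ever promotes it to status.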
+	if len(pool.Spec.Configuration.Name) == 0 {
+		return fmt.Errorf("configuration spec for pool %s is empty", pool.GetName())
+	}
 	if len(pool.Status.Configuration.Name) == 0 {
-		return fmt.Errorf("configuration for pool %s is empty", pool.GetName())
+		return fmt.Errorf("configuration status for pool %s is empty", pool.GetName())
 	}
 	if len(pool.Status.Configuration.Source) == 0 {
 		return fmt.Errorf("list of MachineConfigs that were used to generate configuration for pool %s is empty", pool.GetName())
diff --git a/pkg/operator/status_test.go b/pkg/operator/status_test.go
index 072ade6e95..9fca7130b6 100644
--- a/pkg/operator/status_test.go
+++ b/pkg/operator/status_test.go
@@ -35,7 +35,7 @@ func TestIsMachineConfigPoolConfigurationValid(t *testing.T) {
 		err       error
 	}{{
-		err: errors.New("configuration for pool dummy-pool is empty"),
+		err: errors.New("configuration spec for pool dummy-pool is empty"),
 	}, {
 		generated: "g",
 		err:       errors.New("list of MachineConfigs that were used to generate configuration for pool dummy-pool is empty"),
@@ -116,6 +116,9 @@ func TestIsMachineConfigPoolConfigurationValid(t *testing.T) {
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "dummy-pool",
 			},
+			Spec: mcfgv1.MachineConfigPoolSpec{
+				Configuration: mcfgv1.MachineConfigPoolStatusConfiguration{ObjectReference: corev1.ObjectReference{Name: test.generated}, Source: source},
+			},
 			Status: mcfgv1.MachineConfigPoolStatus{
 				Configuration: mcfgv1.MachineConfigPoolStatusConfiguration{ObjectReference: corev1.ObjectReference{Name: test.generated}, Source: source},
 			},
diff --git a/test/e2e/mcd_test.go b/test/e2e/mcd_test.go
index 69e9e594b3..ccf96bc5cb 100644
--- a/test/e2e/mcd_test.go
+++ b/test/e2e/mcd_test.go
@@ -1,6 +1,7 @@
 package e2e_test
 
 import (
+	"github.com/pkg/errors"
 	"encoding/json"
 	"fmt"
 	"os/exec"
@@ -9,6 +10,7 @@ import (
 	"time"
 
 	ignv2_2types "github.com/coreos/ignition/config/v2_2/types"
+	"github.com/stretchr/testify/assert"
 	mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
 	"github.com/openshift/machine-config-operator/pkg/daemon/constants"
 	"github.com/openshift/machine-config-operator/test/e2e/framework"
@@ -73,10 +75,9 @@ func createIgnFile(path, content, fs string, mode int) ignv2_2types.File {
 
 func createMCToAddFile(name, filename, data, fs string) *mcv1.MachineConfig {
 	// create a dummy MC
-	mcName := fmt.Sprintf("%s-%s", name, uuid.NewUUID())
 	mcadd := &mcv1.MachineConfig{}
 	mcadd.ObjectMeta = metav1.ObjectMeta{
-		Name: mcName,
+		Name: fmt.Sprintf("%s-%s", name, uuid.NewUUID()),
 		// TODO(runcom): hardcoded to workers for safety
 		Labels: mcLabelForWorkers(),
 	}
@@ -92,15 +93,64 @@ func createMCToAddFile(name, filename, data, fs string) *mcv1.MachineConfig {
 			},
 		},
 	}
+
 	return mcadd
 }
 
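+// The two helpers below replace poll loops that were previously open-coded in
+// each test: one waits for the renderer to target a config (spec), the other
+// for the pool to finish applying it (status).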
+// waitForRenderedConfig polls a MachineConfigPool until it has
+// included the given mcName in its config, and returns the new
+// rendered config name.
+func waitForRenderedConfig(t *testing.T, cs *framework.ClientSet, pool, mcName string) (string, error) {
+	var renderedConfig string
+	startTime := time.Now()
+	if err := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
+		mcp, err := cs.MachineConfigPools().Get(pool, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		for _, mc := range mcp.Spec.Configuration.Source {
+			if mc.Name == mcName {
+				renderedConfig = mcp.Spec.Configuration.Name
+				return true, nil
+			}
+		}
+		return false, nil
+	}); err != nil {
+		return "", errors.Wrapf(err, "machine config %s hasn't been picked by pool %s", mcName, pool)
+	}
+	t.Logf("Pool %s has rendered config %s containing %s (waited %v)", pool, renderedConfig, mcName, time.Since(startTime))
+	return renderedConfig, nil
+}
+
+// waitForPoolComplete polls a pool until it has completed an update to target.
+func waitForPoolComplete(t *testing.T, cs *framework.ClientSet, pool, target string) error {
+	startTime := time.Now()
+	if err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
+		mcp, err := cs.MachineConfigPools().Get(pool, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		if mcp.Status.Configuration.Name != target {
+			return false, nil
+		}
+		if mcv1.IsMachineConfigPoolConditionTrue(mcp.Status.Conditions, mcv1.MachineConfigPoolUpdated) {
+			return true, nil
+		}
+		return false, nil
+	}); err != nil {
+		return errors.Wrapf(err, "pool %s didn't report updated to %s", pool, target)
+	}
+	t.Logf("Pool %s has completed %s (waited %v)", pool, target, time.Since(startTime))
+	return nil
+}
+
 func TestMCDeployed(t *testing.T) {
 	cs := framework.NewClientSet("")
 	bumpPoolMaxUnavailableTo(t, cs, 3)
 	// TODO: bring this back to 10
 	for i := 0; i < 5; i++ {
+		startTime := time.Now()
 		mcadd := createMCToAddFile("add-a-file", fmt.Sprintf("/etc/mytestconf%d", i), "test", "root")
 
 		// create the dummy MC now
@@ -109,46 +159,23 @@
 			t.Errorf("failed to create machine config %v", err)
 		}
 
-		// grab the latest worker- MC
-		var newMCName string
-		if err := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
-			mcp, err := cs.MachineConfigPools().Get("worker", metav1.GetOptions{})
-			if err != nil {
-				return false, err
-			}
-			for _, mc := range mcp.Status.Configuration.Source {
-				if mc.Name == mcadd.Name {
-					newMCName = mcp.Status.Configuration.Name
-					return true, nil
-				}
-			}
-			return false, nil
-		}); err != nil {
-			t.Errorf("machine config hasn't been picked by the pool: %v", err)
+		t.Logf("Created %s", mcadd.Name)
+		renderedConfig, err := waitForRenderedConfig(t, cs, "worker", mcadd.Name)
+		if err != nil {
+			t.Errorf("%v", err)
 		}
-
-		visited := make(map[string]bool)
-		if err := wait.Poll(2*time.Second, 30*time.Minute, func() (bool, error) {
-			nodes, err := getNodesByRole(cs, "worker")
-			if err != nil {
-				return false, nil
-			}
-			for _, node := range nodes {
-				if visited[node.Name] {
-					continue
-				}
-				if node.Annotations[constants.CurrentMachineConfigAnnotationKey] == newMCName && node.Annotations[constants.MachineConfigDaemonStateAnnotationKey] == constants.MachineConfigDaemonStateDone {
-					visited[node.Name] = true
-					if len(visited) == len(nodes) {
-						return true, nil
-					}
-					continue
-				}
-			}
-			return false, nil
-		}); err != nil {
-			t.Errorf("machine config didn't result in file being on any worker: %v", err)
+		if err := waitForPoolComplete(t, cs, "worker", renderedConfig); err != nil {
+			t.Fatal(err)
+		}
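+		// With the pool converged, every worker should now report the rendered
+		// config as current and the daemon state as Done.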
+		nodes, err := getNodesByRole(cs, "worker")
+		if err != nil {
+			t.Fatal(err)
 		}
+		for _, node := range nodes {
+			assert.Equal(t, renderedConfig, node.Annotations[constants.CurrentMachineConfigAnnotationKey])
+			assert.Equal(t, constants.MachineConfigDaemonStateDone, node.Annotations[constants.MachineConfigDaemonStateAnnotationKey])
+		}
+		t.Logf("All nodes updated with %s (%s elapsed)", mcadd.Name, time.Since(startTime))
 	}
 }
 
@@ -200,77 +227,49 @@ func TestUpdateSSH(t *testing.T) {
 	if err != nil {
 		t.Errorf("failed to create machine config %v", err)
 	}
+	t.Logf("Created %s", mcadd.Name)
 
 	// grab the latest worker- MC
-	var newMCName string
-	if err := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
-		mcp, err := cs.MachineConfigPools().Get("worker", metav1.GetOptions{})
+	renderedConfig, err := waitForRenderedConfig(t, cs, "worker", mcadd.Name)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if err := waitForPoolComplete(t, cs, "worker", renderedConfig); err != nil {
+		t.Fatal(err)
+	}
+	nodes, err := getNodesByRole(cs, "worker")
+	if err != nil {
+		t.Fatal(err)
+	}
+	for _, node := range nodes {
+		assert.Equal(t, node.Annotations[constants.CurrentMachineConfigAnnotationKey], renderedConfig)
+		assert.Equal(t, node.Annotations[constants.MachineConfigDaemonStateAnnotationKey], constants.MachineConfigDaemonStateDone)
+		// find the MCD pod that has spec.nodeName = node.Name and get its name:
+		listOptions := metav1.ListOptions{
+			FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}).String(),
+		}
+		listOptions.LabelSelector = labels.SelectorFromSet(labels.Set{"k8s-app": "machine-config-daemon"}).String()
+
+		mcdList, err := cs.Pods("openshift-machine-config-operator").List(listOptions)
 		if err != nil {
-			return false, err
+			t.Fatal(err)
 		}
-		for _, mc := range mcp.Status.Configuration.Source {
-			if mc.Name == mcName {
-				newMCName = mcp.Status.Configuration.Name
-				return true, nil
-			}
+		if len(mcdList.Items) != 1 {
+			t.Fatalf("Failed to find MCD for node %s", node.Name)
 		}
-		return false, nil
-	}); err != nil {
-		t.Errorf("machine config hasn't been picked by the pool: %v", err)
-	}
+		mcdName := mcdList.Items[0].ObjectMeta.Name
 
-	visited := make(map[string]bool)
-	if err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
-		nodes, err := getNodesByRole(cs, "worker")
+		// now rsh into that daemon and grep the authorized key file to check if 1234_test was written
+		// must do both commands in same shell, combine commands into one exec.Command()
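+		// grep exits non-zero when the key is absent, so a missing match
+		// surfaces as err with the combined output preserved in found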
+		found, err := exec.Command("oc", "rsh", "-n", "openshift-machine-config-operator", mcdName,
+			"grep", "1234_test", "/rootfs/home/core/.ssh/authorized_keys").CombinedOutput()
 		if err != nil {
-			return false, err
+			t.Fatalf("unable to read authorized_keys on daemon: %s got: %s got err: %v", mcdName, found, err)
 		}
-		for _, node := range nodes {
-			if visited[node.Name] {
-				continue
-			}
-			// check that the new MC is in the annotations and that the MCD state is Done.
-			if node.Annotations[constants.CurrentMachineConfigAnnotationKey] == newMCName &&
-				node.Annotations[constants.MachineConfigDaemonStateAnnotationKey] == constants.MachineConfigDaemonStateDone {
-				// find the MCD pod that has spec.nodeNAME = node.Name and get its name:
-				listOptions := metav1.ListOptions{
-					FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}).String(),
-				}
-				listOptions.LabelSelector = labels.SelectorFromSet(labels.Set{"k8s-app": "machine-config-daemon"}).String()
-
-				mcdList, err := cs.Pods("openshift-machine-config-operator").List(listOptions)
-				if err != nil {
-					return false, nil
-				}
-				if len(mcdList.Items) != 1 {
-					t.Logf("did not find any mcd pods")
-					return false, nil
-				}
-				mcdName := mcdList.Items[0].ObjectMeta.Name
-
-				// now rsh into that daemon and grep the authorized key file to check if 1234_test was written
-				// must do both commands in same shell, combine commands into one exec.Command()
-				found, err := exec.Command("oc", "rsh", "-n", "openshift-machine-config-operator", mcdName,
-					"grep", "1234_test", "/rootfs/home/core/.ssh/authorized_keys").CombinedOutput()
-				if err != nil {
-					t.Logf("unable to read authorized_keys on daemon: %s got: %s got err: %v", mcdName, found, err)
-					return false, nil
-				}
-				if !strings.Contains(string(found), "1234_test") {
-					t.Logf("updated ssh keys not found in authorized_keys, got %s", found)
-					return false, nil
-				}
-
-				visited[node.Name] = true
-				if len(visited) == len(nodes) {
-					return true, nil
-				}
-				continue
-			}
+		if !strings.Contains(string(found), "1234_test") {
+			t.Fatalf("updated ssh keys not found in authorized_keys, got %s", found)
 		}
-		return false, nil
-	}); err != nil {
-		t.Errorf("machine config didn't result in ssh keys being on any worker: %v", err)
+		t.Logf("Node %s has SSH key", node.Name)
 	}
 }
 
@@ -353,22 +352,9 @@ func TestReconcileAfterBadMC(t *testing.T) {
 		t.Errorf("failed to create machine config %v", err)
 	}
 
-	// grab the latest worker- MC
-	var newMCName string
-	if err := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
-		mcp, err := cs.MachineConfigPools().Get("worker", metav1.GetOptions{})
-		if err != nil {
-			return false, err
-		}
-		for _, mc := range mcp.Status.Configuration.Source {
-			if mc.Name == mcadd.Name {
-				newMCName = mcp.Status.Configuration.Name
-				return true, nil
-			}
-		}
-		return false, nil
-	}); err != nil {
-		t.Errorf("machine config hasn't been picked by the pool: %v", err)
+	renderedConfig, err := waitForRenderedConfig(t, cs, "worker", mcadd.Name)
+	if err != nil {
+		t.Errorf("%v", err)
 	}
 
 	// verify that one node picked the above up
@@ -378,7 +364,8 @@
 			return false, err
 		}
 		for _, node := range nodes {
-			if node.Annotations[constants.DesiredMachineConfigAnnotationKey] == newMCName && node.Annotations[constants.MachineConfigDaemonStateAnnotationKey] != constants.MachineConfigDaemonStateDone {
+			if node.Annotations[constants.DesiredMachineConfigAnnotationKey] == renderedConfig &&
+				node.Annotations[constants.MachineConfigDaemonStateAnnotationKey] != constants.MachineConfigDaemonStateDone {
 				// just check that we have the annotation here, w/o strings checking anything that can flip fast causing flakes
 				if node.Annotations[constants.MachineConfigDaemonReasonAnnotationKey] != "" {
 					return true, nil
@@ -410,17 +397,8 @@
 	}
 
 	// wait for the mcp to go back to previous config
-	if err := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
-		mcp, err := cs.MachineConfigPools().Get("worker", metav1.GetOptions{})
-		if err != nil {
-			return false, err
-		}
-		if mcp.Status.Configuration.Name == workerOldMc {
-			return true, nil
-		}
-		return false, nil
-	}); err != nil {
-		t.Errorf("old machine config hasn't been picked by the pool: %v", err)
+	if err := waitForPoolComplete(t, cs, "worker", workerOldMc); err != nil {
+		t.Fatal(err)
 	}
 
 	visited := make(map[string]bool)