🌱 v1beta2 conditions: add function for setting the Paused condition (#11284)

* v1beta2 conditions: add function for setting the Paused condition

* machine: set v1beta2 Paused condition

* machineset: set v1beta2 Paused condition

* machinedeployment: set v1beta2 Paused condition

* kubeadmcontrolplane: set v1beta2 Paused condition

* cluster: set v1beta2 Paused condition

* kubeadmconfig: set v1beta2 Paused condition

* machinepool: set v1beta2 Paused condition

* machinehealthcheck: set v1beta2 Paused condition

* clusterresourceset: set v1beta2 Paused condition

* dockercluster: set v1beta2 Paused condition

* dockermachine: set v1beta2 Paused condition

* inmemorycluster: set v1beta2 Paused condition

* inmemorymachine: set v1beta2 Paused condition

* bootstrap/kubeadm/internal/builder/builders.go: use consts

* util/predicates/cluster_predicates.go: fix comment

* review: cleanup predicates

* paused: remove option stuff

* machinedeployment: preserve v1beta2 status

* drop additional paused check

* Add ClusterPausedTransitionsOrInfrastructureReady predicate

* review fixes

* clusterclass: set v1beta2 Paused condition

* fix for clusterclass / clusterctl

* v1beta2conditions: export HasSameState

* paused improvements

* predicates fixup

* fix test

* review fixes
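The recurring change across the diffs below is the reconcile-entry check: the old `annotations.IsPaused` short-circuit is replaced by `paused.EnsurePausedCondition`, which both answers "is this object or its Cluster paused?" and keeps the v1beta2 `Paused` condition up to date. A minimal sketch of the pattern as it appears at the call sites in this commit — `checkPaused` is a hypothetical wrapper, and the helper's internals in `util/paused` are not part of this excerpt:

```go
package example

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/paused"
)

// checkPaused mirrors the entry check each reconciler gains in this commit
// (shown for KubeadmConfig; the other call sites differ only in the object
// type). The bool result tells the caller to stop reconciling.
func checkPaused(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, config *bootstrapv1.KubeadmConfig) (ctrl.Result, bool, error) {
	isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, c, cluster, config)
	if err != nil || isPaused || conditionChanged {
		// Stop on error, while paused, or right after the Paused condition
		// was patched: the status update queues a fresh reconcile, so real
		// work resumes on the next pass with a settled condition.
		return ctrl.Result{}, true, err
	}
	return ctrl.Result{}, false, nil
}
```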
chrischdi authored Oct 17, 2024
1 parent ccc430f commit 09d6d2d
Showing 29 changed files with 510 additions and 150 deletions.
6 changes: 0 additions & 6 deletions api/v1beta1/machine_types.go
@@ -273,12 +273,6 @@ const (
MachineDeletingV1Beta2Condition = DeletingV1Beta2Condition
)

// Machine's Paused condition and corresponding reasons that will be used in v1Beta2 API version.
const (
// MachinePausedV1Beta2Condition is true if the Machine or the Cluster it belongs to are paused.
MachinePausedV1Beta2Condition = PausedV1Beta2Condition
)

// ANCHOR: MachineSpec

// MachineSpec defines the desired state of Machine.
10 changes: 10 additions & 0 deletions bootstrap/kubeadm/internal/builder/builders.go
@@ -21,6 +21,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
)

@@ -80,6 +81,15 @@ func (k *KubeadmConfigBuilder) Build() *bootstrapv1.KubeadmConfig {
Namespace: k.namespace,
Name: k.name,
},
Status: bootstrapv1.KubeadmConfigStatus{
V1Beta2: &bootstrapv1.KubeadmConfigV1Beta2Status{
Conditions: []metav1.Condition{{
Type: clusterv1.PausedV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.NotPausedV1Beta2Reason,
}},
},
},
}
if k.initConfig != nil {
config.Spec.InitConfiguration = k.initConfig
bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go
@@ -55,10 +55,10 @@ import (
"sigs.k8s.io/cluster-api/feature"
"sigs.k8s.io/cluster-api/internal/util/taints"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
clog "sigs.k8s.io/cluster-api/util/log"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/cluster-api/util/paused"
"sigs.k8s.io/cluster-api/util/predicates"
"sigs.k8s.io/cluster-api/util/secret"
)
@@ -117,7 +117,7 @@ func (r *KubeadmConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl
Watches(
&clusterv1.Machine{},
handler.EnqueueRequestsFromMapFunc(r.MachineToBootstrapMapFunc),
).WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue))
).WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue))

if feature.Gates.Enabled(feature.MachinePool) {
b = b.Watches(
@@ -131,7 +131,7 @@
handler.EnqueueRequestsFromMapFunc(r.ClusterToKubeadmConfigs),
builder.WithPredicates(
predicates.All(mgr.GetScheme(), predicateLog,
predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetScheme(), predicateLog),
predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), predicateLog),
predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue),
),
),
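With the condition now tracking pause state, controllers must see both pause and unpause events, so the `ClusterUnpaused*` predicates (which filtered paused Clusters out entirely) are swapped for `ClusterPausedTransitions*` variants throughout. A rough sketch of the idea — illustrative only, assuming only `spec.paused` is inspected; the real implementation in `util/predicates/cluster_predicates.go` also has to account for the pause annotation and logging:

```go
package example

import (
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// clusterPausedTransitions lets an update through only when the Cluster's
// paused state flips, giving reconcilers the chance to set the Paused
// condition on pause and clear it (and resume work) on unpause.
func clusterPausedTransitions() predicate.Funcs {
	return predicate.Funcs{
		UpdateFunc: func(e event.UpdateEvent) bool {
			oldCluster, ok := e.ObjectOld.(*clusterv1.Cluster)
			if !ok {
				return false
			}
			newCluster, ok := e.ObjectNew.(*clusterv1.Cluster)
			if !ok {
				return false
			}
			return oldCluster.Spec.Paused != newCluster.Spec.Paused
		},
		CreateFunc:  func(event.CreateEvent) bool { return false },
		DeleteFunc:  func(event.DeleteEvent) bool { return false },
		GenericFunc: func(event.GenericEvent) bool { return false },
	}
}
```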
@@ -199,9 +199,8 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
return ctrl.Result{}, err
}

if annotations.IsPaused(cluster, config) {
log.Info("Reconciliation is paused for this object")
return ctrl.Result{}, nil
if isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, config); err != nil || isPaused || conditionChanged {
return ctrl.Result{}, err
}

scope := &Scope{
bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go
@@ -107,7 +107,9 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfKubeadmConfigIsReady(t *
machine,
config,
}
myclient := fake.NewClientBuilder().WithObjects(objects...).Build()
myclient := fake.NewClientBuilder().
WithStatusSubresource(&bootstrapv1.KubeadmConfig{}).
WithObjects(objects...).Build()

k := &KubeadmConfigReconciler{
Client: myclient,
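The test change above follows from the new entry check: `EnsurePausedCondition` patches the object's status, and controller-runtime's fake client only accepts status writes for types registered via `WithStatusSubresource`. The same builder tweak recurs in the other touched test files.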
5 changes: 5 additions & 0 deletions cmd/clusterctl/client/cluster/topology.go
@@ -520,6 +520,11 @@ func reconcileClusterClass(ctx context.Context, apiReader client.Reader, class c
Client: reconcilerClient,
}

// The first only reconciles the paused condition.
if _, err := clusterClassReconciler.Reconcile(ctx, reconcile.Request{NamespacedName: targetClusterClass}); err != nil {
return nil, errors.Wrap(err, "failed to dry run the ClusterClass controller to reconcile the paused condition")
}

if _, err := clusterClassReconciler.Reconcile(ctx, reconcile.Request{NamespacedName: targetClusterClass}); err != nil {
return nil, errors.Wrap(err, "failed to dry run the ClusterClass controller")
}
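The doubled `Reconcile` call is deliberate: on a fresh object the first dry-run pass sets the Paused condition and returns early (`conditionChanged` is true), so only the second pass exercises the actual ClusterClass reconciliation.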
11 changes: 5 additions & 6 deletions controlplane/kubeadm/internal/controllers/controller.go
@@ -48,11 +48,11 @@ import (
"sigs.k8s.io/cluster-api/internal/contract"
"sigs.k8s.io/cluster-api/internal/util/ssa"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/collections"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/finalizers"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/cluster-api/util/paused"
"sigs.k8s.io/cluster-api/util/predicates"
"sigs.k8s.io/cluster-api/util/secret"
"sigs.k8s.io/cluster-api/util/version"
@@ -99,14 +99,14 @@ func (r *KubeadmControlPlaneReconciler) SetupWithManager(ctx context.Context, mg
For(&controlplanev1.KubeadmControlPlane{}).
Owns(&clusterv1.Machine{}).
WithOptions(options).
WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)).
WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)).
Watches(
&clusterv1.Cluster{},
handler.EnqueueRequestsFromMapFunc(r.ClusterToKubeadmControlPlane),
builder.WithPredicates(
predicates.All(mgr.GetScheme(), predicateLog,
predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue),
predicates.ClusterUnpausedAndInfrastructureReady(mgr.GetScheme(), predicateLog),
predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), predicateLog),
),
),
).
@@ -172,9 +172,8 @@ func (r *KubeadmControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.
log = log.WithValues("Cluster", klog.KObj(cluster))
ctx = ctrl.LoggerInto(ctx, log)

if annotations.IsPaused(cluster, kcp) {
log.Info("Reconciliation is paused for this object")
return ctrl.Result{}, nil
if isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, kcp); err != nil || isPaused || conditionChanged {
return ctrl.Result{}, err
}

// Initialize the patch helper.
7 changes: 7 additions & 0 deletions controlplane/kubeadm/internal/controllers/controller_test.go
@@ -440,6 +440,13 @@ func TestReconcileClusterNoEndpoints(t *testing.T) {
},
},
},
Status: controlplanev1.KubeadmControlPlaneStatus{
V1Beta2: &controlplanev1.KubeadmControlPlaneV1Beta2Status{Conditions: []metav1.Condition{{
Type: clusterv1.PausedV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.NotPausedV1Beta2Reason,
}}},
},
}
webhook := &controlplanev1webhooks.KubeadmControlPlane{}
g.Expect(webhook.Default(ctx, kcp)).To(Succeed())
11 changes: 7 additions & 4 deletions exp/addons/internal/controllers/clusterresourceset_controller.go
@@ -48,6 +48,7 @@ import (
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/finalizers"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/cluster-api/util/paused"
"sigs.k8s.io/cluster-api/util/predicates"
)

@@ -82,9 +83,7 @@ func (r *ClusterResourceSetReconciler) SetupWithManager(ctx context.Context, mgr
handler.EnqueueRequestsFromMapFunc(
resourceToClusterResourceSetFunc[client.Object](r.Client),
),
builder.WithPredicates(
resourcepredicates.TypedResourceCreateOrUpdate[client.Object](predicateLog),
),
builder.WithPredicates(resourcepredicates.TypedResourceCreateOrUpdate[client.Object](predicateLog)),
).
WatchesRawSource(source.Kind(
partialSecretCache,
@@ -100,7 +99,7 @@
resourcepredicates.TypedResourceCreateOrUpdate[*metav1.PartialObjectMetadata](predicateLog),
)).
WithOptions(options).
WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)).
WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)).
Complete(r)
if err != nil {
return errors.Wrap(err, "failed setting up with a controller manager")
@@ -129,6 +128,10 @@ func (r *ClusterResourceSetReconciler) Reconcile(ctx context.Context, req ctrl.R
return ctrl.Result{}, err
}

if isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, nil, clusterResourceSet); err != nil || isPaused || conditionChanged {
return ctrl.Result{}, err
}

// Initialize the patch helper.
patchHelper, err := patch.NewHelper(clusterResourceSet, r.Client)
if err != nil {
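Note the `nil` cluster argument: a ClusterResourceSet is not owned by a single Cluster, so only the object's own pause state (presumably its `cluster.x-k8s.io/paused` annotation) can feed into the condition here.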
12 changes: 5 additions & 7 deletions exp/internal/controllers/machinepool_controller.go
@@ -44,10 +44,10 @@ import (
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/internal/util/ssa"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/finalizers"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/cluster-api/util/paused"
"sigs.k8s.io/cluster-api/util/predicates"
)

@@ -109,14 +109,14 @@ func (r *MachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.M
c, err := ctrl.NewControllerManagedBy(mgr).
For(&expv1.MachinePool{}).
WithOptions(options).
WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)).
WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue)).
Watches(
&clusterv1.Cluster{},
handler.EnqueueRequestsFromMapFunc(clusterToMachinePools),
// TODO: should this wait for Cluster.Status.InfrastructureReady similar to Infra Machine resources?
builder.WithPredicates(
predicates.All(mgr.GetScheme(), predicateLog,
predicates.ClusterUnpaused(mgr.GetScheme(), predicateLog),
predicates.ClusterPausedTransitions(mgr.GetScheme(), predicateLog),
predicates.ResourceHasFilterLabel(mgr.GetScheme(), predicateLog, r.WatchFilterValue),
),
),
@@ -168,10 +168,8 @@ func (r *MachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request)
mp.Spec.ClusterName, mp.Name, mp.Namespace)
}

// Return early if the object or Cluster is paused.
if annotations.IsPaused(cluster, mp) {
log.Info("Reconciliation is paused for this object")
return ctrl.Result{}, nil
if isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, mp); err != nil || isPaused || conditionChanged {
return ctrl.Result{}, err
}

// Initialize the patch helper.
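The MachinePool Cluster watch moves from `ClusterUnpaused` to plain `ClusterPausedTransitions`, not the `...OrInfrastructureReady` variant used by the other controllers — consistent with the pre-existing TODO above it, since this controller never gated on `Cluster.Status.InfrastructureReady`.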
67 changes: 60 additions & 7 deletions exp/internal/controllers/machinepool_controller_test.go
@@ -158,6 +158,13 @@ func TestMachinePoolOwnerReference(t *testing.T) {
Replicas: ptr.To[int32](1),
ClusterName: "invalid",
},
Status: expv1.MachinePoolStatus{
V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
Type: clusterv1.PausedV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.NotPausedV1Beta2Reason,
}}},
},
}

machinePoolValidCluster := &expv1.MachinePool{
@@ -176,6 +183,13 @@
},
ClusterName: "test-cluster",
},
Status: expv1.MachinePoolStatus{
V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
Type: clusterv1.PausedV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.NotPausedV1Beta2Reason,
}}},
},
}

machinePoolValidMachinePool := &expv1.MachinePool{
@@ -197,6 +211,13 @@
},
ClusterName: "test-cluster",
},
Status: expv1.MachinePoolStatus{
V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
Type: clusterv1.PausedV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.NotPausedV1Beta2Reason,
}}},
},
}

testCases := []struct {
@@ -345,6 +366,11 @@ func TestReconcileMachinePoolRequest(t *testing.T) {
{Name: "test"},
},
ObservedGeneration: 1,
V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
Type: clusterv1.PausedV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.NotPausedV1Beta2Reason,
}}},
},
},
expected: expected{
@@ -390,6 +416,11 @@
Name: "test-node",
},
},
V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
Type: clusterv1.PausedV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.NotPausedV1Beta2Reason,
}}},
},
},
nodes: []corev1.Node{
@@ -447,6 +478,11 @@
Name: "test-node",
},
},
V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
Type: clusterv1.PausedV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.NotPausedV1Beta2Reason,
}}},
},
},
nodes: []corev1.Node{
@@ -504,6 +540,11 @@
Name: "test-node",
},
},
V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
Type: clusterv1.PausedV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.NotPausedV1Beta2Reason,
}}},
},
},
nodes: []corev1.Node{
@@ -820,6 +861,13 @@ func TestRemoveMachinePoolFinalizerAfterDeleteReconcile(t *testing.T) {
},
},
},
Status: expv1.MachinePoolStatus{
V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
Type: clusterv1.PausedV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.NotPausedV1Beta2Reason,
}}},
},
}
key := client.ObjectKey{Namespace: m.Namespace, Name: m.Name}
clientFake := fake.NewClientBuilder().WithObjects(testCluster, m).WithStatusSubresource(&expv1.MachinePool{}).Build()
@@ -912,6 +960,13 @@ func TestMachinePoolConditions(t *testing.T) {
},
},
},
Status: expv1.MachinePoolStatus{
V1Beta2: &expv1.MachinePoolV1Beta2Status{Conditions: []metav1.Condition{{
Type: clusterv1.PausedV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.NotPausedV1Beta2Reason,
}}},
},
}

nodeList := corev1.NodeList{
@@ -951,14 +1006,12 @@
infrastructureReady: true,
beforeFunc: func(_, _ *unstructured.Unstructured, mp *expv1.MachinePool, _ *corev1.NodeList) {
mp.Spec.ProviderIDList = []string{"azure://westus2/id-node-4", "aws://us-east-1/id-node-1"}
mp.Status = expv1.MachinePoolStatus{
NodeRefs: []corev1.ObjectReference{
{Name: "node-1"},
{Name: "azure-node-4"},
},
Replicas: 2,
ReadyReplicas: 2,
mp.Status.NodeRefs = []corev1.ObjectReference{
{Name: "node-1"},
{Name: "azure-node-4"},
}
mp.Status.Replicas = 2
mp.Status.ReadyReplicas = 2
},
conditionAssertFunc: func(t *testing.T, getter conditions.Getter) {
t.Helper()
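Most of the remaining churn in these test files is the same fixture repeated: each object is seeded with a `Paused=False` v1beta2 condition so `Reconcile` gets past the new entry check instead of stopping after patching the condition. A hypothetical helper (not part of the commit) that captures the pattern for MachinePool:

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)

// withPausedFalse seeds the v1beta2 Paused=False condition that the
// reconciler would otherwise set (and then return early) on its first pass.
func withPausedFalse(mp *expv1.MachinePool) *expv1.MachinePool {
	mp.Status.V1Beta2 = &expv1.MachinePoolV1Beta2Status{
		Conditions: []metav1.Condition{{
			Type:   clusterv1.PausedV1Beta2Condition,
			Status: metav1.ConditionFalse,
			Reason: clusterv1.NotPausedV1Beta2Reason,
		}},
	}
	return mp
}
```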