Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[0.1] Delete machines #1180

Merged
merged 10 commits into from
Jul 24, 2019
1 change: 1 addition & 0 deletions pkg/controller/cluster/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ go_library(
"//pkg/util:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/client-go/util/retry:go_default_library",
Expand Down
150 changes: 141 additions & 9 deletions pkg/controller/cluster/cluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,15 +18,21 @@ package cluster

import (
"context"
"fmt"
"time"

"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/pager"
"k8s.io/client-go/util/retry"
"k8s.io/klog"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
"sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/typed/cluster/v1alpha1"
controllerError "sigs.k8s.io/cluster-api/pkg/controller/error"
"sigs.k8s.io/cluster-api/pkg/util"
"sigs.k8s.io/controller-runtime/pkg/client"
Expand All @@ -37,15 +43,30 @@ import (
"sigs.k8s.io/controller-runtime/pkg/source"
)

const deleteRequeueAfter = 5 * time.Second

var DefaultActuator Actuator

// AddWithActuator creates a new Cluster controller backed by the given
// actuator and registers it with the supplied manager. It returns any error
// encountered while constructing the reconciler or wiring up the controller.
func AddWithActuator(mgr manager.Manager, actuator Actuator) error {
	r, err := newReconciler(mgr, actuator)
	if err != nil {
		return err
	}
	return add(mgr, r)
}

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, actuator Actuator) reconcile.Reconciler {
return &ReconcileCluster{Client: mgr.GetClient(), scheme: mgr.GetScheme(), actuator: actuator}
func newReconciler(mgr manager.Manager, actuator Actuator) (reconcile.Reconciler, error) {
cclient, err := v1alpha1.NewForConfig(mgr.GetConfig())
if err != nil {
return nil, err
liztio marked this conversation as resolved.
Show resolved Hide resolved
}
return &ReconcileCluster{
Client: mgr.GetClient(),
clusterClient: cclient,
scheme: mgr.GetScheme(),
actuator: actuator}, nil
}

// add adds a new Controller to mgr with r as the reconcile.Reconciler
Expand All @@ -70,8 +91,9 @@ var _ reconcile.Reconciler = &ReconcileCluster{}
// ReconcileCluster reconciles a Cluster object.
//
// NOTE(review): the diff residue left the pre-change field lines
// (scheme/actuator without clusterClient) in place alongside the new ones,
// which is invalid Go; this is the merged, post-change struct.
type ReconcileCluster struct {
	client.Client

	// clusterClient is a typed client for cluster.k8s.io resources; it is
	// used to list a cluster's MachineDeployments, MachineSets, and Machines
	// when the cluster is being deleted.
	clusterClient v1alpha1.ClusterV1alpha1Interface
	scheme        *runtime.Scheme
	actuator      Actuator
}

// +kubebuilder:rbac:groups=cluster.k8s.io,resources=clusters,verbs=get;list;watch;create;update;patch;delete
Expand Down Expand Up @@ -119,18 +141,48 @@ func (r *ReconcileCluster) Reconcile(request reconcile.Request) (reconcile.Resul
if !cluster.ObjectMeta.DeletionTimestamp.IsZero() {
// no-op if finalizer has been removed.
if !util.Contains(cluster.ObjectMeta.Finalizers, clusterv1.ClusterFinalizer) {
klog.Infof("reconciling cluster object %v causes a no-op as there is no finalizer.", name)
klog.Infof("Reconciling cluster object %v causes a no-op as there is no finalizer.", name)
return reconcile.Result{}, nil
}

klog.Infof("reconciling cluster object %v triggers delete.", name)
children, err := r.listChildren(context.Background(), cluster)
if err != nil {
klog.Infof("Failed to list dependent objects of cluster %s/%s: %v", cluster.ObjectMeta.Namespace, cluster.ObjectMeta.Name, err)
liztio marked this conversation as resolved.
Show resolved Hide resolved
return reconcile.Result{}, err
}

if len(children) > 0 {
klog.Infof("Deleting cluster %s: %d children still exist, will requeue", name, len(children))
for _, child := range children {

liztio marked this conversation as resolved.
Show resolved Hide resolved
accessor, err := meta.Accessor(child)
if err != nil {
return reconcile.Result{}, errors.Wrapf(err, "couldn't create accessor for %T", child)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we aggregate all errors in this for loop so we can try to delete as much as we can?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The deletion happens in a later step, I think Accessor is unlikely to fail. But it might be a good idea to aggregate the errors for the actual deletion loop

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I agree that Accessor is unlikely to fail, but I'd still like to include it in errList instead of returning early.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this is my only remaining comment

}

if accessor.GetDeletionTimestamp() != nil {
continue
}

gvk := child.GetObjectKind().GroupVersionKind().String()

klog.V(4).Infof("Deleting cluster %s: Deleting %s %s", name, gvk, accessor.GetName())
if err := r.Delete(context.Background(), child, client.PropagationPolicy(metav1.DeletePropagationForeground)); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "deleting cluster %s: failed to delete %s %s", name, gvk, accessor.GetName())
}
}

return reconcile.Result{Requeue: true, RequeueAfter: deleteRequeueAfter}, nil
}

klog.Infof("Reconciling cluster object %v triggers delete.", name)
if err := r.actuator.Delete(cluster); err != nil {
klog.Errorf("Error deleting cluster object %v; %v", name, err)
return reconcile.Result{}, err
}

// Remove finalizer on successful deletion.
klog.Infof("cluster object %v deletion successful, removing finalizer.", name)
klog.Infof("Cluster object %v deletion successful, removing finalizer.", name)
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
// It's possible the actuator's Delete call modified the cluster. We can't guarantee that every provider
// updated the in memory cluster object with the latest copy of the cluster, so try to get a fresh copy.
Expand All @@ -157,7 +209,7 @@ func (r *ReconcileCluster) Reconcile(request reconcile.Request) (reconcile.Resul
return reconcile.Result{}, nil
}

klog.Infof("reconciling cluster object %v triggers idempotent reconcile.", name)
klog.Infof("Reconciling cluster object %v triggers idempotent reconcile.", name)
if err := r.actuator.Reconcile(cluster); err != nil {
if requeueErr, ok := errors.Cause(err).(controllerError.HasRequeueAfterError); ok {
klog.Infof("Actuator returned requeue-after error: %v", requeueErr)
Expand All @@ -168,3 +220,83 @@ func (r *ReconcileCluster) Reconcile(request reconcile.Request) (reconcile.Resul
}
return reconcile.Result{}, nil
}

// listChildren returns a list of Deployments, Sets, and Machines than have an ownerref to the given cluster
func (r *ReconcileCluster) listChildren(ctx context.Context, cluster *clusterv1.Cluster) ([]runtime.Object, error) {
var children []runtime.Object

ns := cluster.GetNamespace()
opts := metav1.ListOptions{
LabelSelector: labels.FormatLabels(
map[string]string{clusterv1.MachineClusterLabelName: cluster.GetName()},
),
}

dfunc := func(_ context.Context, m metav1.ListOptions) (runtime.Object, error) {
return r.clusterClient.MachineDeployments(ns).List(m)
}
sfunc := func(_ context.Context, m metav1.ListOptions) (runtime.Object, error) {
return r.clusterClient.MachineSets(ns).List(m)
}
mfunc := func(_ context.Context, m metav1.ListOptions) (runtime.Object, error) {
return r.clusterClient.Machines(ns).List(m)
}

deployments, err := pager.New(dfunc).List(ctx, opts)
liztio marked this conversation as resolved.
Show resolved Hide resolved
if err != nil {
liztio marked this conversation as resolved.
Show resolved Hide resolved
return []runtime.Object{}, errors.Wrapf(err, "Failed to list MachineDeployments in %s", ns)
liztio marked this conversation as resolved.
Show resolved Hide resolved
}
dlist, ok := deployments.(*clusterv1.MachineDeploymentList)
liztio marked this conversation as resolved.
Show resolved Hide resolved
if !ok {
return []runtime.Object{}, fmt.Errorf("Expected MachineDeploymentList, got %T", deployments)
}

sets, err := pager.New(sfunc).List(ctx, opts)
if err != nil {
return []runtime.Object{}, errors.Wrapf(err, "Failed to list MachineSets in %s", ns)
}
slist, ok := sets.(*clusterv1.MachineSetList)
if !ok {
return []runtime.Object{}, fmt.Errorf("Expected MachineSetList, got %T", sets)
}

machines, err := pager.New(mfunc).List(ctx, opts)
if err != nil {
return []runtime.Object{}, errors.Wrapf(err, "Failed to list MachineSets in %s", ns)
}
mlist, ok := machines.(*clusterv1.MachineList)
if !ok {
return []runtime.Object{}, fmt.Errorf("Expected MachineList, got %T", machines)
}

for _, d := range dlist.Items {
if pointsTo(&d.ObjectMeta, &cluster.ObjectMeta) {
children = append(children, d.DeepCopyObject())
}
}

for _, s := range slist.Items {
if pointsTo(&s.ObjectMeta, &cluster.ObjectMeta) {
children = append(children, s.DeepCopyObject())
liztio marked this conversation as resolved.
Show resolved Hide resolved
}
}

for _, m := range mlist.Items {
if pointsTo(&m.ObjectMeta, &cluster.ObjectMeta) {
children = append(children, m.DeepCopyObject())
liztio marked this conversation as resolved.
Show resolved Hide resolved
}
}

return children, nil
}

func pointsTo(refs *metav1.ObjectMeta, target *metav1.ObjectMeta) bool {

liztio marked this conversation as resolved.
Show resolved Hide resolved
for _, ref := range refs.OwnerReferences {
if ref.UID == target.UID {
liztio marked this conversation as resolved.
Show resolved Hide resolved
return true
}
}

return false
}
80 changes: 80 additions & 0 deletions pkg/controller/cluster/cluster_controller_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cluster

import (
"testing"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)

func TestPointsTo(t *testing.T) {
targetID := "fri3ndsh1p"

tests := []struct {
name string
refIDs []string
expected bool
}{
{
name: "empty owner list",
},
{
name: "single wrong owner ref",
refIDs: []string{"m4g1c"},
},
{
name: "single right owner ref",
refIDs: []string{targetID},
expected: true,
},
{
name: "multiple wrong refs",
refIDs: []string{"m4g1c", "h4rm0ny"},
},
{
name: "multiple refs one right",
refIDs: []string{"m4g1c", targetID},
expected: true,
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {

liztio marked this conversation as resolved.
Show resolved Hide resolved
pointer := &metav1.ObjectMeta{}

target := &metav1.ObjectMeta{
UID: types.UID(targetID),
}

for _, ref := range test.refIDs {
pointer.OwnerReferences = append(pointer.OwnerReferences, metav1.OwnerReference{
UID: types.UID(ref),
Kind: "Cluster",
})
}

result := pointsTo(pointer, target)
if result != test.expected {
t.Errorf("expected %v, got %v", test.expected, result)

liztio marked this conversation as resolved.
Show resolved Hide resolved
}
})
}
}
6 changes: 5 additions & 1 deletion pkg/controller/cluster/cluster_reconciler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,11 @@ func TestReconcile(t *testing.T) {
c = mgr.GetClient()

a := newTestActuator()
recFn, requests := SetupTestReconcile(newReconciler(mgr, a))
r, err := newReconciler(mgr, a)
if err != nil {
t.Fatalf("Couldn't create controller: %v", err)
}
recFn, requests := SetupTestReconcile(r)
if err := add(mgr, recFn); err != nil {
t.Fatalf("error adding controller to manager: %v", err)
}
Expand Down