feat: switch to using proper ownership configuration and label filters
jakobmoellerdev committed Sep 7, 2023
1 parent 8ec722c commit d6ebd11
Showing 12 changed files with 268 additions and 79 deletions.
30 changes: 18 additions & 12 deletions controllers/lvmcluster_controller.go
@@ -46,14 +46,12 @@ import (
type EventReasonInfo string
type EventReasonError string

const EventReasonErrorDeletionPending EventReasonError = "DeletionPending"
const EventReasonErrorResourceReconciliationFailed EventReasonError = "ResourceReconciliationFailed"
const EventReasonResourceReconciliationSuccess EventReasonInfo = "ResourceReconciliationSuccess"

var lvmClusterFinalizer = "lvmcluster.topolvm.io"

const (
ControllerName = "lvmcluster-controller"
EventReasonErrorDeletionPending EventReasonError = "DeletionPending"
EventReasonErrorResourceReconciliationFailed EventReasonError = "ResourceReconciliationFailed"
EventReasonResourceReconciliationSuccess EventReasonInfo = "ResourceReconciliationSuccess"

lvmClusterFinalizer = "lvmcluster.topolvm.io"
)

// NOTE: when updating this, please also update docs/design/lvm-operator-manager.md
@@ -73,10 +71,11 @@ type resourceManager interface {
type LVMClusterReconciler struct {
client.Client
record.EventRecorder
Scheme *runtime.Scheme
ClusterType cluster.Type
Namespace string
ImageName string
Scheme *runtime.Scheme
ClusterType cluster.Type
EnableSnapshotting bool
Namespace string
ImageName string

// TopoLVMLeaderElectionPassthrough uses the given leaderElection when initializing TopoLVM to synchronize
// leader election configuration
@@ -184,13 +183,16 @@ func (r *LVMClusterReconciler) reconcile(ctx context.Context, instance *lvmv1alp
&vgManager{},
&lvmVG{},
&topolvmStorageClass{},
&topolvmVolumeSnapshotClass{},
}

if r.ClusterType == cluster.TypeOCP {
resources = append(resources, openshiftSccs{})
}

if r.EnableSnapshotting {
resources = append(resources, &topolvmVolumeSnapshotClass{})
}

resourceSyncStart := time.Now()
results := make(chan error, len(resources))
create := func(i int) {
@@ -393,6 +395,10 @@ func (r *LVMClusterReconciler) processDelete(ctx context.Context, instance *lvmv
resourceDeletionList = append(resourceDeletionList, openshiftSccs{})
}

if r.EnableSnapshotting {
resourceDeletionList = append(resourceDeletionList, &topolvmVolumeSnapshotClass{})
}

for _, unit := range resourceDeletionList {
if err := unit.ensureDeleted(r, ctx, instance); err != nil {
err := fmt.Errorf("failed cleaning up %s: %w", unit.getName(), err)
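For reference, each entry in the resources and resourceDeletionList slices above satisfies the controller's resourceManager contract. A minimal sketch of that interface, reconstructed from the getName/ensureCreated/ensureDeleted call sites in this commit (the actual definition in lvmcluster_controller.go may declare more):

// Reconstructed from the call sites in this commit; not the verbatim
// interface definition. Lives in package controllers alongside the types above.
type resourceManager interface {
	// getName returns a stable identifier used in logs and error messages.
	getName() string
	// ensureCreated reconciles the resource towards its desired state.
	ensureCreated(r *LVMClusterReconciler, ctx context.Context, cluster *lvmv1alpha1.LVMCluster) error
	// ensureDeleted removes the resource during LVMCluster deletion.
	ensureDeleted(r *LVMClusterReconciler, ctx context.Context, cluster *lvmv1alpha1.LVMCluster) error
}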
98 changes: 85 additions & 13 deletions controllers/lvmcluster_controller_watches.go
@@ -19,17 +19,23 @@ package controllers
import (
"context"

snapapiv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
corev1helper "k8s.io/component-helpers/scheduling/corev1"

secv1 "github.com/openshift/api/security/v1"

lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
"github.com/openshift/lvm-operator/pkg/cluster"
"sigs.k8s.io/controller-runtime/pkg/log"
"github.com/openshift/lvm-operator/pkg/labels"

appsv1 "k8s.io/api/apps/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

@@ -38,45 +44,111 @@ func (r *LVMClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
builder := ctrl.NewControllerManagedBy(mgr).
For(&lvmv1alpha1.LVMCluster{}).
Owns(&appsv1.DaemonSet{}).
Owns(&appsv1.Deployment{}).
Owns(&lvmv1alpha1.LVMVolumeGroup{}).
Watches(
&lvmv1alpha1.LVMVolumeGroupNodeStatus{},
handler.EnqueueRequestsFromMapFunc(r.getLVMClusterObjsForReconcile),
handler.EnqueueRequestsFromMapFunc(r.getLVMClusterObjsByNameFittingNodeSelector),
).
Watches(
&storagev1.StorageClass{},
handler.EnqueueRequestsFromMapFunc(r.getLVMClusterObjsForReconcile),
handler.EnqueueRequestsFromMapFunc(r.getManagedLabelObjsForReconcile),
)

if r.ClusterType == cluster.TypeOCP {
builder = builder.Watches(
&secv1.SecurityContextConstraints{},
handler.EnqueueRequestsFromMapFunc(r.getLVMClusterObjsForReconcile),
handler.EnqueueRequestsFromMapFunc(r.getManagedLabelObjsForReconcile),
)
}
if r.EnableSnapshotting {
builder = builder.Watches(
&snapapiv1.VolumeSnapshotClass{},
handler.EnqueueRequestsFromMapFunc(r.getManagedLabelObjsForReconcile),
)
}

return builder.Complete(r)
}

func (r *LVMClusterReconciler) getLVMClusterObjsForReconcile(ctx context.Context, obj client.Object) []reconcile.Request {

// getManagedLabelObjsForReconcile enqueues a reconcile for every LVMCluster whose management labels are
// present on the given object. It is intended for cases where owner references are not a valid option (e.g.
// the namespaced LVMCluster needs to "own" a cluster-scoped resource, for which owner references are invalid),
// and should generally only be used for cluster-scoped resources. Note that deletion logic must be handled
// manually, since garbage collection is not performed automatically as it is for owner references.
func (r *LVMClusterReconciler) getManagedLabelObjsForReconcile(ctx context.Context, obj client.Object) []reconcile.Request {
foundLVMClusterList := &lvmv1alpha1.LVMClusterList{}
listOps := &client.ListOptions{
Namespace: obj.GetNamespace(),
}

if err := r.Client.List(ctx, foundLVMClusterList, listOps); err != nil {
log.FromContext(ctx).Error(err, "getLVMClusterObjsForReconcile: Failed to get LVMCluster objs")
log.FromContext(ctx).Error(err, "getManagedLabelObjsForReconcile: Failed to get LVMCluster objs")
return []reconcile.Request{}
}

requests := make([]reconcile.Request, len(foundLVMClusterList.Items))
for i, item := range foundLVMClusterList.Items {
requests[i] = reconcile.Request{
var requests []reconcile.Request
for _, lvmCluster := range foundLVMClusterList.Items {
if !labels.MatchesManagedLabels(r.Scheme, obj, &lvmCluster) {
continue
}
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Name: item.GetName(),
Namespace: item.GetNamespace(),
Name: lvmCluster.GetName(),
Namespace: lvmCluster.GetNamespace(),
},
})
}
return requests
}

// getLVMClusterObjsByNameFittingNodeSelector enqueues every LVMCluster whose node selector covers the node
// named after the given object. That is, if the object name matches a node on the cluster and that node is
// selected by an LVMCluster's node selector, that LVMCluster is reconciled as well. It should only be used in
// conjunction with LVMVolumeGroupNodeStatus, as other objects do not use the node name as their resource name.
func (r *LVMClusterReconciler) getLVMClusterObjsByNameFittingNodeSelector(ctx context.Context, obj client.Object) []reconcile.Request {
foundLVMClusterList := &lvmv1alpha1.LVMClusterList{}
listOps := &client.ListOptions{
Namespace: obj.GetNamespace(),
}

if err := r.Client.List(ctx, foundLVMClusterList, listOps); err != nil {
log.FromContext(ctx).Error(err, "getLVMClusterObjsByNameFittingNodeSelector: Failed to get LVMCluster objs")
return []reconcile.Request{}
}

node := &v1.Node{}
if err := r.Client.Get(ctx, client.ObjectKeyFromObject(obj), node); err != nil {
log.FromContext(ctx).Error(err, "getLVMClusterObjsByNameFittingNodeSelector: Failed to get Node")
return []reconcile.Request{}
}

var requests []reconcile.Request
for _, lvmCluster := range foundLVMClusterList.Items {
selector, _ := extractNodeSelectorAndTolerations(&lvmCluster)
// if the selector is nil then the default behavior is to match all nodes
if selector == nil {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Name: lvmCluster.GetName(),
Namespace: lvmCluster.GetNamespace(),
},
})
continue
}

match, err := corev1helper.MatchNodeSelectorTerms(node, selector)
if err != nil {
log.FromContext(ctx).Error(err, "getLVMClusterObjsByNameFittingNodeSelector: node selector matching in event handler failed")
return []reconcile.Request{}
}
if match {
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Name: lvmCluster.GetName(),
Namespace: lvmCluster.GetNamespace(),
},
})
}
}
return requests
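The pkg/labels package referenced throughout this commit is not part of the rendered diff. A hypothetical sketch of the two helpers, under the assumption that they stamp and match owner-identifying labels; the label key and the use of the Scheme parameter are guesses, not confirmed by this diff:

package labels

import (
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// managedByKey is a placeholder; the key actually used by the operator is
// not shown in this commit.
const managedByKey = "topolvm.io/managed-by"

// SetManagedLabels marks obj as managed by the given owner so that
// label-based watches can route events back to it even across scopes.
// The Scheme parameter is presumably used to derive the owner's kind;
// this sketch ignores it.
func SetManagedLabels(_ *runtime.Scheme, obj client.Object, owner client.Object) {
	lbls := obj.GetLabels()
	if lbls == nil {
		lbls = map[string]string{}
	}
	lbls[managedByKey] = owner.GetName()
	obj.SetLabels(lbls)
}

// MatchesManagedLabels reports whether obj carries management labels that
// point at the given owner.
func MatchesManagedLabels(_ *runtime.Scheme, obj client.Object, owner client.Object) bool {
	return obj.GetLabels()[managedByKey] == owner.GetName()
}

Unlike owner references, labels like these are legal on cluster-scoped objects owned by the namespaced LVMCluster, which is why the SCC, CSIDriver, StorageClass, and VolumeSnapshotClass managers below switch to them.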
6 changes: 5 additions & 1 deletion controllers/scc.go
@@ -21,7 +21,10 @@ import (
"fmt"

secv1 "github.com/openshift/api/security/v1"

lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
"github.com/openshift/lvm-operator/pkg/labels"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -43,7 +46,7 @@ func (c openshiftSccs) getName() string {
return sccName
}

func (c openshiftSccs) ensureCreated(r *LVMClusterReconciler, ctx context.Context, _ *lvmv1alpha1.LVMCluster) error {
func (c openshiftSccs) ensureCreated(r *LVMClusterReconciler, ctx context.Context, cluster *lvmv1alpha1.LVMCluster) error {
logger := log.FromContext(ctx).WithValues("resourceManager", c.getName())
sccs := getAllSCCs(r.Namespace)
for _, scc := range sccs {
@@ -58,6 +61,7 @@ func (c openshiftSccs) ensureCreated(r *LVMClusterReconciler, ctx context.Contex
return fmt.Errorf("something went wrong when checking for SecurityContextConstraint: %w", err)
}

labels.SetManagedLabels(r.Scheme, scc, cluster)
if err := r.Create(ctx, scc); err != nil {
return fmt.Errorf("%s failed to reconcile: %w", c.getName(), err)
}
25 changes: 19 additions & 6 deletions controllers/suite_test.go
@@ -18,6 +18,7 @@ package controllers

import (
"context"
"errors"
"path/filepath"
"testing"

@@ -27,6 +28,7 @@ import (
secv1 "github.com/openshift/api/security/v1"
"github.com/openshift/lvm-operator/pkg/cluster"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
@@ -121,13 +123,24 @@ var _ = BeforeSuite(func() {
clusterType, err := cluster.NewTypeResolver(k8sClient).GetType(ctx)
Expect(err).ToNot(HaveOccurred())

enableSnapshotting := true
vsc := &snapapi.VolumeSnapshotClassList{}
if err := k8sClient.List(ctx, vsc, &client.ListOptions{Limit: 1}); err != nil {
// this is necessary in case the VolumeSnapshotClass CRDs are not registered in the distribution, e.g. on OpenShift Local
if discovery.IsGroupDiscoveryFailedError(errors.Unwrap(err)) {
logger.Info("VolumeSnapshotClasses do not exist on the cluster, ignoring")
enableSnapshotting = false
}
}

err = (&LVMClusterReconciler{
Client: k8sManager.GetClient(),
EventRecorder: k8sManager.GetEventRecorderFor(ControllerName),
Scheme: k8sManager.GetScheme(),
ClusterType: clusterType,
Namespace: testLvmClusterNamespace,
ImageName: testImageName,
Client: k8sManager.GetClient(),
EventRecorder: k8sManager.GetEventRecorderFor("LVMClusterReconciler"),
EnableSnapshotting: enableSnapshotting,
Scheme: k8sManager.GetScheme(),
ClusterType: clusterType,
Namespace: testLvmClusterNamespace,
ImageName: testImageName,
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())

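The snapshotting probe above inlines a pattern worth noting: a failed List whose unwrapped cause is a group-discovery failure means the snapshot.storage.k8s.io CRDs are not served at all, as opposed to a transient error. A standalone sketch of the same check; the helper name is illustrative, the test inlines this logic:

import (
	"context"
	"errors"

	snapapi "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
	"k8s.io/client-go/discovery"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// snapshottingAvailable mirrors the BeforeSuite probe: list a single
// VolumeSnapshotClass and treat a group-discovery failure as "the
// VolumeSnapshotClass CRDs are not installed on this cluster".
func snapshottingAvailable(ctx context.Context, c client.Client) bool {
	vscList := &snapapi.VolumeSnapshotClassList{}
	if err := c.List(ctx, vscList, &client.ListOptions{Limit: 1}); err != nil {
		if discovery.IsGroupDiscoveryFailedError(errors.Unwrap(err)) {
			return false
		}
	}
	return true
}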
5 changes: 4 additions & 1 deletion controllers/topolvm_csi_driver.go
@@ -21,6 +21,8 @@ import (
"fmt"

lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
"github.com/openshift/lvm-operator/pkg/labels"

storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -44,11 +46,12 @@

//+kubebuilder:rbac:groups=storage.k8s.io,resources=csidrivers,verbs=get;create;delete;watch;list

func (c csiDriver) ensureCreated(r *LVMClusterReconciler, ctx context.Context, _ *lvmv1alpha1.LVMCluster) error {
func (c csiDriver) ensureCreated(r *LVMClusterReconciler, ctx context.Context, cluster *lvmv1alpha1.LVMCluster) error {
logger := log.FromContext(ctx).WithValues("resourceManager", c.getName())
csiDriverResource := getCSIDriverResource()

result, err := cutil.CreateOrUpdate(ctx, r.Client, csiDriverResource, func() error {
labels.SetManagedLabels(r.Scheme, csiDriverResource, cluster)
// no need to mutate any field
return nil
})
24 changes: 9 additions & 15 deletions controllers/topolvm_snapshotclass.go
@@ -18,14 +18,15 @@ package controllers

import (
"context"
"errors"
"fmt"

snapapi "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"

lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
"github.com/openshift/lvm-operator/pkg/labels"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"sigs.k8s.io/controller-runtime/pkg/client"
cutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
@@ -46,19 +47,17 @@ func (s topolvmVolumeSnapshotClass) getName() string {

//+kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshotclasses,verbs=get;create;delete;watch;list

func (s topolvmVolumeSnapshotClass) ensureCreated(r *LVMClusterReconciler, ctx context.Context, lvmCluster *lvmv1alpha1.LVMCluster) error {
func (s topolvmVolumeSnapshotClass) ensureCreated(r *LVMClusterReconciler, ctx context.Context, cluster *lvmv1alpha1.LVMCluster) error {
logger := log.FromContext(ctx).WithValues("resourceManager", s.getName())
// one VolumeSnapshotClass is created for every deviceClass in the CR
topolvmSnapshotClasses := getTopolvmSnapshotClasses(lvmCluster)
topolvmSnapshotClasses := getTopolvmSnapshotClasses(cluster)
for _, vsc := range topolvmSnapshotClasses {
// we anticipate no edits to volume snapshot class
result, err := cutil.CreateOrUpdate(ctx, r.Client, vsc, func() error { return nil })
result, err := cutil.CreateOrUpdate(ctx, r.Client, vsc, func() error {
labels.SetManagedLabels(r.Scheme, vsc, cluster)
return nil
})
if err != nil {
// this is necessary in case the VolumeSnapshotClass CRDs are not registered in the Distro, e.g. for OpenShift Local
if discovery.IsGroupDiscoveryFailedError(errors.Unwrap(err)) {
logger.Info("volume snapshot class CRDs do not exist on the cluster, ignoring", "VolumeSnapshotClass", vscName)
return nil
}
return fmt.Errorf("%s failed to reconcile: %w", s.getName(), err)
}
logger.Info("VolumeSnapshotClass applied to cluster", "operation", result, "name", vsc.Name)
@@ -77,11 +76,6 @@ func (s topolvmVolumeSnapshotClass) ensureDeleted(r *LVMClusterReconciler, ctx c

vsc := &snapapi.VolumeSnapshotClass{}
if err := r.Client.Get(ctx, types.NamespacedName{Name: vscName}, vsc); err != nil {
// this is necessary in case the VolumeSnapshotClass CRDs are not registered in the Distro, e.g. for OpenShift Local
if discovery.IsGroupDiscoveryFailedError(errors.Unwrap(err)) {
logger.Info("VolumeSnapshotClasses do not exist on the cluster, ignoring")
return nil
}
return client.IgnoreNotFound(err)
}

11 changes: 8 additions & 3 deletions controllers/topolvm_storageclass.go
@@ -21,6 +21,8 @@ import (
"fmt"

lvmv1alpha1 "github.com/openshift/lvm-operator/api/v1alpha1"
"github.com/openshift/lvm-operator/pkg/labels"

storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -44,14 +46,17 @@

//+kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;create;delete;watch;list

func (s topolvmStorageClass) ensureCreated(r *LVMClusterReconciler, ctx context.Context, lvmCluster *lvmv1alpha1.LVMCluster) error {
func (s topolvmStorageClass) ensureCreated(r *LVMClusterReconciler, ctx context.Context, cluster *lvmv1alpha1.LVMCluster) error {
logger := log.FromContext(ctx).WithValues("resourceManager", s.getName())

// one StorageClass is created for every deviceClass in the CR
topolvmStorageClasses := s.getTopolvmStorageClasses(r, ctx, lvmCluster)
topolvmStorageClasses := s.getTopolvmStorageClasses(r, ctx, cluster)
for _, sc := range topolvmStorageClasses {
// we anticipate no edits to storage class
result, err := cutil.CreateOrUpdate(ctx, r.Client, sc, func() error { return nil })
result, err := cutil.CreateOrUpdate(ctx, r.Client, sc, func() error {
labels.SetManagedLabels(r.Scheme, sc, cluster)
return nil
})
if err != nil {
return fmt.Errorf("%s failed to reconcile: %w", s.getName(), err)
}
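Taken together, the creation-side pattern this commit converges on keeps the label stamping inside the CreateOrUpdate mutate callback: the callback runs on both create and update, so the management labels are re-asserted on every reconcile even if something strips them from the live object. A condensed, illustrative distillation of the pattern shared by the CSIDriver, StorageClass, and VolumeSnapshotClass managers; ensureManaged is not a function in this commit, and the sketch assumes the controllers package's existing imports (fmt, cutil, labels, log):

// ensureManaged applies the management labels and creates or updates obj.
// Illustrative only; each resource manager in this commit inlines this logic.
func ensureManaged(ctx context.Context, r *LVMClusterReconciler, obj client.Object, cluster *lvmv1alpha1.LVMCluster) error {
	result, err := cutil.CreateOrUpdate(ctx, r.Client, obj, func() error {
		labels.SetManagedLabels(r.Scheme, obj, cluster)
		return nil // no other fields need mutation
	})
	if err != nil {
		return fmt.Errorf("failed to reconcile %s: %w", obj.GetName(), err)
	}
	log.FromContext(ctx).Info("resource applied", "operation", result, "name", obj.GetName())
	return nil
}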
[Diff truncated: the remaining 5 of the 12 changed files are not rendered.]
