Skip to content

Commit

Permalink
pao:hypershift: generalize reconciliation loop
Browse files Browse the repository at this point in the history
Modify the reconciliation flow so it can fit both HCP and OCP.

Signed-off-by: Talor Itzhak <[email protected]>
  • Loading branch information
Tal-or committed May 15, 2024
1 parent d2b5184 commit 9195346
Show file tree
Hide file tree
Showing 3 changed files with 84 additions and 14 deletions.
47 changes: 47 additions & 0 deletions cmd/cluster-node-tuning-operator/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ import (
"github.com/spf13/pflag"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
Expand All @@ -34,17 +35,23 @@ import (
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"net/http"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/cluster"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/webhook"

tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
ntoclient "github.com/openshift/cluster-node-tuning-operator/pkg/client"
"github.com/openshift/cluster-node-tuning-operator/pkg/config"
"github.com/openshift/cluster-node-tuning-operator/pkg/metrics"
"github.com/openshift/cluster-node-tuning-operator/pkg/operator"
"github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/cmd/render"
"github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/hypershift"
hcpcomponents "github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/hypershift/components"
hcpstatus "github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/hypershift/status"
"github.com/openshift/cluster-node-tuning-operator/pkg/tuned/cmd/operand"
tunedrender "github.com/openshift/cluster-node-tuning-operator/pkg/tuned/cmd/render"
"github.com/openshift/cluster-node-tuning-operator/pkg/util"
Expand Down Expand Up @@ -197,6 +204,46 @@ func operatorRun() {
if err = (&performancev2.PerformanceProfile{}).SetupWebhookWithManager(mgr); err != nil {
klog.Exitf("unable to create PerformanceProfile v2 webhook: %v", err)
}
} else {
// Hypershift configuration
restConfig, err := ntoclient.GetInClusterConfig()
if err != nil {
klog.Exitf("unable to create get InClusterConfiguration while creating PerformanceProfile controller: %v", err)
}

fOps := func(opts *cluster.Options) {
operatorNamespace := config.OperatorNamespace()
opts.Cache.Namespaces = []string{operatorNamespace}
opts.Scheme = mgr.GetScheme()
opts.MapperProvider = func(c *rest.Config, httpClient *http.Client) (meta.RESTMapper, error) {
return mgr.GetRESTMapper(), nil
}
opts.NewClient = func(config *rest.Config, options client.Options) (client.Client, error) {
c, err := client.New(config, options)
if err != nil {
return nil, err
}
return hypershift.NewControlPlaneClient(c, operatorNamespace), nil
}
}
managementCluster, err := cluster.New(restConfig, fOps)
if err != nil {
klog.Exitf("unable to create ManagementCluster while creating PerformanceProfile controller : %v", err)
}

if err := mgr.Add(managementCluster); err != nil {
klog.Exitf("unable to add ManagementCluster to manger while creating PerformanceProfile controller : %v", err)
}
if err = (&paocontroller.PerformanceProfileReconciler{
// dataPlaneClient
Client: mgr.GetClient(),
ManagementClient: managementCluster.GetClient(),
Recorder: managementCluster.GetEventRecorderFor("performance-profile-controller"),
ComponentsHandler: hcpcomponents.NewHandler(managementCluster.GetClient(), mgr.GetClient(), mgr.GetScheme()),
StatusWriter: hcpstatus.NewWriter(managementCluster.GetClient(), mgr.GetClient()),
}).SetupWithManagerForHypershift(mgr, managementCluster); err != nil {
klog.Exitf("unable to create PerformanceProfile controller: %v", err)
}
}
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
klog.Exitf("manager exited with non-zero code: %v", err)
Expand Down
50 changes: 36 additions & 14 deletions pkg/performanceprofile/controller/performanceprofile_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ import (
mcov1 "github.com/openshift/api/machineconfiguration/v1"
performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/performanceprofile/v2"
tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1"
"github.com/openshift/cluster-node-tuning-operator/pkg/config"
ntoconfig "github.com/openshift/cluster-node-tuning-operator/pkg/config"
"github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/components"
profileutil "github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/components/profile"
"github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/resources"
Expand All @@ -45,18 +45,24 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
"k8s.io/klog/v2"

ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/cluster"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)

const finalizer = "foreground-deletion"
// Finalizer and annotation names used by the PerformanceProfile controller.
// The finalizer attached to the reconciled object differs between standalone
// OpenShift and HyperShift (see the finalizer selection in Reconcile).
const (
// openshiftFinalizer is placed on PerformanceProfile objects on standalone OCP.
openshiftFinalizer = "foreground-deletion"
// hypershiftFinalizer is placed on the ConfigMap that carries the profile on HyperShift.
hypershiftFinalizer = "hypershift.openshift.io/foreground-deletion"
// controllerGeneratedMachineConfig marks machine config generated from a
// performance profile on HyperShift. NOTE(review): usage not visible in this
// hunk — confirm against the rest of the file.
controllerGeneratedMachineConfig = "hypershift.openshift.io/performanceprofile-config"
)

// PerformanceProfileReconciler reconciles a PerformanceProfile object
type PerformanceProfileReconciler struct {
Expand Down Expand Up @@ -387,21 +393,32 @@ func (r *PerformanceProfileReconciler) Reconcile(ctx context.Context, req ctrl.R
return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
}

klog.Info("Reconciling PerformanceProfile")
// Fetch the PerformanceProfile instance
instance := &performancev2.PerformanceProfile{}
err = r.Get(ctx, req.NamespacedName, instance)
klog.InfoS("Reconciling", "reqNamespace", req.NamespacedName)
defer klog.InfoS("Exit Reconciling", "reqNamespace", req.NamespacedName)

var instance client.Object
instance = &performancev2.PerformanceProfile{}
finalizer := openshiftFinalizer
if ntoconfig.InHyperShift() {
instance = &corev1.ConfigMap{}
finalizer = hypershiftFinalizer
}

err = r.ManagementClient.Get(ctx, req.NamespacedName, instance)
if err != nil {
if k8serros.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Request object isn't found, could have been deleted after reconciled request.
// Owned objects are automatically garbage collected.
// For additional cleanup logic, use finalizers.
// Return and don't requeue
klog.InfoS("Instance not found", "reqNamespace", req.NamespacedName)
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
klog.ErrorS(err, "Reading failure", "reqNamespace", req.NamespacedName)
return reconcile.Result{}, err
}

instanceKey := client.ObjectKeyFromObject(instance)
if instance.GetDeletionTimestamp() != nil {
// delete components
if err := r.ComponentsHandler.Delete(ctx, instance.GetName()); err != nil {
Expand All @@ -417,22 +434,27 @@ func (r *PerformanceProfileReconciler) Reconcile(ctx context.Context, req ctrl.R

// remove finalizer
if hasFinalizer(instance, finalizer) {
klog.InfoS("Remove finalizer", "instance", instanceKey.String())
removeFinalizer(instance, finalizer)
if err := r.Update(ctx, instance); err != nil {
if err := r.ManagementClient.Update(ctx, instance); err != nil {
return reconcile.Result{}, err
}
klog.InfoS("Finalizer Removed", "instance", instanceKey.String())
return reconcile.Result{}, nil
}
}

// add finalizer
if !hasFinalizer(instance, finalizer) {
klog.InfoS("Add finalizer", "instance", instanceKey.String())
instance.SetFinalizers(append(instance.GetFinalizers(), finalizer))
instance.Status.Conditions = status.GetProgressingConditions("DeploymentStarting", "Deployment is starting")
if err := r.Update(ctx, instance); err != nil {
if err = r.StatusWriter.Update(ctx, instance, status.GetProgressingConditions("DeploymentStarting", "Deployment is starting")); err != nil {
return reconcile.Result{}, err
}

if err = r.ManagementClient.Update(ctx, instance); err != nil {
return reconcile.Result{}, err
}
klog.InfoS("Finalizer added", "instance", instanceKey.String())
// we exit reconcile loop because we will have additional update reconcile
return reconcile.Result{}, nil
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@ import (
)

var _ = Describe("Controller", func() {
const finalizer = "foreground-deletion"
var request reconcile.Request
var profile *performancev2.PerformanceProfile
var profileMCP *mcov1.MachineConfigPool
Expand Down

0 comments on commit 9195346

Please sign in to comment.