From dba015a1c2503128cc6a02a11cb66f801c9e1fa1 Mon Sep 17 00:00:00 2001 From: Oliver King Date: Thu, 2 Nov 2023 22:13:03 -0500 Subject: [PATCH] everything working --- pkg/controller/controller.go | 7 +- pkg/controller/dns/external_dns.go | 11 +- pkg/controller/dns/external_dns_test.go | 58 ++- pkg/controller/nginx/nginx.go | 28 -- .../nginxingress/nginx_ingress_controller.go | 431 ++++++++++++++++++ pkg/controller/nginxingress/types.go | 11 + pkg/manifests/common.go | 39 +- pkg/manifests/external_dns.go | 7 +- pkg/manifests/nginx.go | 106 ++--- pkg/manifests/types.go | 58 +++ 10 files changed, 597 insertions(+), 159 deletions(-) create mode 100644 pkg/controller/nginxingress/nginx_ingress_controller.go create mode 100644 pkg/controller/nginxingress/types.go create mode 100644 pkg/manifests/types.go diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index a545800c..dc5f4847 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -10,6 +10,7 @@ import ( "os" approutingv1alpha1 "github.com/Azure/aks-app-routing-operator/api/v1alpha1" + "github.com/Azure/aks-app-routing-operator/pkg/controller/nginxingress" "github.com/Azure/aks-app-routing-operator/pkg/webhook" "github.com/go-logr/logr" cfgv1alpha2 "github.com/openservicemesh/osm/pkg/apis/config/v1alpha2" @@ -165,10 +166,14 @@ func setupWebhooks(mgr ctrl.Manager, addWebhooksFn func(mgr ctrl.Manager) error) func setupControllers(mgr ctrl.Manager, conf *config.Config) error { var selfDeploy *appsv1.Deployment = nil // self deploy doesn't work because operator isn't in same resources as child resources - if err := dns.NewExternalDns(mgr, conf, selfDeploy); err != nil { + if err := dns.NewExternalDns(mgr, conf); err != nil { return fmt.Errorf("setting up external dns controller: %w", err) } + if err := nginxingress.NewReconciler(conf, mgr); err != nil { + return fmt.Errorf("setting up nginx ingress controller reconciler: %w", err) + } + 
nginxConfigs, err := nginx.New(mgr, conf, selfDeploy) if err != nil { return fmt.Errorf("getting nginx configs: %w", err) diff --git a/pkg/controller/dns/external_dns.go b/pkg/controller/dns/external_dns.go index 1a451039..62de497e 100644 --- a/pkg/controller/dns/external_dns.go +++ b/pkg/controller/dns/external_dns.go @@ -8,7 +8,6 @@ import ( "github.com/Azure/aks-app-routing-operator/pkg/controller/controllername" "github.com/Azure/aks-app-routing-operator/pkg/manifests" "github.com/Azure/aks-app-routing-operator/pkg/util" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" ctrl "sigs.k8s.io/controller-runtime" @@ -50,8 +49,8 @@ func addExternalDnsCleaner(manager ctrl.Manager, objs []cleanObj) error { } // NewExternalDns starts all resources required for external dns -func NewExternalDns(manager ctrl.Manager, conf *config.Config, self *appsv1.Deployment) error { - instances := instances(conf, self) +func NewExternalDns(manager ctrl.Manager, conf *config.Config) error { + instances := instances(conf) deployInstances := filterAction(instances, deploy) deployRes := getResources(deployInstances) @@ -67,16 +66,16 @@ func NewExternalDns(manager ctrl.Manager, conf *config.Config, self *appsv1.Depl return nil } -func instances(conf *config.Config, self *appsv1.Deployment) []instance { +func instances(conf *config.Config) []instance { // public publicCfg := publicConfig(conf) publicAction := actionFromConfig(publicCfg) - publicResources := manifests.ExternalDnsResources(conf, self, []*manifests.ExternalDnsConfig{publicCfg}) + publicResources := manifests.ExternalDnsResources(conf, []*manifests.ExternalDnsConfig{publicCfg}) // private privateCfg := privateConfig(conf) privateAction := actionFromConfig(privateCfg) - privateResources := manifests.ExternalDnsResources(conf, self, []*manifests.ExternalDnsConfig{privateCfg}) + privateResources := manifests.ExternalDnsResources(conf, 
[]*manifests.ExternalDnsConfig{privateCfg}) return []instance{ { diff --git a/pkg/controller/dns/external_dns_test.go b/pkg/controller/dns/external_dns_test.go index 85f8f6f3..92c74f22 100644 --- a/pkg/controller/dns/external_dns_test.go +++ b/pkg/controller/dns/external_dns_test.go @@ -11,7 +11,6 @@ import ( "github.com/Azure/aks-app-routing-operator/pkg/util" "github.com/google/uuid" "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/rest" @@ -25,9 +24,8 @@ var ( env *envtest.Environment restConfig *rest.Config err error - self *appsv1.Deployment = nil - uid = uuid.New().String() - noZones = config.Config{ + uid = uuid.New().String() + noZones = config.Config{ ClusterUid: uid, PrivateZoneConfig: config.DnsZoneConfig{}, PublicZoneConfig: config.DnsZoneConfig{}, @@ -182,12 +180,12 @@ func TestInstances(t *testing.T) { expected: []instance{ { config: publicConfig(&noZones), - resources: manifests.ExternalDnsResources(&noZones, self, []*manifests.ExternalDnsConfig{publicConfig(&noZones)}), + resources: manifests.ExternalDnsResources(&noZones, []*manifests.ExternalDnsConfig{publicConfig(&noZones)}), action: clean, }, { config: privateConfig(&noZones), - resources: manifests.ExternalDnsResources(&noZones, self, []*manifests.ExternalDnsConfig{privateConfig(&noZones)}), + resources: manifests.ExternalDnsResources(&noZones, []*manifests.ExternalDnsConfig{privateConfig(&noZones)}), action: clean, }, }, @@ -198,12 +196,12 @@ func TestInstances(t *testing.T) { expected: []instance{ { config: publicConfig(&onlyPrivZones), - resources: manifests.ExternalDnsResources(&onlyPrivZones, self, []*manifests.ExternalDnsConfig{publicConfig(&onlyPrivZones)}), + resources: manifests.ExternalDnsResources(&onlyPrivZones, []*manifests.ExternalDnsConfig{publicConfig(&onlyPrivZones)}), action: clean, }, { config: privateConfig(&onlyPrivZones), 
- resources: manifests.ExternalDnsResources(&onlyPrivZones, self, []*manifests.ExternalDnsConfig{privateConfig(&onlyPrivZones)}), + resources: manifests.ExternalDnsResources(&onlyPrivZones, []*manifests.ExternalDnsConfig{privateConfig(&onlyPrivZones)}), action: deploy, }, }, @@ -214,12 +212,12 @@ func TestInstances(t *testing.T) { expected: []instance{ { config: publicConfig(&onlyPubZones), - resources: manifests.ExternalDnsResources(&onlyPubZones, self, []*manifests.ExternalDnsConfig{publicConfig(&onlyPubZones)}), + resources: manifests.ExternalDnsResources(&onlyPubZones, []*manifests.ExternalDnsConfig{publicConfig(&onlyPubZones)}), action: deploy, }, { config: privateConfig(&onlyPubZones), - resources: manifests.ExternalDnsResources(&onlyPubZones, self, []*manifests.ExternalDnsConfig{privateConfig(&onlyPubZones)}), + resources: manifests.ExternalDnsResources(&onlyPubZones, []*manifests.ExternalDnsConfig{privateConfig(&onlyPubZones)}), action: clean, }, }, @@ -230,12 +228,12 @@ func TestInstances(t *testing.T) { expected: []instance{ { config: publicConfig(&allZones), - resources: manifests.ExternalDnsResources(&allZones, self, []*manifests.ExternalDnsConfig{publicConfig(&allZones)}), + resources: manifests.ExternalDnsResources(&allZones, []*manifests.ExternalDnsConfig{publicConfig(&allZones)}), action: deploy, }, { config: privateConfig(&allZones), - resources: manifests.ExternalDnsResources(&allZones, self, []*manifests.ExternalDnsConfig{privateConfig(&allZones)}), + resources: manifests.ExternalDnsResources(&allZones, []*manifests.ExternalDnsConfig{privateConfig(&allZones)}), action: deploy, }, }, @@ -243,7 +241,7 @@ func TestInstances(t *testing.T) { } for _, test := range tests { - instances := instances(test.conf, self) + instances := instances(test.conf) if !reflect.DeepEqual(instances, test.expected) { t.Error( "For", test.name, @@ -255,9 +253,9 @@ func TestInstances(t *testing.T) { } func TestFilterAction(t *testing.T) { - allClean := instances(&noZones, 
self) - allDeploy := instances(&allZones, self) - oneDeployOneClean := instances(&onlyPrivZones, self) + allClean := instances(&noZones) + allDeploy := instances(&allZones) + oneDeployOneClean := instances(&onlyPrivZones) tests := []struct { name string @@ -318,7 +316,7 @@ func TestFilterAction(t *testing.T) { } func TestGetResources(t *testing.T) { - instances := instances(&noZones, self) + instances := instances(&noZones) got := getResources(instances) var expected []client.Object for _, instance := range instances { @@ -345,17 +343,17 @@ func TestGetLabels(t *testing.T) { }, { name: "top level and private", - instances: filterAction(instances(&onlyPrivZones, self), deploy), + instances: filterAction(instances(&onlyPrivZones), deploy), expected: util.MergeMaps(manifests.GetTopLevelLabels(), manifests.PrivateProvider.Labels()), }, { name: "top level and public", - instances: filterAction(instances(&onlyPubZones, self), deploy), + instances: filterAction(instances(&onlyPubZones), deploy), expected: util.MergeMaps(manifests.GetTopLevelLabels(), manifests.PublicProvider.Labels()), }, { name: "all labels", - instances: instances(&allZones, self), + instances: instances(&allZones), expected: util.MergeMaps(manifests.GetTopLevelLabels(), manifests.PublicProvider.Labels(), manifests.PrivateProvider.Labels()), }, } @@ -374,36 +372,36 @@ func TestCleanObjs(t *testing.T) { }{ { name: "private dns clean", - instances: instances(&onlyPubZones, self), + instances: instances(&onlyPubZones), expected: []cleanObj{{ - resources: instances(&onlyPubZones, self)[1].resources, + resources: instances(&onlyPubZones)[1].resources, labels: util.MergeMaps(manifests.GetTopLevelLabels(), manifests.PrivateProvider.Labels()), }}, }, { name: "public dns clean", - instances: instances(&onlyPrivZones, self), + instances: instances(&onlyPrivZones), expected: []cleanObj{{ - resources: instances(&onlyPrivZones, self)[0].resources, + resources: instances(&onlyPrivZones)[0].resources, labels: 
util.MergeMaps(manifests.GetTopLevelLabels(), manifests.PublicProvider.Labels()), }}, }, { name: "all dns clean", - instances: instances(&noZones, self), + instances: instances(&noZones), expected: []cleanObj{ { - resources: instances(&noZones, self)[0].resources, + resources: instances(&noZones)[0].resources, labels: util.MergeMaps(manifests.GetTopLevelLabels(), manifests.PublicProvider.Labels()), }, { - resources: instances(&noZones, self)[1].resources, + resources: instances(&noZones)[1].resources, labels: util.MergeMaps(manifests.GetTopLevelLabels(), manifests.PrivateProvider.Labels()), }}, }, { name: "no dns clean", - instances: instances(&allZones, self), + instances: instances(&allZones), expected: []cleanObj(nil), }, } @@ -462,7 +460,7 @@ func TestAddExternalDnsCleaner(t *testing.T) { err = addExternalDnsCleaner(m, []cleanObj{ { - resources: instances(&noZones, self)[0].resources, + resources: instances(&noZones)[0].resources, labels: util.MergeMaps(manifests.GetTopLevelLabels(), manifests.PublicProvider.Labels()), }}) require.NoError(t, err) @@ -473,7 +471,7 @@ func TestNewExternalDns(t *testing.T) { require.NoError(t, err) conf := &config.Config{NS: "app-routing-system", OperatorDeployment: "operator"} - err = NewExternalDns(m, conf, self) + err = NewExternalDns(m, conf) require.NoError(t, err) } diff --git a/pkg/controller/nginx/nginx.go b/pkg/controller/nginx/nginx.go index b1222542..82c31cc6 100644 --- a/pkg/controller/nginx/nginx.go +++ b/pkg/controller/nginx/nginx.go @@ -4,14 +4,12 @@ import ( "context" "github.com/Azure/aks-app-routing-operator/pkg/config" - "github.com/Azure/aks-app-routing-operator/pkg/controller/ingress" "github.com/Azure/aks-app-routing-operator/pkg/controller/service" "github.com/Azure/aks-app-routing-operator/pkg/manifests" appsv1 "k8s.io/api/apps/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - 
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" ) @@ -68,14 +66,6 @@ func New(m manager.Manager, conf *config.Config, self *appsv1.Deployment) ([]*ma defaultIngConfig: defaultIngConfig, } - if err := n.addIngressClassReconciler(); err != nil { - return nil, err - } - - if err := n.addIngressControllerReconciler(); err != nil { - return nil, err - } - if err := n.addIngressReconciler(); err != nil { return nil, err } @@ -83,24 +73,6 @@ func New(m manager.Manager, conf *config.Config, self *appsv1.Deployment) ([]*ma return n.ingConfigs, nil } -func (n *nginx) addIngressClassReconciler() error { - objs := []client.Object{} - for _, config := range n.ingConfigs { - objs = append(objs, manifests.NginxIngressClass(n.conf, n.self, config)...) - } - - return ingress.NewIngressClassReconciler(n.manager, objs, n.name) -} - -func (n *nginx) addIngressControllerReconciler() error { - objs := []client.Object{} - for _, config := range n.ingConfigs { - objs = append(objs, manifests.NginxIngressControllerResources(n.conf, n.self, config)...) 
- } - - return ingress.NewIngressControllerReconciler(n.manager, objs, n.name) -} - func (n *nginx) addIngressReconciler() error { return service.NewNginxIngressReconciler(n.manager, n.defaultIngConfig) } diff --git a/pkg/controller/nginxingress/nginx_ingress_controller.go b/pkg/controller/nginxingress/nginx_ingress_controller.go new file mode 100644 index 00000000..3b7fce09 --- /dev/null +++ b/pkg/controller/nginxingress/nginx_ingress_controller.go @@ -0,0 +1,431 @@ +package nginxingress + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" + + approutingv1alpha1 "github.com/Azure/aks-app-routing-operator/api/v1alpha1" + "github.com/Azure/aks-app-routing-operator/pkg/config" + "github.com/Azure/aks-app-routing-operator/pkg/controller/controllername" + "github.com/Azure/aks-app-routing-operator/pkg/controller/metrics" + "github.com/Azure/aks-app-routing-operator/pkg/manifests" + "github.com/Azure/aks-app-routing-operator/pkg/util" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + netv1 "k8s.io/api/networking/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/utils/keymutex" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +const ( + controllerClassMaxLen = 250 +) + +var ( + nginxIngressControllerReconcilerName = controllername.New("nginx", "ingress", "controller", "reconciler") + + // collisionCountMu is used to prevent multiple nginxIngressController resources from determining their collisionCount at the same time. We use + // a hashed key mutex because collisions can only occur when the nginxIngressController resources have the same spec.ControllerNamePrefix field. + // This is the field used to key into this mutex. 
+ collisionCountMu = keymutex.NewHashed(6) // 6 is the number of "buckets". It's not too big, not too small +) + +// nginxIngressControllerReconciler reconciles a NginxIngressController object +type nginxIngressControllerReconciler struct { + client client.Client + conf *config.Config +} + +// NewReconciler sets up the controller with the Manager. +func NewReconciler(conf *config.Config, mgr ctrl.Manager) error { + metrics.InitControllerMetrics(nginxIngressControllerReconcilerName) + + reconciler := &nginxIngressControllerReconciler{ + client: mgr.GetClient(), + conf: conf, + } + + if err := nginxIngressControllerReconcilerName.AddToController( + ctrl.NewControllerManagedBy(mgr). + For(&approutingv1alpha1.NginxIngressController{}). + Owns(&appsv1.Deployment{}), + mgr.GetLogger(), + ).Complete(reconciler); err != nil { + return err + } + + return nil +} + +func (n *nginxIngressControllerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) { + lgr := log.FromContext(ctx, "nginxIngressController", req.NamespacedName, "reconciler", "event") + ctx = log.IntoContext(ctx, lgr) + + defer func() { + metrics.HandleControllerReconcileMetrics(nginxIngressControllerReconcilerName, res, err) + }() + + var nginxIngressController approutingv1alpha1.NginxIngressController + if err := n.client.Get(ctx, req.NamespacedName, &nginxIngressController); err != nil { + if apierrors.IsNotFound(err) { // object was deleted + lgr.Info("NginxIngressController not found") + return ctrl.Result{}, nil + } + + lgr.Error(err, "unable to fetch NginxIngressController") + return ctrl.Result{}, err + } + + lockKey := nginxIngressController.Spec.ControllerNamePrefix + collisionCountMu.LockKey(lockKey) + defer collisionCountMu.UnlockKey(lockKey) + if err := n.SetCollisionCount(ctx, &nginxIngressController); err != nil { + lgr.Error(err, "unable to set collision count") + return ctrl.Result{}, fmt.Errorf("setting collision count: %w", err) + } + + resources := 
n.ManagedResources(&nginxIngressController) + if resources == nil { + return ctrl.Result{}, fmt.Errorf("unable to get managed resources") + } + + managedRes, err := n.ReconcileResource(ctx, &nginxIngressController, resources) + defer func() { + n.updateStatus(&nginxIngressController, resources.Deployment, resources.IngressClass, managedRes) + if statusErr := n.client.Status().Update(ctx, &nginxIngressController); statusErr != nil { + lgr.Error(statusErr, "unable to update NginxIngressController status") + if err == nil { + err = statusErr + } + } + }() + if err != nil { + return ctrl.Result{}, fmt.Errorf("reconciling resource: %w", err) + } + + return ctrl.Result{}, nil +} + +// ReconcileResource reconciles the NginxIngressController resources returning a list of managed resources. +func (n *nginxIngressControllerReconciler) ReconcileResource(ctx context.Context, nic *approutingv1alpha1.NginxIngressController, res *manifests.NginxResources) ([]approutingv1alpha1.ManagedObjectReference, error) { + if nic == nil { + return nil, errors.New("nginxIngressController cannot be nil") + } + + start := time.Now() + lgr := log.FromContext(ctx, "nginxIngressController", nic.GetName()) + ctx = log.IntoContext(ctx, lgr) + lgr.Info("starting to reconcile resource") + defer lgr.Info("finished reconciling resource", "latencySec", time.Since(start).Seconds()) + + var managedResourceRefs []approutingv1alpha1.ManagedObjectReference + for _, obj := range res.Objects() { + // TODO: upsert works pretty well but we want to set annotations exactly on the nginx service, we should use an alternative strategy for that + + if err := util.Upsert(ctx, n.client, obj); err != nil { + lgr.Error(err, "unable to upsert object", "name", obj.GetName(), "kind", obj.GetObjectKind().GroupVersionKind().Kind, "namespace", obj.GetNamespace()) + return nil, fmt.Errorf("upserting object: %w", err) + } + + if managedByUs(obj) { + managedResourceRefs = append(managedResourceRefs, 
approutingv1alpha1.ManagedObjectReference{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + Kind: obj.GetObjectKind().GroupVersionKind().Kind, + APIGroup: obj.GetObjectKind().GroupVersionKind().Group, + }) + + } + } + + return managedResourceRefs, nil +} + +func (n *nginxIngressControllerReconciler) ManagedResources(nic *approutingv1alpha1.NginxIngressController) *manifests.NginxResources { + if nic == nil { + return nil + } + + cc := "webapprouting.kubernetes.azure.com/nginx/" + url.PathEscape(nic.Name) + + // it's impossible for this to happen because we enforce nic.Name to be less than 101 characters + if len(cc) > controllerClassMaxLen { + cc = cc[:controllerClassMaxLen] + } + + nginxIngressCfg := &manifests.NginxIngressConfig{ + ControllerClass: cc, + ResourceName: fmt.Sprintf("%s-%d", nic.Spec.ControllerNamePrefix, nic.Status.CollisionCount), + IcName: nic.Spec.IngressClassName, + ServiceConfig: &manifests.ServiceConfig{ + Annotations: nic.Spec.LoadBalancerAnnotations, + }, + } + + res := manifests.GetNginxResources(n.conf, nginxIngressCfg) + owner := manifests.GetOwnerRefs(nic, true) + for _, obj := range res.Objects() { + if managedByUs(obj) { + obj.SetOwnerReferences(owner) + } + } + + return res +} + +func (n *nginxIngressControllerReconciler) SetCollisionCount(ctx context.Context, nic *approutingv1alpha1.NginxIngressController) error { + lgr := log.FromContext(ctx) + startingCollisionCount := nic.Status.CollisionCount + + // there's a limit to how many times we should try to find the collision count, we don't want to put too much stress on api server + // TODO: we should set a condition when hit + jitter retry interval + for i := 0; i < 10; i++ { + collision, err := n.collides(ctx, nic) + if err != nil { + lgr.Error(err, "unable to determine collision") + return fmt.Errorf("determining collision: %w", err) + } + + if collision == collisionIngressClass { + lgr.Info("ingress class collision") + meta.SetStatusCondition(&nic.Status.Conditions, 
metav1.Condition{ + Type: approutingv1alpha1.ConditionTypeIngressClassReady, + Status: "Collision", + ObservedGeneration: nic.Generation, + Message: fmt.Sprintf("IngressClass %s already exists in the cluster and isn't owned by this resource. Delete the IngressClass or recreate this resource with a different spec.IngressClass field.", nic.Spec.IngressClassName), + Reason: "IngressClassCollision", + }) + if err := n.client.Status().Update(ctx, nic); err != nil { + lgr.Error(err, "unable to update status") + return fmt.Errorf("updating status with IngressClass collision") + } + + return nil // this isn't an error, it's caused by a race condition involving our webhook + } + + if collision == collisionNone { + break + } + + lgr.Info("reconcilable collision detected, incrementing", "collisionCount", nic.Status.CollisionCount) + nic.Status.CollisionCount++ + + if i == 9 { + return errors.New("too many collisions") + } + } + + if startingCollisionCount != nic.Status.CollisionCount { + lgr.Info("setting new collision count", "collisionCount", nic.Status.CollisionCount, "startingCollisionCount", startingCollisionCount) + if err := n.client.Status().Update(ctx, nic); err != nil { + lgr.Error(err, "unable to update status") + return fmt.Errorf("updating status: %w", err) + } + } + + return nil +} + +func (n *nginxIngressControllerReconciler) collides(ctx context.Context, nic *approutingv1alpha1.NginxIngressController) (collision, error) { + lgr := log.FromContext(ctx) + + res := n.ManagedResources(nic) + if res == nil { + return collisionNone, fmt.Errorf("getting managed objects") + } + + for _, obj := range res.Objects() { + lgr := lgr.WithValues("kind", obj.GetObjectKind().GroupVersionKind().Kind, "name", obj.GetName(), "namespace", obj.GetNamespace()) + + // if we won't own the resource, we don't care about collisions. 
+ // this is most commonly used for namespaces since we shouldn't own + // namespaces + if !managedByUs(obj) { + lgr.Info("skipping collision check because we don't own the resource") + continue + } + + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(obj.GetObjectKind().GroupVersionKind()) + + err := n.client.Get(ctx, client.ObjectKeyFromObject(obj), u) + if err != nil { + if apierrors.IsNotFound(err) { + continue + } + + return collisionNone, fmt.Errorf("getting object: %w", err) + } + + if owner := util.FindOwnerKind(u.GetOwnerReferences(), nic.Kind); owner == nic.Name { + lgr.Info("the nginxIngressController owns this resource") + continue + } + + lgr.Info("collision detected") + if obj.GetObjectKind().GroupVersionKind().Kind == "IngressClass" { + return collisionIngressClass, nil + } + + return collisionOther, nil + } + + lgr.Info("no collisions detected") + return collisionNone, nil +} + +// updateStatus updates the status of the NginxIngressController resource. If a nil controller Deployment or IngressClass is passed, the status is defaulted for those fields if they are not already set. 
+func (n *nginxIngressControllerReconciler) updateStatus(nic *approutingv1alpha1.NginxIngressController, controllerDeployment *appsv1.Deployment, ic *netv1.IngressClass, managedResourceRefs []approutingv1alpha1.ManagedObjectReference) { + if managedResourceRefs != nil { + nic.Status.ManagedResourceRefs = managedResourceRefs + } + + if ic == nil || ic.CreationTimestamp.IsZero() { + nic.SetCondition(metav1.Condition{ + Type: approutingv1alpha1.ConditionTypeIngressClassReady, + Status: metav1.ConditionUnknown, + Reason: "IngressClassUnknown", + Message: "IngressClass is unknown", + }) + } else { + nic.SetCondition(metav1.Condition{ + Type: approutingv1alpha1.ConditionTypeIngressClassReady, + Status: "True", + Reason: "IngressClassIsReady", + Message: "Ingress Class is up-to-date ", + }) + } + + // default conditions + if controllerDeployment == nil || controllerDeployment.CreationTimestamp.IsZero() { + nic.SetCondition(metav1.Condition{ + Type: approutingv1alpha1.ConditionTypeControllerAvailable, + Status: metav1.ConditionUnknown, + Reason: "ControllerDeploymentUnknown", + Message: "Controller deployment is unknown", + }) + nic.SetCondition(metav1.Condition{ + Type: approutingv1alpha1.ConditionTypeProgressing, + Status: metav1.ConditionUnknown, + Reason: "ControllerDeploymentUnknown", + Message: "Controller deployment is unknown", + }) + } else { + nic.Status.ControllerReadyReplicas = controllerDeployment.Status.ReadyReplicas + nic.Status.ControllerAvailableReplicas = controllerDeployment.Status.AvailableReplicas + nic.Status.ControllerUnavailableReplicas = controllerDeployment.Status.UnavailableReplicas + nic.Status.ControllerReplicas = controllerDeployment.Status.Replicas + + for _, cond := range controllerDeployment.Status.Conditions { + switch cond.Type { + case appsv1.DeploymentProgressing: + n.updateStatusControllerProgressing(nic, cond) + case appsv1.DeploymentAvailable: + n.updateStatusControllerAvailable(nic, cond) + } + } + } + + controllerAvailable := 
nic.GetCondition(approutingv1alpha1.ConditionTypeControllerAvailable) + icAvailable := nic.GetCondition(approutingv1alpha1.ConditionTypeIngressClassReady) + if controllerAvailable != nil && icAvailable != nil && controllerAvailable.Status == metav1.ConditionTrue && icAvailable.Status == metav1.ConditionTrue { + nic.SetCondition(metav1.Condition{ + Type: approutingv1alpha1.ConditionTypeAvailable, + Status: metav1.ConditionTrue, + Reason: "ControllerIsAvailable", + Message: "Controller Deployment has minimum availability and IngressClass is up-to-date", + }) + } else { + nic.SetCondition(metav1.Condition{ + Type: approutingv1alpha1.ConditionTypeAvailable, + Status: metav1.ConditionFalse, + Reason: "ControllerIsNotAvailable", + Message: "Controller Deployment does not have minimum availability or IngressClass is not up-to-date", + }) + } +} + +func (n *nginxIngressControllerReconciler) updateStatusControllerAvailable(nic *approutingv1alpha1.NginxIngressController, availableCondition appsv1.DeploymentCondition) { + if availableCondition.Type != appsv1.DeploymentAvailable { + return + } + + var cond metav1.Condition + switch availableCondition.Status { + case corev1.ConditionTrue: + cond = metav1.Condition{ + Type: approutingv1alpha1.ConditionTypeControllerAvailable, + Status: metav1.ConditionTrue, + Reason: "ControllerDeploymentAvailable", + Message: "Controller Deployment is available", + } + case corev1.ConditionFalse: + cond = metav1.Condition{ + Type: approutingv1alpha1.ConditionTypeControllerAvailable, + Status: metav1.ConditionFalse, + Reason: "ControllerDeploymentNotAvailable", + Message: "Controller Deployment is not available", + } + default: + cond = metav1.Condition{ + Type: approutingv1alpha1.ConditionTypeControllerAvailable, + Status: metav1.ConditionUnknown, + Reason: "ControllerDeploymentUnknown", + Message: "Controller Deployment is in an unknown state", + } + } + + nic.SetCondition(cond) +} + +func (n *nginxIngressControllerReconciler) 
updateStatusControllerProgressing(nic *approutingv1alpha1.NginxIngressController, progressingCondition appsv1.DeploymentCondition) { + if progressingCondition.Type != appsv1.DeploymentProgressing { + return + } + + var cond metav1.Condition + switch progressingCondition.Status { + case corev1.ConditionTrue: + cond = metav1.Condition{ + Type: approutingv1alpha1.ConditionTypeProgressing, + Status: metav1.ConditionTrue, + Reason: "ControllerDeploymentProgressing", + Message: "Controller Deployment has successfully progressed", + } + case corev1.ConditionFalse: + cond = metav1.Condition{ + Type: approutingv1alpha1.ConditionTypeProgressing, + Status: metav1.ConditionFalse, + Reason: "ControllerDeploymentNotProgressing", + Message: "Controller Deployment has failed to progress", + } + default: + cond = metav1.Condition{ + Type: approutingv1alpha1.ConditionTypeProgressing, + Status: metav1.ConditionUnknown, + Reason: "ControllerDeploymentProgressingUnknown", + Message: "Controller Deployment progress is unknown", + } + } + + nic.SetCondition(cond) +} + +func managedByUs(obj client.Object) bool { + for k, v := range manifests.GetTopLevelLabels() { + if obj.GetLabels()[k] != v { + return false + } + } + + return true +} diff --git a/pkg/controller/nginxingress/types.go b/pkg/controller/nginxingress/types.go new file mode 100644 index 00000000..0152d692 --- /dev/null +++ b/pkg/controller/nginxingress/types.go @@ -0,0 +1,11 @@ +package nginxingress + +// collision represents the type of collision that occurred when reconciling an nginxIngressController resource. +// This is used to determine the way we should handle the collision. 
+type collision int + +const ( + collisionNone collision = iota + collisionIngressClass + collisionOther +) diff --git a/pkg/manifests/common.go b/pkg/manifests/common.go index 878e0800..3214ce3d 100644 --- a/pkg/manifests/common.go +++ b/pkg/manifests/common.go @@ -1,30 +1,23 @@ package manifests import ( - appsv1 "k8s.io/api/apps/v1" + "github.com/Azure/aks-app-routing-operator/pkg/util" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/Azure/aks-app-routing-operator/pkg/config" ) const operatorName = "aks-app-routing-operator" -// resourceType is a struct that represents a Kubernetes resource type -type resourceType struct { - Group string - Version string - // Name is the name of the resource type - Name string -} - // GetTopLevelLabels returns labels that every resource App Routing manages have func GetTopLevelLabels() map[string]string { // this is a function to avoid any accidental mutation due to maps being reference types return map[string]string{"app.kubernetes.io/managed-by": operatorName} } -// Checks the first set of labels has the labels of the other passed in sets +// HasTopLevelLabels returns true if the given labels match the top level labels func HasTopLevelLabels(objLabels map[string]string) bool { if len(objLabels) == 0 { return false @@ -42,15 +35,20 @@ func HasTopLevelLabels(objLabels map[string]string) bool { return true } -func getOwnerRefs(deploy *appsv1.Deployment) []metav1.OwnerReference { - if deploy == nil { - return nil - } +// GetOwnerRefs returns the owner references for the given object +func GetOwnerRefs(owner client.Object, controller bool) []metav1.OwnerReference { + gvk := owner.GetObjectKind().GroupVersionKind() + apiVersion := gvk.GroupVersion().String() + kind := gvk.Kind + name := owner.GetName() + uid := owner.GetUID() + return []metav1.OwnerReference{{ - APIVersion: "apps/v1", - Kind: 
"Deployment", - Name: deploy.Name, - UID: deploy.UID, + APIVersion: apiVersion, + Kind: kind, + Name: name, + UID: uid, + Controller: util.ToPtr(controller), }} } @@ -61,8 +59,9 @@ func namespace(conf *config.Config) *corev1.Namespace { APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: conf.NS, - Labels: GetTopLevelLabels(), + Name: conf.NS, + // don't set top-level labels,namespace is not managed by operator + Labels: map[string]string{}, Annotations: map[string]string{}, }, } diff --git a/pkg/manifests/external_dns.go b/pkg/manifests/external_dns.go index 33ccd928..fad74d0d 100644 --- a/pkg/manifests/external_dns.go +++ b/pkg/manifests/external_dns.go @@ -83,7 +83,7 @@ type ExternalDnsConfig struct { } // ExternalDnsResources returns Kubernetes objects required for external dns -func ExternalDnsResources(conf *config.Config, self *appsv1.Deployment, externalDnsConfigs []*ExternalDnsConfig) []client.Object { +func ExternalDnsResources(conf *config.Config, externalDnsConfigs []*ExternalDnsConfig) []client.Object { var objs []client.Object // Can safely assume the namespace exists if using kube-system @@ -95,11 +95,6 @@ func ExternalDnsResources(conf *config.Config, self *appsv1.Deployment, external objs = append(objs, externalDnsResourcesFromConfig(conf, dnsConfig)...) 
} - owners := getOwnerRefs(self) - for _, obj := range objs { - obj.SetOwnerReferences(owners) - } - return objs } diff --git a/pkg/manifests/nginx.go b/pkg/manifests/nginx.go index bf33df47..a9e682f1 100644 --- a/pkg/manifests/nginx.go +++ b/pkg/manifests/nginx.go @@ -7,6 +7,8 @@ import ( "path" "strconv" + "github.com/Azure/aks-app-routing-operator/pkg/config" + "github.com/Azure/aks-app-routing-operator/pkg/util" appsv1 "k8s.io/api/apps/v1" autov1 "k8s.io/api/autoscaling/v1" corev1 "k8s.io/api/core/v1" @@ -16,10 +18,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/Azure/aks-app-routing-operator/pkg/config" - "github.com/Azure/aks-app-routing-operator/pkg/util" ) const ( @@ -125,66 +123,49 @@ func (n *NginxIngressConfig) PodLabels() map[string]string { // ServiceConfig defines configuration options for required resources for a Service that goes with an Ingress type ServiceConfig struct { - IsInternal bool - Hostname string + Annotations map[string]string } -// NginxIngressClass returns an IngressClass for the provided configuration -func NginxIngressClass(conf *config.Config, self *appsv1.Deployment, ingressConfig *NginxIngressConfig) []client.Object { - ing := &netv1.IngressClass{ - TypeMeta: metav1.TypeMeta{ - Kind: "IngressClass", - APIVersion: "networking.k8s.io/v1", - }, - ObjectMeta: metav1.ObjectMeta{Name: ingressConfig.IcName, Labels: GetTopLevelLabels()}, - Spec: netv1.IngressClassSpec{ - Controller: ingressConfig.ControllerClass, - }, +func GetNginxResources(conf *config.Config, ingressConfig *NginxIngressConfig) *NginxResources { + res := &NginxResources{ + IngressClass: newNginxIngressControllerIngressClass(conf, ingressConfig), + ServiceAccount: newNginxIngressControllerServiceAccount(conf, ingressConfig), + ClusterRole: newNginxIngressControllerClusterRole(conf, 
ingressConfig), + Role: newNginxIngressControllerRole(conf, ingressConfig), + ClusterRoleBinding: newNginxIngressControllerClusterRoleBinding(conf, ingressConfig), + RoleBinding: newNginxIngressControllerRoleBinding(conf, ingressConfig), + Service: newNginxIngressControllerService(conf, ingressConfig), + Deployment: newNginxIngressControllerDeployment(conf, ingressConfig), + ConfigMap: newNginxIngressControllerConfigmap(conf, ingressConfig), + HorizontalPodAutoscaler: newNginxIngressControllerHPA(conf, ingressConfig), + PodDisruptionBudget: newNginxIngressControllerPDB(conf, ingressConfig), } - objs := []client.Object{ing} - - owners := getOwnerRefs(self) - for _, obj := range objs { - obj.SetOwnerReferences(owners) + for _, obj := range res.Objects() { l := util.MergeMaps(obj.GetLabels(), nginxLabels) obj.SetLabels(l) } - return objs -} - -// NginxIngressControllerResources returns Kubernetes objects required for the controller -func NginxIngressControllerResources(conf *config.Config, self *appsv1.Deployment, ingressConfig *NginxIngressConfig) []client.Object { - objs := []client.Object{} - - // Can safely assume the namespace exists if using kube-system + // Can safely assume the namespace exists if using kube-system. 
+ // Purposefully do this after applying the labels, since the namespace isn't an Nginx-specific resource if conf.NS != "kube-system" { - objs = append(objs, namespace(conf)) + res.Namespace = namespace(conf) } - objs = append(objs, - newNginxIngressControllerServiceAccount(conf, ingressConfig), - newNginxIngressControllerClusterRole(conf, ingressConfig), - newNginxIngressControllerClusterRoleBinding(conf, ingressConfig), - newNginxIngressControllerRole(conf, ingressConfig), - newNginxIngressControllerRoleBinding(conf, ingressConfig), - newNginxIngressControllerService(conf, ingressConfig), - newNginxIngressControllerDeployment(conf, ingressConfig), - newNginxIngressControllerConfigmap(conf, ingressConfig), - newNginxIngressControllerPDB(conf, ingressConfig), - newNginxIngressControllerHPA(conf, ingressConfig), - ) - - owners := getOwnerRefs(self) - for _, obj := range objs { - obj.SetOwnerReferences(owners) + return res +} - l := util.MergeMaps(obj.GetLabels(), nginxLabels) - obj.SetLabels(l) +func newNginxIngressControllerIngressClass(conf *config.Config, ingressConfig *NginxIngressConfig) *netv1.IngressClass { + return &netv1.IngressClass{ + TypeMeta: metav1.TypeMeta{ + Kind: "IngressClass", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: metav1.ObjectMeta{Name: ingressConfig.IcName, Labels: GetTopLevelLabels()}, + Spec: netv1.IngressClassSpec{ + Controller: ingressConfig.ControllerClass, + }, } - - return objs } func newNginxIngressControllerServiceAccount(conf *config.Config, ingressConfig *NginxIngressConfig) *corev1.ServiceAccount { @@ -382,28 +363,17 @@ func newNginxIngressControllerRoleBinding(conf *config.Config, ingressConfig *Ng } func newNginxIngressControllerService(conf *config.Config, ingressConfig *NginxIngressConfig) *corev1.Service { - isInternal := false - hostname := "" - if ingressConfig.ServiceConfig != nil { // this should always be nil prior to dynamic provisioning work - isInternal = ingressConfig.ServiceConfig.IsInternal - hostname = 
ingressConfig.ServiceConfig.Hostname - } - annotations := make(map[string]string) - if isInternal { - annotations["service.beta.kubernetes.io/azure-load-balancer-internal"] = "true" - } - if hostname != "" { - annotations["external-dns.alpha.kubernetes.io/hostname"] = "loadbalancer." + hostname - } - if hostname != "" && isInternal { - annotations["external-dns.alpha.kubernetes.io/internal-hostname"] = "clusterip." + hostname - } - for k, v := range promAnnotations { annotations[k] = v } + if ingressConfig != nil && ingressConfig.ServiceConfig != nil { + for k, v := range ingressConfig.ServiceConfig.Annotations { + annotations[k] = v + } + } + return &corev1.Service{ TypeMeta: metav1.TypeMeta{ Kind: "Service", diff --git a/pkg/manifests/types.go b/pkg/manifests/types.go new file mode 100644 index 00000000..39cf7669 --- /dev/null +++ b/pkg/manifests/types.go @@ -0,0 +1,58 @@ +package manifests + +import ( + appsv1 "k8s.io/api/apps/v1" + autov1 "k8s.io/api/autoscaling/v1" + corev1 "k8s.io/api/core/v1" + netv1 "k8s.io/api/networking/v1" + policyv1 "k8s.io/api/policy/v1" + rbacv1 "k8s.io/api/rbac/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// resourceType is a struct that represents a Kubernetes resource type +type resourceType struct { + Group string + Version string + // Name is the name of the resource type + Name string +} + +// NginxResources is a struct that represents the Kubernetes resources that are created for the Nginx Ingress Controller. When these resources +// are acted upon by client-go, the fields here are updated since they are pointers to the actual resources. 
+type NginxResources struct { + Namespace *corev1.Namespace + IngressClass *netv1.IngressClass + ServiceAccount *corev1.ServiceAccount + ClusterRole *rbacv1.ClusterRole + Role *rbacv1.Role + ClusterRoleBinding *rbacv1.ClusterRoleBinding + RoleBinding *rbacv1.RoleBinding + Service *corev1.Service + Deployment *appsv1.Deployment + ConfigMap *corev1.ConfigMap + HorizontalPodAutoscaler *autov1.HorizontalPodAutoscaler + PodDisruptionBudget *policyv1.PodDisruptionBudget +} + +func (n *NginxResources) Objects() []client.Object { + objs := []client.Object{ + n.IngressClass, + n.ServiceAccount, + n.ClusterRole, + n.Role, + n.ClusterRoleBinding, + n.RoleBinding, + n.Service, + n.Deployment, + n.ConfigMap, + n.HorizontalPodAutoscaler, + n.PodDisruptionBudget, + } + + if n.Namespace != nil { + objs = append([]client.Object{n.Namespace}, objs...) // put namespace at front, so we can create resources in order + } + + return objs +}