diff --git a/azure/scope/cluster.go b/azure/scope/cluster.go index a6e42b401b1..91e110ecb0d 100644 --- a/azure/scope/cluster.go +++ b/azure/scope/cluster.go @@ -75,6 +75,7 @@ func NewClusterScope(ctx context.Context, params ClusterScopeParams) (*ClusterSc return nil, errors.New("failed to generate new scope from nil AzureCluster") } + useLegacyGroups := false if params.AzureCluster.Spec.IdentityRef == nil { err := params.AzureClients.setCredentials(params.AzureCluster.Spec.SubscriptionID, params.AzureCluster.Spec.AzureEnvironment) if err != nil { @@ -89,6 +90,12 @@ func NewClusterScope(ctx context.Context, params ClusterScopeParams) (*ClusterSc if err != nil { return nil, errors.Wrap(err, "failed to configure azure settings and credentials for Identity") } + + // ASO does not yet support per-resource credentials using UserAssignedMSI. Fall back to the legacy SDK + // groups service when it is used. + if credentialsProvider.Identity.Spec.Type == infrav1.UserAssignedMSI { + useLegacyGroups = true + } } if params.Cache == nil { @@ -101,12 +108,13 @@ func NewClusterScope(ctx context.Context, params ClusterScopeParams) (*ClusterSc } return &ClusterScope{ - Client: params.Client, - AzureClients: params.AzureClients, - Cluster: params.Cluster, - AzureCluster: params.AzureCluster, - patchHelper: helper, - cache: params.Cache, + Client: params.Client, + AzureClients: params.AzureClients, + Cluster: params.Cluster, + AzureCluster: params.AzureCluster, + patchHelper: helper, + cache: params.Cache, + UseLegacyGroups: useLegacyGroups, }, nil } @@ -117,8 +125,9 @@ type ClusterScope struct { cache *ClusterCache AzureClients - Cluster *clusterv1.Cluster - AzureCluster *infrav1.AzureCluster + Cluster *clusterv1.Cluster + AzureCluster *infrav1.AzureCluster + UseLegacyGroups bool } // ClusterCache stores ClusterCache data locally so we don't have to hit the API multiple times within the same reconcile loop. @@ -1062,13 +1071,16 @@ func (s *ClusterScope) SetAnnotation(key, value string) { // TagsSpecs returns the tag specs for the AzureCluster. func (s *ClusterScope) TagsSpecs() []azure.TagsSpec { - return []azure.TagsSpec{ - { - Scope: azure.ResourceGroupID(s.SubscriptionID(), s.ResourceGroup()), - Tags: s.AdditionalTags(), - Annotation: azure.RGTagsLastAppliedAnnotation, - }, + if s.UseLegacyGroups { + return []azure.TagsSpec{ + { + Scope: azure.ResourceGroupID(s.SubscriptionID(), s.ResourceGroup()), + Tags: s.AdditionalTags(), + Annotation: azure.RGTagsLastAppliedAnnotation, + }, + } } + return nil } // PrivateEndpointSpecs returns the private endpoint specs.
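The scope changes above boil down to a single selection rule: the identity type decides whether the legacy SDK groups service (and with it the resource-group tag spec) is used instead of ASO. Below is a minimal, self-contained Go sketch of that rule; identityType and userAssignedMSI are stand-ins redefined here only so the sketch compiles on its own, not the real infrav1 types.

package main

import "fmt"

// identityType stands in for infrav1.IdentityType; the value mirrors
// infrav1.UserAssignedMSI but is redefined for this illustration.
type identityType string

const userAssignedMSI identityType = "UserAssignedMSI"

// useLegacyGroups mirrors the flag computed in NewClusterScope and
// NewManagedControlPlaneScope: ASO does not yet accept per-resource
// credentials for a user-assigned managed identity, so the scope falls
// back to the legacy SDK groups service for that identity type only.
func useLegacyGroups(t identityType) bool {
	return t == userAssignedMSI
}

func main() {
	fmt.Println(useLegacyGroups(userAssignedMSI))    // true: legacy SDK groups service
	fmt.Println(useLegacyGroups("ServicePrincipal")) // false: ASO-backed groups service
}
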
diff --git a/azure/scope/managedcontrolplane.go b/azure/scope/managedcontrolplane.go index 12e938d7e84..46019710766 100644 --- a/azure/scope/managedcontrolplane.go +++ b/azure/scope/managedcontrolplane.go @@ -73,6 +73,7 @@ func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlane return nil, errors.New("failed to generate new scope from nil ControlPlane") } + useLegacyGroups := false if params.ControlPlane.Spec.IdentityRef == nil { if err := params.AzureClients.setCredentials(params.ControlPlane.Spec.SubscriptionID, params.ControlPlane.Spec.AzureEnvironment); err != nil { return nil, errors.Wrap(err, "failed to create Azure session") @@ -86,6 +87,12 @@ func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlane if err := params.AzureClients.setCredentialsWithProvider(ctx, params.ControlPlane.Spec.SubscriptionID, params.ControlPlane.Spec.AzureEnvironment, credentialsProvider); err != nil { return nil, errors.Wrap(err, "failed to configure azure settings and credentials for Identity") } + + // ASO does not yet support per-resource credentials using UserAssignedMSI. Fall back to the legacy SDK + // groups service when it is used. + if credentialsProvider.Identity.Spec.Type == infrav1.UserAssignedMSI { + useLegacyGroups = true + } } if params.Cache == nil { @@ -105,6 +112,7 @@ func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlane ManagedMachinePools: params.ManagedMachinePools, patchHelper: helper, cache: params.Cache, + UseLegacyGroups: useLegacyGroups, }, nil } @@ -119,6 +127,7 @@ type ManagedControlPlaneScope struct { Cluster *clusterv1.Cluster ControlPlane *infrav1.AzureManagedControlPlane ManagedMachinePools []ManagedMachinePool + UseLegacyGroups bool } // ManagedControlPlaneCache stores ManagedControlPlane data locally so we don't have to hit the API multiple times within the same reconcile loop. @@ -731,18 +740,21 @@ func (s *ManagedControlPlaneScope) SetAnnotation(key, value string) { // TagsSpecs returns the tag specs for the ManagedControlPlane. func (s *ManagedControlPlaneScope) TagsSpecs() []azure.TagsSpec { - return []azure.TagsSpec{ - { - Scope: azure.ResourceGroupID(s.SubscriptionID(), s.ResourceGroup()), - Tags: s.AdditionalTags(), - Annotation: azure.RGTagsLastAppliedAnnotation, - }, + specs := []azure.TagsSpec{ { Scope: azure.ManagedClusterID(s.SubscriptionID(), s.ResourceGroup(), s.ManagedClusterSpec().ResourceName()), Tags: s.AdditionalTags(), Annotation: azure.ManagedClusterTagsLastAppliedAnnotation, }, } + if s.UseLegacyGroups { + specs = append(specs, azure.TagsSpec{ + Scope: azure.ResourceGroupID(s.SubscriptionID(), s.ResourceGroup()), + Tags: s.AdditionalTags(), + Annotation: azure.RGTagsLastAppliedAnnotation, + }) + } + return specs } // AvailabilityStatusResource refers to the AzureManagedControlPlane. diff --git a/azure/services/asogroups/groups.go b/azure/services/asogroups/groups.go index bff6d03734e..202c392bd9e 100644 --- a/azure/services/asogroups/groups.go +++ b/azure/services/asogroups/groups.go @@ -59,7 +59,7 @@ func (s *Service) Name() string { // Reconcile idempotently creates or updates a resource group.
func (s *Service) Reconcile(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "groups.Service.Reconcile") + ctx, _, done := tele.StartSpanWithLogger(ctx, "asogroups.Service.Reconcile") defer done() ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultAzureServiceReconcileTimeout) @@ -77,7 +77,7 @@ func (s *Service) Reconcile(ctx context.Context) error { // Delete deletes the resource group if it is managed by capz. func (s *Service) Delete(ctx context.Context) error { - ctx, _, done := tele.StartSpanWithLogger(ctx, "groups.Service.Delete") + ctx, _, done := tele.StartSpanWithLogger(ctx, "asogroups.Service.Delete") defer done() ctx, cancel := context.WithTimeout(ctx, reconciler.DefaultAzureServiceReconcileTimeout) diff --git a/azure/services/asogroups/groups_test.go b/azure/services/asogroups/groups_test.go index 9ccf1dfe3e8..8bc82369319 100644 --- a/azure/services/asogroups/groups_test.go +++ b/azure/services/asogroups/groups_test.go @@ -117,15 +117,6 @@ func TestReconcileGroups(t *testing.T) { } } -type ErroringGetClient struct { - client.Client - err error -} - -func (e *ErroringGetClient) Get(_ context.Context, _ client.ObjectKey, _ client.Object, _ ...client.GetOption) error { - return e.err -} - type ErroringDeleteClient struct { client.Client err error @@ -223,24 +214,14 @@ func TestDeleteGroups(t *testing.T) { func TestShouldDeleteIndividualResources(t *testing.T) { tests := []struct { - name string - clientBuilder func(g Gomega) client.Client - expect func(s *mock_asogroups.MockGroupScopeMockRecorder) - expected bool + name string + objects []client.Object + expect func(s *mock_asogroups.MockGroupScopeMockRecorder) + expected bool }{ { - name: "error checking if group is managed", - clientBuilder: func(g Gomega) client.Client { - scheme := runtime.NewScheme() - g.Expect(asoresourcesv1.AddToScheme(scheme)) - c := fakeclient.NewClientBuilder(). - WithScheme(scheme). - Build() - return &ErroringGetClient{ - Client: c, - err: errors.New("an error"), - } - }, + name: "error checking if group is managed", + objects: nil, expect: func(s *mock_asogroups.MockGroupScopeMockRecorder) { s.ASOGroupSpec().Return(&GroupSpec{}).AnyTimes() s.ClusterName().Return("").AnyTimes() @@ -248,23 +229,17 @@ func TestShouldDeleteIndividualResources(t *testing.T) { expected: true, }, { - name: "unmanaged", - clientBuilder: func(g Gomega) client.Client { - scheme := runtime.NewScheme() - g.Expect(asoresourcesv1.AddToScheme(scheme)) - c := fakeclient.NewClientBuilder(). - WithScheme(scheme). - WithObjects(&asoresourcesv1.ResourceGroup{ - ObjectMeta: metav1.ObjectMeta{ - Name: "name", - Namespace: "namespace", - Labels: map[string]string{ - infrav1.OwnedByClusterLabelKey: "not-cluster", - }, + name: "group is unmanaged", + objects: []client.Object{ + &asoresourcesv1.ResourceGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + Labels: map[string]string{ + infrav1.OwnedByClusterLabelKey: "not-cluster", }, - }). - Build() - return c + }, + }, }, expect: func(s *mock_asogroups.MockGroupScopeMockRecorder) { s.ASOGroupSpec().Return(&GroupSpec{ @@ -276,26 +251,20 @@ func TestShouldDeleteIndividualResources(t *testing.T) { expected: true, }, { - name: "managed, RG has reconcile policy skip", - clientBuilder: func(g Gomega) client.Client { - scheme := runtime.NewScheme() - g.Expect(asoresourcesv1.AddToScheme(scheme)) - c := fakeclient.NewClientBuilder(). - WithScheme(scheme). 
- WithObjects(&asoresourcesv1.ResourceGroup{ - ObjectMeta: metav1.ObjectMeta{ - Name: "name", - Namespace: "namespace", - Labels: map[string]string{ - infrav1.OwnedByClusterLabelKey: "cluster", - }, - Annotations: map[string]string{ - aso.ReconcilePolicyAnnotation: aso.ReconcilePolicySkip, - }, + name: "group is managed and has reconcile policy skip", + objects: []client.Object{ + &asoresourcesv1.ResourceGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + Labels: map[string]string{ + infrav1.OwnedByClusterLabelKey: "cluster", }, - }). - Build() - return c + Annotations: map[string]string{ + aso.ReconcilePolicyAnnotation: aso.ReconcilePolicySkip, + }, + }, + }, }, expect: func(s *mock_asogroups.MockGroupScopeMockRecorder) { s.ASOGroupSpec().Return(&GroupSpec{ @@ -307,26 +276,20 @@ func TestShouldDeleteIndividualResources(t *testing.T) { expected: true, }, { - name: "managed, RG has reconcile policy manage", - clientBuilder: func(g Gomega) client.Client { - scheme := runtime.NewScheme() - g.Expect(asoresourcesv1.AddToScheme(scheme)) - c := fakeclient.NewClientBuilder(). - WithScheme(scheme). - WithObjects(&asoresourcesv1.ResourceGroup{ - ObjectMeta: metav1.ObjectMeta{ - Name: "name", - Namespace: "namespace", - Labels: map[string]string{ - infrav1.OwnedByClusterLabelKey: "cluster", - }, - Annotations: map[string]string{ - aso.ReconcilePolicyAnnotation: aso.ReconcilePolicyManage, - }, + name: "group is managed and has reconcile policy manage", + objects: []client.Object{ + &asoresourcesv1.ResourceGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + Labels: map[string]string{ + infrav1.OwnedByClusterLabelKey: "cluster", }, - }). - Build() - return c + Annotations: map[string]string{ + aso.ReconcilePolicyAnnotation: aso.ReconcilePolicyManage, + }, + }, + }, }, expect: func(s *mock_asogroups.MockGroupScopeMockRecorder) { s.ASOGroupSpec().Return(&GroupSpec{ @@ -347,10 +310,12 @@ func TestShouldDeleteIndividualResources(t *testing.T) { defer mockCtrl.Finish() scopeMock := mock_asogroups.NewMockGroupScope(mockCtrl) - var ctrlClient client.Client - if test.clientBuilder != nil { - ctrlClient = test.clientBuilder(g) - } + scheme := runtime.NewScheme() + g.Expect(asoresourcesv1.AddToScheme(scheme)) + ctrlClient := fakeclient.NewClientBuilder(). + WithScheme(scheme). + WithObjects(test.objects...). 
+ Build() scopeMock.EXPECT().GetClient().Return(ctrlClient).AnyTimes() test.expect(scopeMock.EXPECT()) diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 9bef000c89e..ce557be6b40 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -261,3 +261,23 @@ rules: - get - patch - update +- apiGroups: + - resources.azure.com + resources: + - resourcegroups + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - resources.azure.com + resources: + - resourcegroups/status + verbs: + - get + - list + - watch diff --git a/controllers/azurecluster_controller.go b/controllers/azurecluster_controller.go index 0b357ef07e6..eb20215cb01 100644 --- a/controllers/azurecluster_controller.go +++ b/controllers/azurecluster_controller.go @@ -111,6 +111,8 @@ func (acr *AzureClusterReconciler) SetupWithManager(ctx context.Context, mgr ctr // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremachinetemplates;azuremachinetemplates/status,verbs=get;list;watch // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azureclusteridentities;azureclusteridentities/status,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="",resources=namespaces,verbs=list; +// +kubebuilder:rbac:groups=resources.azure.com,resources=resourcegroups,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=resources.azure.com,resources=resourcegroups/status,verbs=get;list;watch // Reconcile idempotently gets, creates, and updates a cluster. func (acr *AzureClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { diff --git a/controllers/azurecluster_controller_test.go b/controllers/azurecluster_controller_test.go index 143a88a6add..57df3d6869b 100644 --- a/controllers/azurecluster_controller_test.go +++ b/controllers/azurecluster_controller_test.go @@ -20,6 +20,7 @@ import ( "context" "testing" + asoresourcesv1 "github.com/Azure/azure-service-operator/v2/api/resources/v1api20200601" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -65,6 +66,7 @@ func TestAzureClusterReconcilePaused(t *testing.T) { sb := runtime.NewSchemeBuilder( clusterv1.AddToScheme, infrav1.AddToScheme, + asoresourcesv1.AddToScheme, ) s := runtime.NewScheme() g.Expect(sb.AddToScheme(s)).To(Succeed()) @@ -76,11 +78,12 @@ func TestAzureClusterReconcilePaused(t *testing.T) { reconciler := NewAzureClusterReconciler(c, recorder, reconciler.DefaultLoopTimeout, "") name := test.RandomName("paused", 10) + namespace := "default" cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: "default", + Namespace: namespace, }, Spec: clusterv1.ClusterSpec{ Paused: true, @@ -91,7 +94,7 @@ func TestAzureClusterReconcilePaused(t *testing.T) { instance := &infrav1.AzureCluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: "default", + Namespace: namespace, OwnerReferences: []metav1.OwnerReference{ { Kind: "Cluster", @@ -105,10 +108,19 @@ func TestAzureClusterReconcilePaused(t *testing.T) { AzureClusterClassSpec: infrav1.AzureClusterClassSpec{ SubscriptionID: "something", }, + ResourceGroup: name, }, } g.Expect(c.Create(ctx, instance)).To(Succeed()) + rg := &asoresourcesv1.ResourceGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + g.Expect(c.Create(ctx, rg)).To(Succeed()) + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: instance.Namespace, diff --git a/controllers/azurecluster_reconciler.go b/controllers/azurecluster_reconciler.go index a55b5ffad23..bb4994788c9 100644 --- a/controllers/azurecluster_reconciler.go +++ b/controllers/azurecluster_reconciler.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/errors" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/asogroups" "sigs.k8s.io/cluster-api-provider-azure/azure/services/bastionhosts" "sigs.k8s.io/cluster-api-provider-azure/azure/services/groups" "sigs.k8s.io/cluster-api-provider-azure/azure/services/loadbalancers" @@ -55,6 +56,10 @@ func newAzureClusterService(scope *scope.ClusterScope) (*azureClusterService, er if err != nil { return nil, errors.Wrap(err, "failed creating a NewCache") } + var groupsSvc azure.ServiceReconciler = asogroups.New(scope) + if scope.UseLegacyGroups { + groupsSvc = groups.New(scope) + } natGatewaysSvc, err := natgateways.New(scope) if err != nil { return nil, err @@ -62,7 +67,7 @@ func newAzureClusterService(scope *scope.ClusterScope) (*azureClusterService, er return &azureClusterService{ scope: scope, services: []azure.ServiceReconciler{ - groups.New(scope), + groupsSvc, virtualnetworks.New(scope), securitygroups.New(scope), routetables.New(scope), @@ -125,20 +130,7 @@ func (s *azureClusterService) Delete(ctx context.Context) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.azureClusterService.Delete") defer done() - groupSvc, err := s.getService(groups.ServiceName) - if err != nil { - return errors.Wrap(err, "failed to get group service") - } - - managed, err := groupSvc.IsManaged(ctx) - if err != nil { - if azure.ResourceNotFound(err) { - // If the resource group is not found, there is nothing to delete, return early. 
- return nil - } - return errors.Wrap(err, "failed to determine if the AzureCluster resource group is managed") - } - if managed { + if !ShouldDeleteIndividualResources(ctx, s.scope) { // If the resource group is managed, delete it. // We need to explicitly delete vnet peerings, as it is not part of the resource group. vnetPeeringsSvc, err := s.getService(vnetpeerings.ServiceName) @@ -148,6 +140,15 @@ func (s *azureClusterService) Delete(ctx context.Context) error { if err := vnetPeeringsSvc.Delete(ctx); err != nil { return errors.Wrap(err, "failed to delete peerings") } + + groupsServiceName := asogroups.ServiceName + if s.scope.UseLegacyGroups { + groupsServiceName = groups.ServiceName + } + groupSvc, err := s.getService(groupsServiceName) + if err != nil { + return errors.Wrap(err, "failed to get group service") + } // Delete the entire resource group directly. if err := groupSvc.Delete(ctx); err != nil { return errors.Wrap(err, "failed to delete resource group") diff --git a/controllers/azurecluster_reconciler_test.go b/controllers/azurecluster_reconciler_test.go index f803386e94d..ca4d03a00c9 100644 --- a/controllers/azurecluster_reconciler_test.go +++ b/controllers/azurecluster_reconciler_test.go @@ -20,19 +20,26 @@ import ( "context" "errors" "testing" + "time" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-11-01/compute" + asoresourcesv1 "github.com/Azure/azure-service-operator/v2/api/resources/v1api20200601" . "github.com/onsi/gomega" "go.uber.org/mock/gomock" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/mock_azure" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" - "sigs.k8s.io/cluster-api-provider-azure/azure/services/groups" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/aso" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/asogroups" "sigs.k8s.io/cluster-api-provider-azure/azure/services/resourceskus" "sigs.k8s.io/cluster-api-provider-azure/azure/services/vnetpeerings" gomockinternal "sigs.k8s.io/cluster-api-provider-azure/internal/test/matchers/gomock" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func TestAzureClusterServiceReconcile(t *testing.T) { @@ -169,48 +176,110 @@ func TestAzureClusterServicePause(t *testing.T) { } func TestAzureClusterServiceDelete(t *testing.T) { + clusterName := "cluster" + namespace := "ns" + resourceGroup := "rg" + cases := map[string]struct { expectedError string + clientBuilder func(g Gomega) client.Client expect func(grp *mock_azure.MockServiceReconcilerMockRecorder, vpr *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) }{ "Resource Group is deleted successfully": { expectedError: "", - expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, vpr *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { + clientBuilder: func(g Gomega) client.Client { + scheme := runtime.NewScheme() + g.Expect(asoresourcesv1.AddToScheme(scheme)).To(Succeed()) + + rg := &asoresourcesv1.ResourceGroup{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: resourceGroup, + Namespace: namespace, + Labels: map[string]string{ + infrav1.OwnedByClusterLabelKey: clusterName, + }, + Annotations: map[string]string{ + aso.ReconcilePolicyAnnotation: aso.ReconcilePolicyManage, + }, + }, + } + + c := fakeclient.NewClientBuilder(). + WithScheme(scheme). + WithObjects(rg). + Build() + + return c + }, + expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, vpr *mock_azure.MockServiceReconcilerMockRecorder, _ *mock_azure.MockServiceReconcilerMockRecorder, _ *mock_azure.MockServiceReconcilerMockRecorder, _ *mock_azure.MockServiceReconcilerMockRecorder) { gomock.InOrder( - grp.Name().Return(groups.ServiceName), - grp.IsManaged(gomockinternal.AContext()).Return(true, nil), - grp.Name().Return(groups.ServiceName), + grp.Name().Return(asogroups.ServiceName), vpr.Name().Return(vnetpeerings.ServiceName), vpr.Delete(gomockinternal.AContext()).Return(nil), + grp.Name().Return(asogroups.ServiceName), grp.Delete(gomockinternal.AContext()).Return(nil)) }, }, - "Error when checking if resource group is managed": { - expectedError: "failed to determine if the AzureCluster resource group is managed: an error happened", - expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, vpr *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { - gomock.InOrder( - grp.Name().Return(groups.ServiceName), - grp.IsManaged(gomockinternal.AContext()).Return(false, errors.New("an error happened"))) - }, - }, "Resource Group delete fails": { expectedError: "failed to delete resource group: internal error", - expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, vpr *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { + clientBuilder: func(g Gomega) client.Client { + scheme := runtime.NewScheme() + g.Expect(asoresourcesv1.AddToScheme(scheme)).To(Succeed()) + + rg := &asoresourcesv1.ResourceGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceGroup, + Namespace: namespace, + Labels: map[string]string{ + infrav1.OwnedByClusterLabelKey: clusterName, + }, + Annotations: map[string]string{ + aso.ReconcilePolicyAnnotation: aso.ReconcilePolicyManage, + }, + }, + } + + c := fakeclient.NewClientBuilder(). + WithScheme(scheme). + WithObjects(rg). 
+ Build() + + return c + }, + expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, vpr *mock_azure.MockServiceReconcilerMockRecorder, _ *mock_azure.MockServiceReconcilerMockRecorder, _ *mock_azure.MockServiceReconcilerMockRecorder, _ *mock_azure.MockServiceReconcilerMockRecorder) { gomock.InOrder( - grp.Name().Return(groups.ServiceName), - grp.IsManaged(gomockinternal.AContext()).Return(true, nil), - grp.Name().Return(groups.ServiceName), + grp.Name().Return(asogroups.ServiceName), vpr.Name().Return(vnetpeerings.ServiceName), vpr.Delete(gomockinternal.AContext()).Return(nil), + grp.Name().Return(asogroups.ServiceName), grp.Delete(gomockinternal.AContext()).Return(errors.New("internal error"))) }, }, "Resource Group not owned by cluster": { expectedError: "", + clientBuilder: func(g Gomega) client.Client { + scheme := runtime.NewScheme() + g.Expect(asoresourcesv1.AddToScheme(scheme)).To(Succeed()) + + rg := &asoresourcesv1.ResourceGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceGroup, + Namespace: namespace, + Labels: map[string]string{ + infrav1.OwnedByClusterLabelKey: "not-" + clusterName, + }, + }, + } + + c := fakeclient.NewClientBuilder(). + WithScheme(scheme). + WithObjects(rg). + Build() + + return c + }, expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, vpr *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { gomock.InOrder( - grp.Name().Return(groups.ServiceName), - grp.IsManaged(gomockinternal.AContext()).Return(false, nil), three.Delete(gomockinternal.AContext()).Return(nil), two.Delete(gomockinternal.AContext()).Return(nil), one.Delete(gomockinternal.AContext()).Return(nil), @@ -220,10 +289,29 @@ func TestAzureClusterServiceDelete(t *testing.T) { }, "service delete fails": { expectedError: "failed to delete AzureCluster service two: some error happened", - expect: func(grp *mock_azure.MockServiceReconcilerMockRecorder, vpr *mock_azure.MockServiceReconcilerMockRecorder, one *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { + clientBuilder: func(g Gomega) client.Client { + scheme := runtime.NewScheme() + g.Expect(asoresourcesv1.AddToScheme(scheme)).To(Succeed()) + + rg := &asoresourcesv1.ResourceGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceGroup, + Namespace: namespace, + Labels: map[string]string{ + infrav1.OwnedByClusterLabelKey: "not-" + clusterName, + }, + }, + } + + c := fakeclient.NewClientBuilder(). + WithScheme(scheme). + WithObjects(rg). 
+ Build() + + return c + }, + expect: func(_ *mock_azure.MockServiceReconcilerMockRecorder, _ *mock_azure.MockServiceReconcilerMockRecorder, _ *mock_azure.MockServiceReconcilerMockRecorder, two *mock_azure.MockServiceReconcilerMockRecorder, three *mock_azure.MockServiceReconcilerMockRecorder) { gomock.InOrder( - grp.Name().Return(groups.ServiceName), - grp.IsManaged(gomockinternal.AContext()).Return(false, nil), three.Delete(gomockinternal.AContext()).Return(nil), two.Delete(gomockinternal.AContext()).Return(errors.New("some error happened")), two.Name().Return("two")) @@ -246,10 +334,23 @@ func TestAzureClusterServiceDelete(t *testing.T) { svcThreeMock := mock_azure.NewMockServiceReconciler(mockCtrl) tc.expect(groupsMock.EXPECT(), vnetpeeringsMock.EXPECT(), svcOneMock.EXPECT(), svcTwoMock.EXPECT(), svcThreeMock.EXPECT()) + c := tc.clientBuilder(g) s := &azureClusterService{ scope: &scope.ClusterScope{ - AzureCluster: &infrav1.AzureCluster{}, + Client: c, + AzureCluster: &infrav1.AzureCluster{ + Spec: infrav1.AzureClusterSpec{ + ResourceGroup: resourceGroup, + }, + }, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + }, }, services: []azure.ServiceReconciler{ groupsMock, diff --git a/controllers/azuremanagedcontrolplane_controller.go b/controllers/azuremanagedcontrolplane_controller.go index e1710976bf6..524c34ae4d5 100644 --- a/controllers/azuremanagedcontrolplane_controller.go +++ b/controllers/azuremanagedcontrolplane_controller.go @@ -111,6 +111,8 @@ func (amcpr *AzureManagedControlPlaneReconciler) SetupWithManager(ctx context.Co // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedcontrolplanes,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azuremanagedcontrolplanes/status,verbs=get;update;patch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch +// +kubebuilder:rbac:groups=resources.azure.com,resources=resourcegroups,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=resources.azure.com,resources=resourcegroups/status,verbs=get;list;watch // Reconcile idempotently gets, creates, and updates a managed control plane. func (amcpr *AzureManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { diff --git a/controllers/azuremanagedcontrolplane_controller_test.go b/controllers/azuremanagedcontrolplane_controller_test.go index 5841c6e0564..08886059b29 100644 --- a/controllers/azuremanagedcontrolplane_controller_test.go +++ b/controllers/azuremanagedcontrolplane_controller_test.go @@ -19,6 +19,7 @@ import ( "context" "testing" + asoresourcesv1 "github.com/Azure/azure-service-operator/v2/api/resources/v1api20200601" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -95,6 +96,7 @@ func TestAzureManagedControlPlaneReconcilePaused(t *testing.T) { sb := runtime.NewSchemeBuilder( clusterv1.AddToScheme, infrav1.AddToScheme, + asoresourcesv1.AddToScheme, ) s := runtime.NewScheme() g.Expect(sb.AddToScheme(s)).To(Succeed()) @@ -111,11 +113,12 @@ func TestAzureManagedControlPlaneReconcilePaused(t *testing.T) { WatchFilterValue: "", } name := test.RandomName("paused", 10) + namespace := "default" cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: "default", + Namespace: namespace, }, Spec: clusterv1.ClusterSpec{ Paused: true, @@ -126,7 +129,7 @@ func TestAzureManagedControlPlaneReconcilePaused(t *testing.T) { instance := &infrav1.AzureManagedControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: "default", + Namespace: namespace, OwnerReferences: []metav1.OwnerReference{ { Kind: "Cluster", @@ -136,11 +139,20 @@ func TestAzureManagedControlPlaneReconcilePaused(t *testing.T) { }, }, Spec: infrav1.AzureManagedControlPlaneSpec{ - SubscriptionID: "something", + SubscriptionID: "something", + ResourceGroupName: name, }, } g.Expect(c.Create(ctx, instance)).To(Succeed()) + rg := &asoresourcesv1.ResourceGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + g.Expect(c.Create(ctx, rg)).To(Succeed()) + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ NamespacedName: client.ObjectKey{ Namespace: instance.Namespace, diff --git a/controllers/azuremanagedcontrolplane_reconciler.go b/controllers/azuremanagedcontrolplane_reconciler.go index 68b159895c2..f4ac858a2ed 100644 --- a/controllers/azuremanagedcontrolplane_reconciler.go +++ b/controllers/azuremanagedcontrolplane_reconciler.go @@ -22,6 +22,7 @@ import ( "github.com/pkg/errors" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/asogroups" "sigs.k8s.io/cluster-api-provider-azure/azure/services/groups" "sigs.k8s.io/cluster-api-provider-azure/azure/services/managedclusters" "sigs.k8s.io/cluster-api-provider-azure/azure/services/privateendpoints" @@ -44,11 +45,15 @@ type azureManagedControlPlaneService struct { // newAzureManagedControlPlaneReconciler populates all the services based on input scope. 
func newAzureManagedControlPlaneReconciler(scope *scope.ManagedControlPlaneScope) *azureManagedControlPlaneService { + var groupsService azure.ServiceReconciler = asogroups.New(scope) + if scope.UseLegacyGroups { + groupsService = groups.New(scope) + } return &azureManagedControlPlaneService{ kubeclient: scope.Client, scope: scope, services: []azure.ServiceReconciler{ - groups.New(scope), + groupsService, virtualnetworks.New(scope), subnets.New(scope), managedclusters.New(scope), diff --git a/controllers/helpers.go b/controllers/helpers.go index 019510253ea..4fcfc816635 100644 --- a/controllers/helpers.go +++ b/controllers/helpers.go @@ -37,6 +37,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" "sigs.k8s.io/cluster-api-provider-azure/azure/scope" + "sigs.k8s.io/cluster-api-provider-azure/azure/services/asogroups" "sigs.k8s.io/cluster-api-provider-azure/azure/services/groups" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/feature" @@ -606,11 +607,14 @@ func ShouldDeleteIndividualResources(ctx context.Context, clusterScope *scope.Cl if clusterScope.Cluster.DeletionTimestamp.IsZero() { return true } - grpSvc := groups.New(clusterScope) - managed, err := grpSvc.IsManaged(ctx) - // Since this is a best effort attempt to speed up delete, we don't fail the delete if we can't get the RG status. - // Instead, take the long way and delete all resources one by one. - return err != nil || !managed + if clusterScope.UseLegacyGroups { + grpSvc := groups.New(clusterScope) + managed, err := grpSvc.IsManaged(ctx) + // Since this is a best effort attempt to speed up delete, we don't fail the delete if we can't get the RG status. + // Instead, take the long way and delete all resources one by one. + return err != nil || !managed + } + return asogroups.New(clusterScope).ShouldDeleteIndividualResources(ctx) } // GetClusterIdentityFromRef returns the AzureClusterIdentity referenced by the AzureCluster. diff --git a/docs/book/src/topics/aso.md b/docs/book/src/topics/aso.md index 6373100c964..6cb8ea27ffc 100644 --- a/docs/book/src/topics/aso.md +++ b/docs/book/src/topics/aso.md @@ -8,10 +8,10 @@ CAPZ interfaces with Azure to create and manage some types of resources using [A More context around the decision for CAPZ to pivot towards using ASO can be found in the [proposal](https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/main/docs/proposals/20230123-azure-service-operator.md). -## The biggest changes +## Primary changes For most users, the introduction of ASO is expected to be fully transparent and backwards compatible. Changes -that may affect specific use-cases are described below. +that may affect specific use cases are described below. ### Installation diff --git a/main.go b/main.go index d9cb33a1d5b..b81da918138 100644 --- a/main.go +++ b/main.go @@ -26,6 +26,7 @@ import ( // +kubebuilder:scaffold:imports aadpodv1 "github.com/Azure/aad-pod-identity/pkg/apis/aadpodidentity/v1" + asoresourcesv1 "github.com/Azure/azure-service-operator/v2/api/resources/v1api20200601" "github.com/spf13/pflag" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -72,6 +73,7 @@ func init() { _ = clusterv1.AddToScheme(scheme) _ = expv1.AddToScheme(scheme) _ = kubeadmv1.AddToScheme(scheme) + _ = asoresourcesv1.AddToScheme(scheme) // +kubebuilder:scaffold:scheme // Add aadpodidentity v1 to the scheme.
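Taken together, the reconciler changes all follow one pattern: the groups entry in the service list is interface-typed and chosen once from the scope's UseLegacyGroups flag. A rough, self-contained sketch of that selection follows, assuming a trimmed-down ServiceReconciler (the real azure.ServiceReconciler interface carries more methods, and the name strings here are illustrative stand-ins for asogroups.ServiceName and groups.ServiceName).

package main

import (
	"context"
	"fmt"
)

// ServiceReconciler is a trimmed-down stand-in for azure.ServiceReconciler.
type ServiceReconciler interface {
	Name() string
	Reconcile(ctx context.Context) error
}

// asoGroups stands in for the service built by asogroups.New(scope).
type asoGroups struct{}

func (asoGroups) Name() string                    { return "asogroups" }
func (asoGroups) Reconcile(context.Context) error { return nil }

// legacyGroups stands in for the SDK-based service from groups.New(scope).
type legacyGroups struct{}

func (legacyGroups) Name() string                    { return "group" }
func (legacyGroups) Reconcile(context.Context) error { return nil }

// newGroupsService mirrors the branch added to newAzureClusterService and
// newAzureManagedControlPlaneReconciler: default to ASO, fall back to the
// legacy SDK service when the scope flags a user-assigned managed identity.
func newGroupsService(useLegacyGroups bool) ServiceReconciler {
	if useLegacyGroups {
		return legacyGroups{}
	}
	return asoGroups{}
}

func main() {
	fmt.Println(newGroupsService(false).Name()) // asogroups
	fmt.Println(newGroupsService(true).Name())  // group
}

Because the selected value satisfies the same interface as every other entry, the rest of the reconcile and delete flow (including the name-based lookup in azureClusterService.Delete) stays unchanged regardless of which implementation is picked.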