From 0f321e4089a3f4dc37f8420bf2ef6762c398c400 Mon Sep 17 00:00:00 2001
From: Jack Francis
Date: Wed, 10 Aug 2022 14:28:48 +0100
Subject: [PATCH] AKS: enable isVnetManaged, add caching

Co-authored-by: Mike Tougeron
---
 azure/scope/cluster.go                  |  19 +++-
 azure/scope/cluster_test.go             | 127 ++++++++++++++++++++++
 azure/scope/managedcontrolplane.go      |  28 ++++-
 azure/scope/managedcontrolplane_test.go | 137 ++++++++++++++++++++++++
 azure/services/subnets/subnets.go       |   2 +-
 5 files changed, 310 insertions(+), 3 deletions(-)

diff --git a/azure/scope/cluster.go b/azure/scope/cluster.go
index e1f7e24e406..ec3ca3eb388 100644
--- a/azure/scope/cluster.go
+++ b/azure/scope/cluster.go
@@ -56,6 +56,7 @@ type ClusterScopeParams struct {
 	Client       client.Client
 	Cluster      *clusterv1.Cluster
 	AzureCluster *infrav1.AzureCluster
+	Cache        *ClusterCache
 }
 
 // NewClusterScope creates a new Scope from the supplied parameters.
@@ -87,6 +88,10 @@ func NewClusterScope(ctx context.Context, params ClusterScopeParams) (*ClusterSc
 		}
 	}
 
+	if params.Cache == nil {
+		params.Cache = &ClusterCache{}
+	}
+
 	helper, err := patch.NewHelper(params.AzureCluster, params.Client)
 	if err != nil {
 		return nil, errors.Errorf("failed to init patch helper: %v", err)
@@ -98,6 +103,7 @@ func NewClusterScope(ctx context.Context, params ClusterScopeParams) (*ClusterSc
 		Cluster:      params.Cluster,
 		AzureCluster: params.AzureCluster,
 		patchHelper:  helper,
+		cache:        params.Cache,
 	}, nil
 }
 
@@ -105,12 +111,18 @@ func NewClusterScope(ctx context.Context, params ClusterScopeParams) (*ClusterSc
 type ClusterScope struct {
 	Client      client.Client
 	patchHelper *patch.Helper
+	cache       *ClusterCache
 	AzureClients
 
 	Cluster      *clusterv1.Cluster
 	AzureCluster *infrav1.AzureCluster
 }
 
+// ClusterCache stores ClusterCache data locally so we don't have to hit the API multiple times within the same reconcile loop.
+type ClusterCache struct {
+	isVnetManaged *bool
+}
+
 // BaseURI returns the Azure ResourceManagerEndpoint.
 func (s *ClusterScope) BaseURI() string {
 	return s.ResourceManagerEndpoint
@@ -524,7 +536,12 @@ func (s *ClusterScope) Vnet() *infrav1.VnetSpec {
 
 // IsVnetManaged returns true if the vnet is managed.
 func (s *ClusterScope) IsVnetManaged() bool {
-	return s.Vnet().ID == "" || s.Vnet().Tags.HasOwned(s.ClusterName())
+	if s.cache.isVnetManaged != nil {
+		return to.Bool(s.cache.isVnetManaged)
+	}
+	isVnetManaged := s.Vnet().ID == "" || s.Vnet().Tags.HasOwned(s.ClusterName())
+	s.cache.isVnetManaged = to.BoolPtr(isVnetManaged)
+	return isVnetManaged
 }
 
 // IsIPv6Enabled returns true if IPv6 is enabled.
diff --git a/azure/scope/cluster_test.go b/azure/scope/cluster_test.go
index d13d1b7d0a8..fb44854f16b 100644
--- a/azure/scope/cluster_test.go
+++ b/azure/scope/cluster_test.go
@@ -793,6 +793,7 @@ func TestRouteTableSpecs(t *testing.T) {
 						},
 					},
 				},
+				cache: &ClusterCache{},
 			},
 			want: nil,
 		},
@@ -828,6 +829,7 @@ func TestRouteTableSpecs(t *testing.T) {
 						},
 					},
 				},
+				cache: &ClusterCache{},
 			},
 			want: []azure.ResourceSpecGetter{
 				&routetables.RouteTableSpec{
@@ -875,6 +877,7 @@ func TestNatGatewaySpecs(t *testing.T) {
 						},
 					},
 				},
+				cache: &ClusterCache{},
 			},
 			want: nil,
 		},
@@ -922,6 +925,7 @@ func TestNatGatewaySpecs(t *testing.T) {
 						},
 					},
 				},
+				cache: &ClusterCache{},
 			},
 			want: []azure.ResourceSpecGetter{
 				&natgateways.NatGatewaySpec{
@@ -999,6 +1003,7 @@ func TestNatGatewaySpecs(t *testing.T) {
 						},
 					},
 				},
+				cache: &ClusterCache{},
 			},
 			want: []azure.ResourceSpecGetter{
 				&natgateways.NatGatewaySpec{
@@ -1075,6 +1080,7 @@ func TestNatGatewaySpecs(t *testing.T) {
 						},
 					},
 				},
+				cache: &ClusterCache{},
 			},
 			want: []azure.ResourceSpecGetter{
 				&natgateways.NatGatewaySpec{
@@ -1154,6 +1160,7 @@ func TestNSGSpecs(t *testing.T) {
 						},
 					},
 				},
+				cache: &ClusterCache{},
 			},
 			want: []azure.ResourceSpecGetter{
 				&securitygroups.NSGSpec{
@@ -1199,6 +1206,7 @@ func TestSubnetSpecs(t *testing.T) {
 						},
 					},
 				},
+				cache: &ClusterCache{},
 			},
 			want: []azure.ResourceSpecGetter{},
 		},
@@ -1260,6 +1268,7 @@ func TestSubnetSpecs(t *testing.T) {
 						},
 					},
 				},
+				cache: &ClusterCache{},
 			},
 			want: []azure.ResourceSpecGetter{
 				&subnets.SubnetSpec{
@@ -1362,6 +1371,7 @@ func TestSubnetSpecs(t *testing.T) {
 						},
 					},
 				},
+				cache: &ClusterCache{},
 			},
 			want: []azure.ResourceSpecGetter{
 				&subnets.SubnetSpec{
@@ -1404,6 +1414,122 @@ func TestSubnetSpecs(t *testing.T) {
 				},
 			},
 		},
 	}
 }
+func TestIsVnetManaged(t *testing.T) {
+	tests := []struct {
+		name         string
+		clusterScope ClusterScope
+		want         bool
+	}{
+		{
+			name: "VNET ID is empty",
+			clusterScope: ClusterScope{
+				Cluster: &clusterv1.Cluster{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "my-cluster",
+					},
+				},
+				AzureCluster: &infrav1.AzureCluster{
+					Spec: infrav1.AzureClusterSpec{
+						NetworkSpec: infrav1.NetworkSpec{
+							Vnet: infrav1.VnetSpec{
+								ID: "",
+							},
+						},
+					},
+				},
+				cache: &ClusterCache{},
+			},
+			want: true,
+		},
+		{
+			name: "Wrong tags",
+			clusterScope: ClusterScope{
+				Cluster: &clusterv1.Cluster{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "my-cluster",
+					},
+				},
+				AzureCluster: &infrav1.AzureCluster{
+					Spec: infrav1.AzureClusterSpec{
+						NetworkSpec: infrav1.NetworkSpec{
+							Vnet: infrav1.VnetSpec{
+								ID: "my-id",
+								VnetClassSpec: infrav1.VnetClassSpec{Tags: map[string]string{
+									"key": "value",
+								}},
+							},
+						},
+					},
+				},
+				cache: &ClusterCache{},
+			},
+			want: false,
+		},
+		{
+			name: "Has owning tags",
+			clusterScope: ClusterScope{
+				Cluster: &clusterv1.Cluster{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "my-cluster",
+					},
+				},
+				AzureCluster: &infrav1.AzureCluster{
+					Spec: infrav1.AzureClusterSpec{
+						NetworkSpec: infrav1.NetworkSpec{
+							Vnet: infrav1.VnetSpec{
+								ID: "my-id",
+								VnetClassSpec: infrav1.VnetClassSpec{Tags: map[string]string{
+									"sigs.k8s.io_cluster-api-provider-azure_cluster_my-cluster": "owned",
+								}},
+							},
+						},
+					},
+				},
+				cache: &ClusterCache{},
+			},
+			want: true,
+		},
+		{
+			name: "Has cached value of false",
+			clusterScope: ClusterScope{
+				AzureCluster: &infrav1.AzureCluster{
+					Spec: infrav1.AzureClusterSpec{},
+				},
+				cache: &ClusterCache{
+					isVnetManaged: to.BoolPtr(false),
+				},
+			},
+			want: false,
+		},
+		{
+			name: "Has cached value of true",
+			clusterScope: ClusterScope{
+				AzureCluster: &infrav1.AzureCluster{
+					Spec: infrav1.AzureClusterSpec{},
+				},
+				cache: &ClusterCache{
+					isVnetManaged: to.BoolPtr(true),
+				},
+			},
+			want: true,
+		},
+	}
+
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+			got := tt.clusterScope.IsVnetManaged()
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("IsVnetManaged() = \n%t, want \n%t", got, tt.want)
+			}
+			if to.Bool(tt.clusterScope.cache.isVnetManaged) != got {
+				t.Errorf("IsVnetManaged() = \n%t, cache = \n%t", got, to.Bool(tt.clusterScope.cache.isVnetManaged))
+			}
+		})
+	}
+}
+
 func TestAzureBastionSpec(t *testing.T) {
 	tests := []struct {
 		name string
@@ -1510,6 +1636,7 @@ func TestAzureBastionSpec(t *testing.T) {
 						},
 					},
 				},
+				cache: &ClusterCache{},
 			},
 			want: &bastionhosts.AzureBastionSpec{
 				Name: "fake-azure-bastion-1",
diff --git a/azure/scope/managedcontrolplane.go b/azure/scope/managedcontrolplane.go
index a3e8cd4c2dc..c7359ff1f74 100644
--- a/azure/scope/managedcontrolplane.go
+++ b/azure/scope/managedcontrolplane.go
@@ -22,6 +22,7 @@ import (
 	"strings"
 
 	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/to"
 	"github.com/pkg/errors"
 	"golang.org/x/mod/semver"
 	corev1 "k8s.io/api/core/v1"
@@ -51,6 +52,7 @@ type ManagedControlPlaneScopeParams struct {
 	Cluster             *clusterv1.Cluster
 	ControlPlane        *infrav1exp.AzureManagedControlPlane
 	ManagedMachinePools []ManagedMachinePool
+	Cache               *ManagedControlPlaneCache
 }
 
 // NewManagedControlPlaneScope creates a new Scope from the supplied parameters.
@@ -82,6 +84,10 @@ func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlane
 		}
 	}
 
+	if params.Cache == nil {
+		params.Cache = &ManagedControlPlaneCache{}
+	}
+
 	helper, err := patch.NewHelper(params.ControlPlane, params.Client)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to init patch helper")
@@ -94,6 +100,7 @@ func NewManagedControlPlaneScope(ctx context.Context, params ManagedControlPlane
 		ControlPlane:        params.ControlPlane,
 		ManagedMachinePools: params.ManagedMachinePools,
 		patchHelper:         helper,
+		cache:               params.Cache,
 	}, nil
 }
 
@@ -102,6 +109,7 @@ type ManagedControlPlaneScope struct {
 	Client         client.Client
 	patchHelper    *patch.Helper
 	kubeConfigData []byte
+	cache          *ManagedControlPlaneCache
 	AzureClients
 
 	Cluster      *clusterv1.Cluster
@@ -109,6 +117,11 @@ type ManagedControlPlaneScope struct {
 	ManagedMachinePools []ManagedMachinePool
 }
 
+// ManagedControlPlaneCache stores ManagedControlPlane data locally so we don't have to hit the API multiple times within the same reconcile loop.
+type ManagedControlPlaneCache struct {
+	isVnetManaged *bool
+}
+
 // ResourceGroup returns the managed control plane's resource group.
 func (s *ManagedControlPlaneScope) ResourceGroup() string {
 	if s.ControlPlane == nil {
@@ -328,7 +341,20 @@ func (s *ManagedControlPlaneScope) IsIPv6Enabled() bool {
 
 // IsVnetManaged returns true if the vnet is managed.
 func (s *ManagedControlPlaneScope) IsVnetManaged() bool {
-	return true
+	if s.cache.isVnetManaged != nil {
+		return to.Bool(s.cache.isVnetManaged)
+	}
+	// TODO refactor `IsVnetManaged` so that it is able to use an upstream context
+	// see https://github.com/kubernetes-sigs/cluster-api-provider-azure/issues/2581
+	ctx := context.Background()
+	ctx, log, done := tele.StartSpanWithLogger(ctx, "scope.ManagedControlPlaneScope.IsVnetManaged")
+	defer done()
+	isManaged, err := virtualnetworks.New(s).IsManaged(ctx)
+	if err != nil {
+		log.Error(err, "Unable to determine if ManagedControlPlaneScope VNET is managed by capz", "AzureManagedCluster", s.ClusterName())
+	}
+	s.cache.isVnetManaged = to.BoolPtr(isManaged)
+	return isManaged
 }
 
 // APIServerLBName returns the API Server LB spec.
diff --git a/azure/scope/managedcontrolplane_test.go b/azure/scope/managedcontrolplane_test.go
index 0cf3edd5e7f..cd40b1a6792 100644
--- a/azure/scope/managedcontrolplane_test.go
+++ b/azure/scope/managedcontrolplane_test.go
@@ -399,3 +399,140 @@ func TestManagedControlPlaneScope_OSType(t *testing.T) {
 		})
 	}
 }
+
+func TestManagedControlPlaneScope_IsVnetManagedCache(t *testing.T) {
+	scheme := runtime.NewScheme()
+	_ = expv1.AddToScheme(scheme)
+	_ = infrav1exp.AddToScheme(scheme)
+
+	cases := []struct {
+		Name     string
+		Input    ManagedControlPlaneScopeParams
+		Expected bool
+	}{
+		{
+			Name: "no Cache value",
+			Input: ManagedControlPlaneScopeParams{
+				AzureClients: AzureClients{
+					Authorizer: autorest.NullAuthorizer{},
+				},
+				Cluster: &clusterv1.Cluster{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "cluster1",
+						Namespace: "default",
+					},
+				},
+				ControlPlane: &infrav1exp.AzureManagedControlPlane{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "cluster1",
+						Namespace: "default",
+					},
+					Spec: infrav1exp.AzureManagedControlPlaneSpec{
+						Version:        "v1.20.1",
+						SubscriptionID: "00000000-0000-0000-0000-000000000000",
+					},
+				},
+				ManagedMachinePools: []ManagedMachinePool{
+					{
+						MachinePool:      getMachinePool("pool0"),
+						InfraMachinePool: getAzureMachinePool("pool0", infrav1exp.NodePoolModeSystem),
+					},
+					{
+						MachinePool:      getMachinePool("pool1"),
+						InfraMachinePool: getLinuxAzureMachinePool("pool1"),
+					},
+				},
+			},
+			Expected: false,
+		},
+		{
+			Name: "with Cache value of true",
+			Input: ManagedControlPlaneScopeParams{
+				AzureClients: AzureClients{
+					Authorizer: autorest.NullAuthorizer{},
+				},
+				Cluster: &clusterv1.Cluster{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "cluster1",
+						Namespace: "default",
+					},
+				},
+				ControlPlane: &infrav1exp.AzureManagedControlPlane{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "cluster1",
+						Namespace: "default",
+					},
+					Spec: infrav1exp.AzureManagedControlPlaneSpec{
+						Version:        "v1.20.1",
+						SubscriptionID: "00000000-0000-0000-0000-000000000000",
+					},
+				},
+				ManagedMachinePools: []ManagedMachinePool{
+					{
+						MachinePool:      getMachinePool("pool0"),
+						InfraMachinePool: getAzureMachinePool("pool0", infrav1exp.NodePoolModeSystem),
+					},
+					{
+						MachinePool:      getMachinePool("pool1"),
+						InfraMachinePool: getLinuxAzureMachinePool("pool1"),
+					},
+				},
+				Cache: &ManagedControlPlaneCache{
+					isVnetManaged: to.BoolPtr(true),
+				},
+			},
+			Expected: true,
+		},
+		{
+			Name: "with Cache value of false",
+			Input: ManagedControlPlaneScopeParams{
+				AzureClients: AzureClients{
+					Authorizer: autorest.NullAuthorizer{},
+				},
+				Cluster: &clusterv1.Cluster{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "cluster1",
+						Namespace: "default",
+					},
+				},
+				ControlPlane: &infrav1exp.AzureManagedControlPlane{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "cluster1",
+						Namespace: "default",
+					},
+					Spec: infrav1exp.AzureManagedControlPlaneSpec{
+						Version:        "v1.20.1",
+						SubscriptionID: "00000000-0000-0000-0000-000000000000",
+					},
+				},
+				ManagedMachinePools: []ManagedMachinePool{
+					{
+						MachinePool:      getMachinePool("pool0"),
+						InfraMachinePool: getAzureMachinePool("pool0", infrav1exp.NodePoolModeSystem),
+					},
+					{
+						MachinePool:      getMachinePool("pool1"),
+						InfraMachinePool: getLinuxAzureMachinePool("pool1"),
+					},
+				},
+				Cache: &ManagedControlPlaneCache{
+					isVnetManaged: to.BoolPtr(false),
+				},
+			},
+			Expected: false,
+		},
+	}
+
+	for _, c := range cases {
+		c := c
+		t.Run(c.Name, func(t *testing.T) {
+			g := NewWithT(t)
+			fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(c.Input.ControlPlane).Build()
+			c.Input.Client = fakeClient
+			s, err := NewManagedControlPlaneScope(context.TODO(), c.Input)
+			g.Expect(err).To(Succeed())
+			isVnetManaged := s.IsVnetManaged()
+			g.Expect(isVnetManaged).To(Equal(c.Expected))
+		})
+	}
+}
diff --git a/azure/services/subnets/subnets.go b/azure/services/subnets/subnets.go
index 8a92bb1b968..a427beb947e 100644
--- a/azure/services/subnets/subnets.go
+++ b/azure/services/subnets/subnets.go
@@ -111,7 +111,7 @@ func (s *Service) Delete(ctx context.Context) error {
 	defer cancel()
 
 	if managed, err := s.IsManaged(ctx); err == nil && !managed {
-		log.V(4).Info("Skipping subnets deletion in custom vnet mode")
+		log.Info("Skipping subnets deletion in custom vnet mode")
 		return nil
 	} else if err != nil {
 		return errors.Wrap(err, "failed to check if subnets are managed")
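
Reviewer note (not part of the patch): the change memoizes IsVnetManaged() in a small per-scope cache (ClusterCache / ManagedControlPlaneCache) so the vnet-ownership answer is computed at most once per reconcile loop, and the new unit tests pre-seed the cached value to exercise the cache hit path. Below is a minimal, self-contained Go sketch of the same memoization pattern; the scope, vnetCache, lookupVnetOwnership, and lookups names are hypothetical stand-ins for illustration only, not CAPZ APIs.

	package main

	import "fmt"

	// vnetCache mirrors the patch's ClusterCache/ManagedControlPlaneCache idea:
	// a nil pointer means "not computed yet"; a non-nil pointer is the memoized answer.
	type vnetCache struct {
		isVnetManaged *bool
	}

	// scope is a stand-in for ClusterScope/ManagedControlPlaneScope.
	type scope struct {
		cache *vnetCache
		// vnetID stands in for Vnet().ID; empty means capz will create (and own) the vnet.
		vnetID string
		// lookupVnetOwnership stands in for the tag check / virtualnetworks IsManaged call.
		lookupVnetOwnership func() bool
		// lookups counts how many times the expensive path ran.
		lookups int
	}

	// IsVnetManaged returns the cached answer when present, otherwise computes and stores it.
	func (s *scope) IsVnetManaged() bool {
		if s.cache.isVnetManaged != nil {
			return *s.cache.isVnetManaged
		}
		s.lookups++
		managed := s.vnetID == "" || s.lookupVnetOwnership()
		s.cache.isVnetManaged = &managed
		return managed
	}

	func main() {
		s := &scope{
			cache:               &vnetCache{},
			vnetID:              "my-id",
			lookupVnetOwnership: func() bool { return true },
		}
		// The second call is served from the cache; lookups stays at 1.
		fmt.Println(s.IsVnetManaged(), s.IsVnetManaged(), s.lookups) // true true 1

		// Tests can pre-seed the cache, as the new unit tests in the patch do.
		cached := false
		pre := &scope{cache: &vnetCache{isVnetManaged: &cached}}
		fmt.Println(pre.IsVnetManaged()) // false, without calling the lookup at all
	}

The nil-versus-non-nil *bool distinction is what lets the cache tell "not computed yet" apart from a cached false, which is why both cache structs in the patch use a *bool field rather than a plain bool.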