diff --git a/azure/defaults_test.go b/azure/defaults_test.go
index f098fa3095c..29738894e7c 100644
--- a/azure/defaults_test.go
+++ b/azure/defaults_test.go
@@ -26,6 +26,7 @@ import (
 	"github.com/Azure/go-autorest/autorest"
 	. "github.com/onsi/gomega"
+	"sigs.k8s.io/cluster-api-provider-azure/util/tele"
 )
diff --git a/azure/regional_baseuri.go b/azure/regional_baseuri.go
new file mode 100644
index 00000000000..4978fb1f635
--- /dev/null
+++ b/azure/regional_baseuri.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package azure
+
+import (
+	"fmt"
+	"net/url"
+	"path"
+
+	"github.com/pkg/errors"
+)
+
+type aliasAuth = Authorizer
+
+// baseURIAdapter wraps an azure.Authorizer and adds a region to the BaseURI. This is useful if you need to make direct
+// calls to a specific Azure region. One possible case is avoiding replication delay when listing resources within a
+// resource group, for example the VMSSes in that group.
+type baseURIAdapter struct {
+	aliasAuth
+	Region    string
+	parsedURL *url.URL
+}
+
+// WithRegionalBaseURI returns an authorizer that has a regional base URI, like `https://{region}.management.azure.com`.
+func WithRegionalBaseURI(authorizer Authorizer, region string) (Authorizer, error) {
+	parsedURI, err := url.Parse(authorizer.BaseURI())
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to parse the base URI of the client")
+	}
+
+	return &baseURIAdapter{
+		aliasAuth: authorizer,
+		Region:    region,
+		parsedURL: parsedURI,
+	}, nil
+}
+
+// BaseURI returns a regional base URI, like `https://{region}.management.azure.com`.
+func (a *baseURIAdapter) BaseURI() string {
+	if a == nil || a.parsedURL == nil || a.Region == "" {
+		return a.aliasAuth.BaseURI()
+	}
+
+	sansScheme := path.Join(fmt.Sprintf("%s.%s", a.Region, a.parsedURL.Host), a.parsedURL.Path)
+	return fmt.Sprintf("%s://%s", a.parsedURL.Scheme, sansScheme)
+}
diff --git a/azure/regional_baseuri_test.go b/azure/regional_baseuri_test.go
new file mode 100644
index 00000000000..415aaef7c36
--- /dev/null
+++ b/azure/regional_baseuri_test.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package azure
+
+import (
+	"testing"
+
+	"github.com/golang/mock/gomock"
+	. "github.com/onsi/gomega"
+
+	"sigs.k8s.io/cluster-api-provider-azure/azure/mock_azure"
+)
+
+func TestWithRegionalBaseURI(t *testing.T) {
+	cases := []struct {
+		Name              string
+		AuthorizerFactory func(authMock *mock_azure.MockAuthorizer) Authorizer
+		Region            string
+		Result            string
+	}{
+		{
+			Name: "with a region",
+			AuthorizerFactory: func(authMock *mock_azure.MockAuthorizer) Authorizer {
+				authMock.EXPECT().BaseURI().Return("http://foo.bar").AnyTimes()
+				return authMock
+			},
+			Region: "bazz",
+			Result: "http://bazz.foo.bar",
+		},
+		{
+			Name: "with no region",
+			AuthorizerFactory: func(authMock *mock_azure.MockAuthorizer) Authorizer {
+				authMock.EXPECT().BaseURI().Return("http://foo.bar").AnyTimes()
+				return authMock
+			},
+			Result: "http://foo.bar",
+		},
+		{
+			Name: "with a region and path",
+			AuthorizerFactory: func(authMock *mock_azure.MockAuthorizer) Authorizer {
+				authMock.EXPECT().BaseURI().Return("http://foo.bar/something/id").AnyTimes()
+				return authMock
+			},
+			Region: "bazz",
+			Result: "http://bazz.foo.bar/something/id",
+		},
+	}
+
+	for _, c := range cases {
+		c := c
+		t.Run(c.Name, func(t *testing.T) {
+			g := NewWithT(t)
+			mockCtrl := gomock.NewController(t)
+			defer mockCtrl.Finish()
+			authMock := mock_azure.NewMockAuthorizer(mockCtrl)
+			regionalAuth, err := WithRegionalBaseURI(c.AuthorizerFactory(authMock), c.Region)
+			g.Expect(err).ToNot(HaveOccurred())
+			g.Expect(regionalAuth.BaseURI()).To(Equal(c.Result))
+		})
+	}
+}
diff --git a/exp/controllers/azuremanagedmachinepool_controller.go b/exp/controllers/azuremanagedmachinepool_controller.go
index addd215a376..5e6cb1c6d40 100644
--- a/exp/controllers/azuremanagedmachinepool_controller.go
+++ b/exp/controllers/azuremanagedmachinepool_controller.go
@@ -25,14 +25,6 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/client-go/tools/record"
-	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
-	"sigs.k8s.io/cluster-api-provider-azure/azure"
-	"sigs.k8s.io/cluster-api-provider-azure/azure/scope"
-	infracontroller "sigs.k8s.io/cluster-api-provider-azure/controllers"
-	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
-	"sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing"
-	"sigs.k8s.io/cluster-api-provider-azure/util/reconciler"
-	"sigs.k8s.io/cluster-api-provider-azure/util/tele"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/util"
@@ -44,6 +36,15 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 	"sigs.k8s.io/controller-runtime/pkg/source"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
+	"sigs.k8s.io/cluster-api-provider-azure/azure"
+	"sigs.k8s.io/cluster-api-provider-azure/azure/scope"
+	infracontroller "sigs.k8s.io/cluster-api-provider-azure/controllers"
+	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
+	"sigs.k8s.io/cluster-api-provider-azure/pkg/coalescing"
+	"sigs.k8s.io/cluster-api-provider-azure/util/reconciler"
+	"sigs.k8s.io/cluster-api-provider-azure/util/tele"
 )
 
 // AzureManagedMachinePoolReconciler reconciles an AzureManagedMachinePool object.
@@ -56,7 +57,7 @@ type AzureManagedMachinePoolReconciler struct {
 	createAzureManagedMachinePoolService azureManagedMachinePoolServiceCreator
 }
 
-type azureManagedMachinePoolServiceCreator func(managedControlPlaneScope *scope.ManagedControlPlaneScope) *azureManagedMachinePoolService
+type azureManagedMachinePoolServiceCreator func(managedControlPlaneScope *scope.ManagedControlPlaneScope) (*azureManagedMachinePoolService, error)
 
 // NewAzureManagedMachinePoolReconciler returns a new AzureManagedMachinePoolReconciler instance.
 func NewAzureManagedMachinePoolReconciler(client client.Client, log logr.Logger, recorder record.EventRecorder, reconcileTimeout time.Duration, watchFilterValue string) *AzureManagedMachinePoolReconciler {
@@ -240,7 +241,12 @@ func (ammpr *AzureManagedMachinePoolReconciler) reconcileNormal(ctx context.Cont
 		return reconcile.Result{}, err
 	}
 
-	if err := ammpr.createAzureManagedMachinePoolService(scope).Reconcile(ctx); err != nil {
+	svc, err := ammpr.createAzureManagedMachinePoolService(scope)
+	if err != nil {
+		return reconcile.Result{}, errors.Wrap(err, "failed to create an AzureManagedMachinePoolService")
+	}
+
+	if err := svc.Reconcile(ctx); err != nil {
 		// Handle transient and terminal errors
 		log := scope.WithValues("name", scope.InfraMachinePool.Name, "namespace", scope.InfraMachinePool.Namespace)
 		var reconcileError azure.ReconcileError
@@ -278,7 +284,12 @@ func (ammpr *AzureManagedMachinePoolReconciler) reconcileDelete(ctx context.Cont
 		// So, remove the finalizer.
 		controllerutil.RemoveFinalizer(scope.InfraMachinePool, infrav1.ClusterFinalizer)
 	} else {
-		if err := ammpr.createAzureManagedMachinePoolService(scope).Delete(ctx); err != nil {
+		svc, err := ammpr.createAzureManagedMachinePoolService(scope)
+		if err != nil {
+			return reconcile.Result{}, errors.Wrap(err, "failed to create an AzureManagedMachinePoolService")
+		}
+
+		if err := svc.Delete(ctx); err != nil {
 			return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureManagedMachinePool %s/%s", scope.InfraMachinePool.Namespace, scope.InfraMachinePool.Name)
 		}
 		// Machine pool successfully deleted, remove the finalizer.
diff --git a/exp/controllers/azuremanagedmachinepool_reconciler.go b/exp/controllers/azuremanagedmachinepool_reconciler.go
index 3ab5b5196b7..75f91c2fc50 100644
--- a/exp/controllers/azuremanagedmachinepool_reconciler.go
+++ b/exp/controllers/azuremanagedmachinepool_reconciler.go
@@ -74,12 +74,21 @@ func (a *AgentPoolVMSSNotFoundError) Is(target error) bool {
 }
 
 // newAzureManagedMachinePoolService populates all the services based on input scope.
-func newAzureManagedMachinePoolService(scope *scope.ManagedControlPlaneScope) *azureManagedMachinePoolService {
+func newAzureManagedMachinePoolService(scope *scope.ManagedControlPlaneScope) (*azureManagedMachinePoolService, error) {
+	var authorizer azure.Authorizer = scope
+	if scope.Location() != "" {
+		regionalAuthorizer, err := azure.WithRegionalBaseURI(scope, scope.Location())
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to create a regional authorizer")
+		}
+		authorizer = regionalAuthorizer
+	}
+
 	return &azureManagedMachinePoolService{
 		scope:         scope,
 		agentPoolsSvc: agentpools.New(scope),
-		scaleSetsSvc:  scalesets.NewClient(scope),
-	}
+		scaleSetsSvc:  scalesets.NewClient(authorizer),
+	}, nil
 }
 
 // Reconcile reconciles all the services in a predetermined order.
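Below is a minimal, self-contained sketch (not part of this diff) of the rewrite performed by `WithRegionalBaseURI` and `baseURIAdapter.BaseURI` above: the region is prefixed to the host of the base URI while the scheme and path are preserved, and an empty region falls back to the original URI. The `baseURIer` interface, the `staticAuthorizer` type, and the sample inputs are illustrative stand-ins for the real `azure.Authorizer`, which carries more than `BaseURI()`.

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

// baseURIer is a trimmed-down stand-in for the piece of azure.Authorizer that
// the adapter relies on; the real interface also exposes credentials and more.
type baseURIer interface {
	BaseURI() string
}

// staticAuthorizer is a trivial implementation used only for this sketch.
type staticAuthorizer struct{ uri string }

func (s staticAuthorizer) BaseURI() string { return s.uri }

// withRegionalBaseURI mirrors the adapter's behavior: prefix the host with the
// region, keep scheme and path, and leave the URI untouched when no region is set.
func withRegionalBaseURI(a baseURIer, region string) (baseURIer, error) {
	if region == "" {
		return a, nil
	}
	parsed, err := url.Parse(a.BaseURI())
	if err != nil {
		return nil, err
	}
	sansScheme := path.Join(fmt.Sprintf("%s.%s", region, parsed.Host), parsed.Path)
	return staticAuthorizer{uri: fmt.Sprintf("%s://%s", parsed.Scheme, sansScheme)}, nil
}

func main() {
	// Same inputs and expected outputs as the table-driven cases in regional_baseuri_test.go:
	// http://bazz.foo.bar, http://foo.bar, http://bazz.foo.bar/something/id.
	inputs := []struct{ uri, region string }{
		{"http://foo.bar", "bazz"},
		{"http://foo.bar", ""},
		{"http://foo.bar/something/id", "bazz"},
	}
	for _, in := range inputs {
		regional, err := withRegionalBaseURI(staticAuthorizer{uri: in.uri}, in.region)
		if err != nil {
			panic(err)
		}
		fmt.Println(regional.BaseURI())
	}
}
```

The real adapter embeds the wrapped authorizer through the `aliasAuth` type alias, so every other `Authorizer` method passes through unchanged and only `BaseURI()` is overridden.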
diff --git a/templates/test/ci/cluster-template-prow-aks-multi-tenancy.yaml b/templates/test/ci/cluster-template-prow-aks-multi-tenancy.yaml
index 347da52bbdc..2b94b9d856a 100644
--- a/templates/test/ci/cluster-template-prow-aks-multi-tenancy.yaml
+++ b/templates/test/ci/cluster-template-prow-aks-multi-tenancy.yaml
@@ -32,7 +32,7 @@ spec:
     kind: AzureClusterIdentity
     name: ${CLUSTER_IDENTITY_NAME}
     namespace: ${CLUSTER_IDENTITY_NAMESPACE}
-  location: northcentralus
+  location: ${AZURE_LOCATION}
   resourceGroupName: ${AZURE_RESOURCE_GROUP:=${CLUSTER_NAME}}
   sshPublicKey: ${AZURE_SSH_PUBLIC_KEY_B64:=""}
   subscriptionID: ${AZURE_SUBSCRIPTION_ID}
diff --git a/templates/test/ci/prow-aks-multi-tenancy/kustomization.yaml b/templates/test/ci/prow-aks-multi-tenancy/kustomization.yaml
index 02dfa518db3..c984f8be5d0 100644
--- a/templates/test/ci/prow-aks-multi-tenancy/kustomization.yaml
+++ b/templates/test/ci/prow-aks-multi-tenancy/kustomization.yaml
@@ -5,4 +5,3 @@ resources:
 - ../../../flavors/aks-multi-tenancy
 patchesStrategicMerge:
 - ../patches/tags-aks.yaml
-- ./patch_location.yaml
diff --git a/templates/test/ci/prow-aks-multi-tenancy/patch_location.yaml b/templates/test/ci/prow-aks-multi-tenancy/patch_location.yaml
deleted file mode 100644
index ee9e5a0fa54..00000000000
--- a/templates/test/ci/prow-aks-multi-tenancy/patch_location.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-# TODO(@devigned) remove this patch once the AKS / VMSS list issue from
-# https://github.com/kubernetes-sigs/cluster-api-provider-azure/pull/1800
-
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: AzureManagedControlPlane
-metadata:
-  name: ${CLUSTER_NAME}
-  namespace: default
-spec:
-  location: northcentralus
\ No newline at end of file
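As a small, hypothetical illustration of how the now-templated location ties back to the Go changes above: the CI templates read the control plane location from `${AZURE_LOCATION}` instead of the removed hard-coded `northcentralus` patch, and `newAzureManagedMachinePoolService` only wraps its authorizer when `scope.Location()` is non-empty, so an unset location leaves the default base URI in place. The environment-variable read and the fixed management endpoint below are illustrative assumptions, not code from this change.

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Stand-in for the templated value: the CI templates now take the location
	// from ${AZURE_LOCATION} rather than a hard-coded region.
	location := os.Getenv("AZURE_LOCATION")

	// Simplified version of the guard in newAzureManagedMachinePoolService and
	// the adapter's fallback: only a non-empty location yields a regional endpoint.
	uri := "https://management.azure.com"
	if location != "" {
		uri = fmt.Sprintf("https://%s.management.azure.com", location)
	}
	fmt.Println(uri) // e.g. https://eastus.management.azure.com when AZURE_LOCATION=eastus
}
```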