diff --git a/admiral/cmd/admiral/cmd/root.go b/admiral/cmd/admiral/cmd/root.go index 3af47cc3..05fa8a26 100644 --- a/admiral/cmd/admiral/cmd/root.go +++ b/admiral/cmd/admiral/cmd/root.go @@ -246,6 +246,8 @@ func GetRootCmd(args []string) *cobra.Command { rootCmd.PersistentFlags().BoolVar(¶ms.AdmiralOperatorMode, "admiral_operator_mode", false, "Enable/Disable admiral operator functionality") rootCmd.PersistentFlags().StringVar(¶ms.OperatorSyncNamespace, "operator_sync_namespace", "admiral-operator-sync", "Namespace in which Admiral Operator will put its generated configurations") + rootCmd.PersistentFlags().StringVar(¶ms.OperatorSecretFilterTags, "operator_secret_filter_tags", "admiral/syncoperator", + "Filter tags for the specific admiral operator namespace secret to watch") return rootCmd } diff --git a/admiral/pkg/clusters/configSyncer.go b/admiral/pkg/clusters/configSyncer.go index cab23a9b..866b5542 100644 --- a/admiral/pkg/clusters/configSyncer.go +++ b/admiral/pkg/clusters/configSyncer.go @@ -50,7 +50,7 @@ func updateRegistryConfigForClusterPerEnvironment(ctxLogger *logrus.Entry, remot task := "updateRegistryConfigForClusterPerEnvironment" defer util.LogElapsedTimeForTask(ctxLogger, task, registryConfig.IdentityName, "", "", "processingTime")() k8sClient, err := remoteRegistry.ClientLoader.LoadKubeClientFromPath(common.GetKubeconfigPath()) - if err != nil && common.GetSecretFilterTags() == "admiral/syncrtay" { + if err != nil && common.GetSecretFilterTags() == common.GetOperatorSecretFilterTags() { ctxLogger.Infof(common.CtxLogFormat, task, registryConfig.IdentityName, "", "", "unable to get kube client") return errors.Wrap(err, "unable to get kube client") } diff --git a/admiral/pkg/clusters/configwriter.go b/admiral/pkg/clusters/configwriter.go index 670f29d3..afd24133 100644 --- a/admiral/pkg/clusters/configwriter.go +++ b/admiral/pkg/clusters/configwriter.go @@ -1,6 +1,7 @@ package clusters import ( + "fmt" "sort" "strconv" "strings" @@ -12,6 +13,8 @@ import ( networkingV1Alpha3 "istio.io/api/networking/v1alpha3" ) +const typeLabel = "type" + // IstioSEBuilder is an interface to construct Service Entry objects // from IdentityConfig objects. It can construct multiple Service Entries // from an IdentityConfig or construct just one given a IdentityConfigEnvironment. 
@@ -36,7 +39,7 @@ func (b *ServiceEntryBuilder) BuildServiceEntriesFromIdentityConfig(ctxLogger *l ) ctxLogger.Infof(common.CtxLogFormat, "buildServiceEntry", identity, common.GetSyncNamespace(), b.ClientCluster, "Beginning to build the SE spec") ingressEndpoints, err := getIngressEndpoints(identityConfig.Clusters) - if err != nil { + if err != nil || len(ingressEndpoints) == 0 { return serviceEntries, err } _, isServerOnClientCluster := ingressEndpoints[b.ClientCluster] @@ -110,7 +113,10 @@ func getServiceEntryEndpoint( var err error endpoint := ingressEndpoints[serverCluster] tmpEp := endpoint.DeepCopy() - tmpEp.Labels["type"] = identityConfigEnvironment.Type + tmpEp.Labels[typeLabel] = identityConfigEnvironment.Type + if len(identityConfigEnvironment.Services) == 0 { + return tmpEp, fmt.Errorf("there were no services for the asset in namespace %s on cluster %s", identityConfigEnvironment.Namespace, serverCluster) + } if clientCluster == serverCluster { for _, service := range identityConfigEnvironment.Services { if service.Weight == -1 { @@ -140,9 +146,8 @@ func getExportTo(ctxLogger *logrus.Entry, registryClient registry.IdentityConfig // For each client asset of cname, we fetch its identityConfig clientIdentityConfig, err = registryClient.GetIdentityConfigByIdentityName(clientAsset, ctxLogger) if err != nil { - // TODO: this should return an error. ctxLogger.Infof(common.CtxLogFormat, "buildServiceEntry", clientAsset, common.GetSyncNamespace(), "", "could not fetch IdentityConfig: "+err.Error()) - continue + return clientNamespaces, err } for _, clientIdentityConfigCluster := range clientIdentityConfig.Clusters { // For each cluster the client asset is deployed on, we check if that cluster is the client cluster we are writing to diff --git a/admiral/pkg/clusters/configwriter_test.go b/admiral/pkg/clusters/configwriter_test.go index c12d0665..79c47348 100644 --- a/admiral/pkg/clusters/configwriter_test.go +++ b/admiral/pkg/clusters/configwriter_test.go @@ -66,7 +66,7 @@ func createMockServiceEntry(env string, identity string, endpointAddress string, } func TestGetIngressEndpoints(t *testing.T) { - identityConfig := registry.GetSampleIdentityConfig() + identityConfig := registry.GetSampleIdentityConfig("sample") expectedIngressEndpoints := map[string]*networkingV1Alpha3.WorkloadEntry{ "cluster1": { Address: "abc-elb.us-west-2.elb.amazonaws.com.", @@ -104,7 +104,7 @@ func TestGetServiceEntryEndpoint(t *testing.T) { admiralParams := admiralParamsForConfigWriterTests() common.ResetSync() common.InitializeConfig(admiralParams) - e2eEnv := registry.GetSampleIdentityConfigEnvironment("e2e", "ns-1-usw2-e2e") + e2eEnv := registry.GetSampleIdentityConfigEnvironment("e2e", "ns-1-usw2-e2e", "sample") ingressEndpoints := map[string]*networkingV1Alpha3.WorkloadEntry{"cluster1": { Address: "abc-elb.us-west-2.elb.amazonaws.com.", Locality: "us-west-2", @@ -226,17 +226,24 @@ func TestBuildServiceEntriesFromIdentityConfig(t *testing.T) { common.InitializeConfig(admiralParams) rr, _ := InitAdmiralOperator(context.Background(), admiralParams) ctxLogger := common.GetCtxLogger(context.Background(), "test", "") - identityConfig := registry.GetSampleIdentityConfig() + identityConfig := registry.GetSampleIdentityConfig("sample") expectedLocalServiceEntryPRF := createMockServiceEntry("prf", "sample", "app-1-spk-root-service.ns-1-usw2-prf.svc.cluster.local.", 8090, []string{"istio-system", "ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}) expectedLocalServiceEntryE2E := createMockServiceEntry("e2e", "sample", 
"app-1-spk-root-service.ns-1-usw2-e2e.svc.cluster.local.", 8090, []string{"istio-system", "ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}) expectedLocalServiceEntryQAL := createMockServiceEntry("qal", "sample", "app-1-spk-root-service.ns-1-usw2-qal.svc.cluster.local.", 8090, []string{"istio-system", "ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}) expectedLocalServiceEntries := []*networkingV1Alpha3.ServiceEntry{&expectedLocalServiceEntryQAL, &expectedLocalServiceEntryPRF, &expectedLocalServiceEntryE2E} + identityConfigFailsIngressEndpoints := registry.GetSampleIdentityConfig("sample") + identityConfigFailsIngressEndpoints.Clusters = map[string]*registry.IdentityConfigCluster{} + identityConfigFailsExportTo := registry.GetSampleIdentityConfig("sample") + identityConfigFailsExportTo.ClientAssets["fake"] = "fake" + identityConfigFailsServiceEntryEndpoint := registry.GetSampleIdentityConfig("sample") + identityConfigFailsServiceEntryEndpoint.Clusters["cluster1"].Environment["e2e"].Services = make(map[string]*registry.RegistryServiceConfig) testCases := []struct { name string clientCluster string event admiral.EventType identityConfig registry.IdentityConfig expectedServiceEntries []*networkingV1Alpha3.ServiceEntry + expectedErr bool }{ { name: "Given information to build an se, " + @@ -246,13 +253,41 @@ func TestBuildServiceEntriesFromIdentityConfig(t *testing.T) { event: admiral.Add, identityConfig: identityConfig, expectedServiceEntries: expectedLocalServiceEntries, + expectedErr: false, + }, + { + name: "Given getIngressEndpoints fails with a non-nil error or is empty, " + + "Then there should be an empty array of returned service entries", + clientCluster: "cluster1", + event: admiral.Add, + identityConfig: identityConfigFailsIngressEndpoints, + expectedServiceEntries: make([]*networkingV1Alpha3.ServiceEntry, 0), + expectedErr: false, + }, + { + name: "Given getExportTo fails with a non-nil error, " + + "Then there should be an empty array of returned service entries", + clientCluster: "cluster1", + event: admiral.Add, + identityConfig: identityConfigFailsExportTo, + expectedServiceEntries: make([]*networkingV1Alpha3.ServiceEntry, 0), + expectedErr: true, + }, + { + name: "Given getServiceEntryEndpoint fails with a non-nil error, " + + "Then there should be an empty array of returned service entries", + clientCluster: "cluster1", + event: admiral.Add, + identityConfig: identityConfigFailsServiceEntryEndpoint, + expectedServiceEntries: make([]*networkingV1Alpha3.ServiceEntry, 0), + expectedErr: true, }, } for _, c := range testCases { t.Run(c.name, func(t *testing.T) { serviceEntryBuilder := ServiceEntryBuilder{ClientCluster: c.clientCluster, RemoteRegistry: rr} serviceEntries, err := serviceEntryBuilder.BuildServiceEntriesFromIdentityConfig(ctxLogger, c.identityConfig) - if err != nil { + if err != nil && !c.expectedErr { t.Errorf("want=%v, \ngot=%v", nil, err) } opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.ServiceEntry{}, networkingV1Alpha3.ServicePort{}, networkingV1Alpha3.WorkloadEntry{}) diff --git a/admiral/pkg/clusters/serviceentry.go b/admiral/pkg/clusters/serviceentry.go index aa01f46a..ba583a5b 100644 --- a/admiral/pkg/clusters/serviceentry.go +++ b/admiral/pkg/clusters/serviceentry.go @@ -1085,6 +1085,7 @@ func modifySidecarForLocalClusterCommunication( sidecar, err := sidecarConfig.IstioClient.NetworkingV1alpha3().Sidecars(sidecarNamespace).Get(ctx, common.GetWorkloadSidecarName(), v12.GetOptions{}) if err != nil { + ctxLogger.Warnf(common.CtxLogFormat, 
"modifySidecarForLocalClusterCommunication", sourceIdentity, sidecarNamespace, rc.ClusterID, err) return } if sidecar == nil || (sidecar.Spec.Egress == nil) { @@ -1132,12 +1133,40 @@ func addUpdateSidecar(ctxLogger *logrus.Entry, ctx context.Context, obj *v1alpha } _, err = rc.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars(namespace).Update(ctx, obj, v12.UpdateOptions{}) if err != nil { + err = retryUpdatingSidecar(ctxLogger, ctx, obj, exist, namespace, rc, err, "Update") ctxLogger.Infof(LogErrFormat, "Update", "Sidecar", obj.Name, rc.ClusterID, err) } else { ctxLogger.Infof(LogErrFormat, "Update", "Sidecar", obj.Name, rc.ClusterID, "Success") } } +func retryUpdatingSidecar(ctxLogger *logrus.Entry, ctx context.Context, obj *v1alpha3.Sidecar, exist *v1alpha3.Sidecar, namespace string, rc *RemoteController, err error, op string) error { + numRetries := 5 + if err != nil && (k8sErrors.IsConflict(err) || k8sErrors.IsInvalid(err)) { + for i := 0; i < numRetries; i++ { + ctxLogger.Errorf(common.CtxLogFormat, op, obj.Name, obj.Namespace, rc.ClusterID, err.Error()+". retrying sidecar update.") + + updatedSidecar, err := rc.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars(namespace).Get(ctx, exist.Name, v12.GetOptions{}) + // if old sidecar not found, just create a new sidecar instead + if err != nil { + ctxLogger.Infof(common.CtxLogFormat, op, exist.Name, exist.Namespace, rc.ClusterID, err.Error()+fmt.Sprintf(". Error getting old sidecar")) + continue + } + existingResourceVersion := updatedSidecar.GetResourceVersion() + ctxLogger.Infof(common.CtxLogFormat, op, obj.Name, obj.Namespace, rc.ClusterID, fmt.Sprintf("existingResourceVersion=%s resourceVersionUsedForUpdate=%s", updatedSidecar.ResourceVersion, obj.ResourceVersion)) + updatedSidecar.Spec = obj.Spec + updatedSidecar.Annotations = obj.Annotations + updatedSidecar.Labels = obj.Labels + updatedSidecar.SetResourceVersion(existingResourceVersion) + _, err = rc.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars(namespace).Update(ctx, updatedSidecar, v12.UpdateOptions{}) + if err == nil { + return nil + } + } + } + return err +} + func copySidecar(sidecar *v1alpha3.Sidecar) *v1alpha3.Sidecar { newSidecarObj := &v1alpha3.Sidecar{} newSidecarObj.Spec.WorkloadSelector = sidecar.Spec.WorkloadSelector @@ -1226,7 +1255,6 @@ func AddServiceEntriesWithDrWorker( //partitionedIdentity holds the originally passed in identity which could have a partition prefix partitionedIdentity := identityId //identityId is guaranteed to have the non-partitioned identity - // Operator Branch 1: since partition cache will not be filled, return identityId from getNonPartitionedIdentity identityId = getNonPartitionedIdentity(rr.AdmiralCache, identityId) // Operator: When calling this function make a channel with one cluster in it for cluster := range clusters { // TODO log cluster / service entry @@ -1238,6 +1266,10 @@ func AddServiceEntriesWithDrWorker( addSEorDRToAClusterError error ) + if common.IsAdmiralOperatorMode() { + syncNamespace = common.GetOperatorSyncNamespace() + } + rc := rr.GetRemoteController(cluster) if rc == nil { ctxLogger.Warnf(common.CtxLogFormat, "AddServiceEntriesWithDrWorker", "", "", cluster, "remote controller not found for the cluster") @@ -1252,7 +1284,6 @@ func AddServiceEntriesWithDrWorker( } //this get is within the loop to avoid race condition when one event could update destination rule on stale data - // TODO: Operator: Fill these caches in AdmiralCache in shardHandler globalTrafficPolicy, err := 
cache.GlobalTrafficCache.GetFromIdentity(partitionedIdentity, env) if err != nil { ctxLogger.Errorf(LogErrFormat, "GlobalTrafficCache", "", "", cluster, err.Error()) @@ -1452,12 +1483,11 @@ func AddServiceEntriesWithDrWorker( // build list of gateway clusters gwClusters := []string{} for _, gwAlias := range common.GetGatewayAssetAliases() { - // TODO: Operator fills this cache in produceIdentityConfigs dependents := rr.AdmiralCache.IdentityDependencyCache.Get(partitionedIdentity) if dependents != nil && dependents.Len() > 0 { dependents.Range(func(_ string, dependent string) { if strings.Contains(strings.ToLower(dependent), strings.ToLower(gwAlias)) { - gwClustersMap := getClusters(rr, dependent) + gwClustersMap := getClusters(rr, dependent, ctxLogger) if gwClustersMap != nil { for _, cluster := range gwClustersMap.GetKeys() { gwClusters = append(gwClusters, cluster) @@ -1532,7 +1562,6 @@ func AddServiceEntriesWithDrWorker( } } } - errors <- addSEorDRToAClusterError } } @@ -1547,10 +1576,18 @@ func getClusterRegion(rr *RemoteRegistry, cluster string, rc *RemoteController) return "", fmt.Errorf("failed to get region of cluster %v", cluster) } -func getClusters(rr *RemoteRegistry, dependent string) *common.Map { +func getClusters(rr *RemoteRegistry, dependent string, ctxLogger *logrus.Entry) *common.Map { if common.IsAdmiralOperatorMode() { - // TODO: go through registry client to pull dependent identity clusters and construct map... - return nil + // Any better way than calling service registry here? + dependentIdentityConfig, err := rr.RegistryClient.GetIdentityConfigByIdentityName(dependent, ctxLogger) + if err != nil { + return nil + } + gwClusterMap := common.NewMap() + for _, cluster := range dependentIdentityConfig.Clusters { + gwClusterMap.Put(cluster.Name, cluster.Name) + } + return gwClusterMap } return rr.AdmiralCache.IdentityClusterCache.Get(dependent) } diff --git a/admiral/pkg/clusters/serviceentry_test.go b/admiral/pkg/clusters/serviceentry_test.go index 3795a881..17d1894a 100644 --- a/admiral/pkg/clusters/serviceentry_test.go +++ b/admiral/pkg/clusters/serviceentry_test.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/util" "github.com/istio-ecosystem/admiral/admiral/pkg/registry" registryMocks "github.com/istio-ecosystem/admiral/admiral/pkg/registry/mocks" "github.com/stretchr/testify/mock" @@ -3010,6 +3011,78 @@ func TestModifyExistingSidecarForLocalClusterCommunication(t *testing.T) { } } +func TestRetryUpdatingSidecar(t *testing.T) { + ctxLogger := logrus.WithFields(logrus.Fields{"txId": "abc"}) + setupForServiceEntryTests() + var ( + assetIdentity = "test-identity" + identityNamespace = "test-sidecar-namespace" + sidecarName = "default" + assetHostsList = []string{"test-host"} + sidecar = &v1alpha3.Sidecar{ + ObjectMeta: metav1.ObjectMeta{ + Name: sidecarName, + Namespace: identityNamespace, + ResourceVersion: "12345", + }, + Spec: istioNetworkingV1Alpha3.Sidecar{ + Egress: []*istioNetworkingV1Alpha3.IstioEgressListener{ + { + Hosts: assetHostsList, + }, + }, + }, + } + sidecarController = &istio.SidecarController{} + remoteController = &RemoteController{} + sidecarCacheEgressMap = common.NewSidecarEgressMap() + ) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + sidecarCacheEgressMap.Put( + assetIdentity, + "test-dependency-namespace", + "test-local-fqdn", + map[string]string{ + "test.myservice.global": "1", + }, + ) + 
remoteController.SidecarController = sidecarController + sidecarController.IstioClient = istiofake.NewSimpleClientset() + createdSidecar, _ := sidecarController.IstioClient.NetworkingV1alpha3().Sidecars(identityNamespace).Create(context.TODO(), sidecar, metav1.CreateOptions{}) + sidecarEgressMap := make(map[string]common.SidecarEgress) + cnameMap := common.NewMap() + cnameMap.Put("test.myservice.global", "1") + sidecarEgressMap["test-dependency-namespace"] = common.SidecarEgress{Namespace: "test-dependency-namespace", FQDN: "test-local-fqdn", CNAMEs: cnameMap} + newSidecar := copySidecar(createdSidecar) + egressHosts := make(map[string]string) + for _, sidecarEgress := range sidecarEgressMap { + egressHost := sidecarEgress.Namespace + "/" + sidecarEgress.FQDN + egressHosts[egressHost] = egressHost + sidecarEgress.CNAMEs.Range(func(k, v string) { + scopedCname := sidecarEgress.Namespace + "/" + k + egressHosts[scopedCname] = scopedCname + }) + } + for egressHost := range egressHosts { + if !util.Contains(newSidecar.Spec.Egress[0].Hosts, egressHost) { + newSidecar.Spec.Egress[0].Hosts = append(newSidecar.Spec.Egress[0].Hosts, egressHost) + } + } + newSidecarConfig := createSidecarSkeleton(newSidecar.Spec, common.GetWorkloadSidecarName(), identityNamespace) + err := retryUpdatingSidecar(ctxLogger, ctx, newSidecarConfig, createdSidecar, identityNamespace, remoteController, k8sErrors.NewConflict(schema.GroupResource{}, "", nil), "Add") + if err != nil { + t.Errorf("failed to retry updating sidecar, got err: %v", err) + } + updatedSidecar, err := sidecarController.IstioClient.NetworkingV1alpha3().Sidecars("test-sidecar-namespace").Get(ctx, "default", metav1.GetOptions{}) + if err != nil { + t.Errorf("failed to get updated sidecar, got err: %v", err) + } + if updatedSidecar.ResourceVersion != createdSidecar.ResourceVersion { + t.Errorf("resource version check failed, expected %v but got %v", createdSidecar.ResourceVersion, updatedSidecar.ResourceVersion) + } +} + func TestCreateServiceEntry(t *testing.T) { setupForServiceEntryTests() ctxLogger := logrus.WithFields(logrus.Fields{ @@ -9831,6 +9904,40 @@ func TestPartitionAwarenessExportToMultipleRemote(t *testing.T) { } } +func TestGetClusters(t *testing.T) { + admiralParams := admiralParamsForServiceEntryTests() + admiralParams.AdmiralOperatorMode = true + admiralParams.OperatorSecretFilterTags = "admiral/syncoperator" + admiralParams.SecretFilterTags = "admiral/sync" + common.ResetSync() + common.InitializeConfig(admiralParams) + rr, _ := InitAdmiral(context.Background(), admiralParams) + expectedgwClusterMap := common.NewMap() + expectedgwClusterMap.Put("cluster1", "cluster1") + ctxLogger := logrus.WithFields(logrus.Fields{"txId": "abc"}) + testCases := []struct { + name string + dependent string + expectedMap *common.Map + }{ + { + name: "Given that dependent is a valid identity, " + + "When we call getClusters on it, " + + "Then we get a map with the clusters it is deployed on", + dependent: "sample", + expectedMap: expectedgwClusterMap, + }, + } + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + gwClusterMap := getClusters(rr, c.dependent, ctxLogger) + if !reflect.DeepEqual(gwClusterMap, c.expectedMap) { + t.Errorf("got=%+v, want %+v", gwClusterMap, c.expectedMap) + } + }) + } +} + func TestStateSyncerConfiguration(t *testing.T) { var ( env = "test" diff --git a/admiral/pkg/clusters/shard_handler.go b/admiral/pkg/clusters/shard_handler.go index 7df447c6..aece48be 100644 --- a/admiral/pkg/clusters/shard_handler.go +++ 
b/admiral/pkg/clusters/shard_handler.go @@ -9,12 +9,9 @@ import ( admiralapiv1 "github.com/istio-ecosystem/admiral-api/pkg/apis/admiral/v1" "github.com/istio-ecosystem/admiral/admiral/pkg/registry" - log "github.com/sirupsen/logrus" - k8sErrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + log "github.com/sirupsen/logrus" ) type ShardHandler struct { @@ -84,6 +81,7 @@ func HandleEventForShard(ctx context.Context, event admiral.EventType, obj *admi // ProduceIdentityConfigsFromShard creates a registry client and uses it to get the identity configs // of the assets on the shard, and puts those into configWriterData which go into the job channel func ProduceIdentityConfigsFromShard(ctxLogger *log.Entry, shard admiralapiv1.Shard, configWriterData chan<- *ConfigWriterData, rr *RemoteRegistry, producerWG *sync.WaitGroup) { + cnames := make(map[string]string) for _, clusterShard := range shard.Spec.Clusters { for _, identityItem := range clusterShard.Identities { identityConfig, err := rr.RegistryClient.GetIdentityConfigByIdentityName(identityItem.Name, ctxLogger) @@ -91,12 +89,37 @@ func ProduceIdentityConfigsFromShard(ctxLogger *log.Entry, shard admiralapiv1.Sh ctxLogger.Warnf(common.CtxLogFormat, "ProduceIdentityConfig", identityItem.Name, shard.Namespace, clusterShard.Name, err) } ctxLogger.Infof(common.CtxLogFormat, "ProduceIdentityConfig", identityConfig.IdentityName, shard.Namespace, clusterShard.Name, "successfully produced IdentityConfig") - //TODO: Fill rr.AdmiralCache - //1. IdentityDependencyCache (identityConfig.IdentityName -> clientAssets) - //2. GlobalTrafficCache (id + env -> gtp) - //3. OutlierDetectionCache (id + env -> od) - //4. ClientConnectionConfigCache (id + env -> ccc) - //5. ClusterLocalityCache (cluster -> cluster -> locality) (don't care about set functionality, only one locality per cluster) + // Fill the IdentityDependencyCache + for _, clientAsset := range identityConfig.ClientAssets { + rr.AdmiralCache.IdentityDependencyCache.Put(identityConfig.IdentityName, clientAsset, clientAsset) + } + // Fill the GTP, OD, and CCC caches + for _, identityConfigCluster := range identityConfig.Clusters { + for _, identityConfigEnv := range identityConfigCluster.Environment { + err = rr.AdmiralCache.GlobalTrafficCache.Put(&identityConfigEnv.TrafficPolicy.GlobalTrafficPolicy) + if err != nil { + ctxLogger.Warnf(common.CtxLogFormat, "ProduceIdentityConfigGTPPut", identityItem.Name, shard.Namespace, clusterShard.Name, err) + } + err = rr.AdmiralCache.OutlierDetectionCache.Put(&identityConfigEnv.TrafficPolicy.OutlierDetection) + if err != nil { + ctxLogger.Warnf(common.CtxLogFormat, "ProduceIdentityConfigODPut", identityItem.Name, shard.Namespace, clusterShard.Name, err) + } + err = rr.AdmiralCache.ClientConnectionConfigCache.Put(&identityConfigEnv.TrafficPolicy.ClientConnectionConfig) + if err != nil { + ctxLogger.Warnf(common.CtxLogFormat, "ProduceIdentityConfigCCCPut", identityItem.Name, shard.Namespace, clusterShard.Name, err) + } + // Fill the DependencyNamespaceCache + for _, clientAsset := range identityConfig.ClientAssets { + //TODO: How to deal with multiple services here? 
+ cname := common.GetCnameVal([]string{identityConfigEnv.Name, strings.ToLower(identityConfig.IdentityName), common.GetHostnameSuffix()}) + cnames[cname] = "1" + localFqdn := identityConfigEnv.ServiceName + common.Sep + identityConfigEnv.Namespace + common.GetLocalDomainSuffix() + rr.AdmiralCache.DependencyNamespaceCache.Put(clientAsset, identityConfigEnv.Namespace, localFqdn, cnames) + } + } + // Fill the ClusterLocalityCache + rr.AdmiralCache.ClusterLocalityCache.Put(identityConfigCluster.Name, identityConfigCluster.Name, identityConfigCluster.Locality) + } configWriterData <- &ConfigWriterData{ IdentityConfig: &identityConfig, ClusterName: clusterShard.Name, @@ -117,58 +140,65 @@ func ConsumeIdentityConfigs(ctxLogger *log.Entry, ctx context.Context, configWri assetName := identityConfig.IdentityName clientCluster := data.ClusterName ctxLogger.Infof(common.CtxLogFormat, "ConsumeIdentityConfig", assetName, "", clientCluster, "starting to consume identityConfig") - //TODO: doesn't make much sense to have this as a struct, easier to just pass in the cluster and remote registry serviceEntryBuilder := ServiceEntryBuilder{ClientCluster: clientCluster, RemoteRegistry: rr} serviceEntries, err := serviceEntryBuilder.BuildServiceEntriesFromIdentityConfig(ctxLogger, *identityConfig) if err != nil { ctxLogger.Warnf(common.CtxLogFormat, "ConsumeIdentityConfig", assetName, "", clientCluster, err) data.Result = err.Error() } - // service deployed in cluster 1 with 2 env qal, e2e, cluster 2 with 3 env qal, e2e, prod - // write SEs to cluster 1 - // env -> list of cluster - // env -> se - // check if any of the clusters are a source cluster -> rethink this, won't work if one env is on a cluster but not on another - //isServiceEntryModifyCalledForSourceCluster := false - //for _, cluster := range identityConfig.Clusters { - // if cluster.Name == clientCluster { - // isServiceEntryModifyCalledForSourceCluster = true - // break - // } - //} - //ctx = context.WithValue(ctx, common.EventResourceType, identityConfig.Clusters[0].Environment[0].Type) - for _, se := range serviceEntries { - //clusters := make(chan string, 1) - //errors := make(chan error, 1) - //clusters <- clientCluster - //AddServiceEntriesWithDrWorker(ctxLogger, ctx, rr, - // true, //doGenerateAdditionalEndpoints() - // isServiceEntryModifyCalledForSourceCluster, - // assetName, - // strings.Split(se.Hosts[0], common.Sep)[0], - // se, - // clusters, - // errors) - rc := rr.GetRemoteController(clientCluster) - seName := strings.ToLower(se.Hosts[0]) + "-se" - sec := rc.ServiceEntryController - //TODO: se reconciliation cache - oldServiceEntry := sec.Cache.Get(seName, clientCluster) - if oldServiceEntry == nil { - ctxLogger.Infof(common.CtxLogFormat, "ConsumeIdentityConfig", seName, "", clientCluster, "starting to write se to cluster") - oldServiceEntry, err = rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(common.GetOperatorSyncNamespace()).Get(ctx, seName, metav1.GetOptions{}) - // if old service entry not find, just create a new service entry instead - if err != nil && k8sErrors.IsNotFound(err) { - ctxLogger.Infof(common.CtxLogFormat, "ConsumeIdentityConfig", seName, "", clientCluster, fmt.Sprintf("failed fetching old service entry, error=%v", err)) - oldServiceEntry = nil + isServiceEntryModifyCalledForSourceCluster := false + sourceClusterEnvironmentNamespaces := map[string]string{} + for _, cluster := range identityConfig.Clusters { + if cluster.Name == clientCluster { + isServiceEntryModifyCalledForSourceCluster = 
true + for _, environment := range cluster.Environment { + sourceClusterEnvironmentNamespaces[environment.Name] = environment.Namespace } + break } - newServiceEntry := createServiceEntrySkeleton(*se, seName, common.GetOperatorSyncNamespace()) - err = addUpdateServiceEntry(ctxLogger, ctx, newServiceEntry, oldServiceEntry, common.GetOperatorSyncNamespace(), rc) + } + // Get any type from the identityConfig + for _, cv := range identityConfig.Clusters { + for _, ev := range cv.Environment { + ctx = context.WithValue(ctx, common.EventResourceType, ev.Type) + break + } + break + } + for _, se := range serviceEntries { + isServiceEntryModifyCalledForSourceClusterAndEnv := false + env := strings.Split(se.Hosts[0], common.Sep)[0] + if _, ok := sourceClusterEnvironmentNamespaces[env]; ok && isServiceEntryModifyCalledForSourceCluster { + isServiceEntryModifyCalledForSourceClusterAndEnv = true + } + clusters := make(chan string, 1) + errors := make(chan error, 1) + go AddServiceEntriesWithDrWorker(ctxLogger, ctx, rr, + true, //TODO: doGenerateAdditionalEndpoints() + isServiceEntryModifyCalledForSourceClusterAndEnv, + assetName, + strings.Split(se.Hosts[0], common.Sep)[0], + copyServiceEntry(se), + clusters, + errors) + clusters <- clientCluster + close(clusters) + err := <-errors if err != nil { - ctxLogger.Warnf(common.CtxLogFormat, "ConsumeIdentityConfig", seName, "", clientCluster, err) + ctxLogger.Warnf(common.CtxLogFormat, "ConsumeIdentityConfig", strings.ToLower(se.Hosts[0])+"-se", "", clientCluster, err) data.Result = err.Error() } + if isServiceEntryModifyCalledForSourceClusterAndEnv { + ctxLogger.Infof(common.CtxLogFormat, "ConsumeIdentityConfig", strings.ToLower(se.Hosts[0])+"-se", "", clientCluster, "modifying Sidecar for local cluster communication") + err = modifySidecarForLocalClusterCommunication( + ctxLogger, + ctx, sourceClusterEnvironmentNamespaces[env], assetName, + rr.AdmiralCache.DependencyNamespaceCache, rr.GetRemoteController(clientCluster)) + if err != nil { + ctxLogger.Errorf(common.CtxLogFormat, "modifySidecarForLocalClusterCommunication", + assetName, sourceClusterEnvironmentNamespaces[env], "", err) + } + } } configWriterDataResults <- data } diff --git a/admiral/pkg/clusters/shard_handler_test.go b/admiral/pkg/clusters/shard_handler_test.go index 327f3be4..11559f7f 100644 --- a/admiral/pkg/clusters/shard_handler_test.go +++ b/admiral/pkg/clusters/shard_handler_test.go @@ -4,12 +4,18 @@ import ( "context" "encoding/json" "fmt" + "github.com/golang/protobuf/ptypes/duration" + "github.com/google/go-cmp/cmp" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/wrapperspb" + "istio.io/client-go/pkg/apis/networking/v1alpha3" "sync" "testing" admiralapiv1 "github.com/istio-ecosystem/admiral-api/pkg/apis/admiral/v1" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio" + "github.com/istio-ecosystem/admiral/admiral/pkg/test" "github.com/sirupsen/logrus" istioNetworkingV1Alpha3 "istio.io/api/networking/v1alpha3" istiofake "istio.io/client-go/pkg/clientset/versioned/fake" @@ -23,10 +29,14 @@ func setupForShardTests() common.AdmiralParams { admiralParams := admiralParamsForServiceEntryTests() admiralParams.EnableAbsoluteFQDN = true admiralParams.EnableAbsoluteFQDNForLocalEndpoints = true + admiralParams.DisableIPGeneration = true admiralParams.SANPrefix = "pre-prod.api.org.com" admiralParams.ExportToMaxNamespaces 
= 35 admiralParams.AdmiralOperatorMode = true - admiralParams.OperatorSyncNamespace = "shard-namespace" + admiralParams.OperatorSyncNamespace = "admiral-operator-sync" + admiralParams.AdditionalEndpointSuffixes = []string{"org"} + admiralParams.SecretFilterTags = "admiral/sync" + admiralParams.OperatorSecretFilterTags = "admiral/syncoperator" shardTestSingleton.Do(func() { common.ResetSync() initHappened = true @@ -40,6 +50,28 @@ func setupForShardTests() common.AdmiralParams { return admiralParams } +func createRemoteControllerForShardTests(cluster string) *RemoteController { + return &RemoteController{ + ClusterID: cluster, + ServiceEntryController: &istio.ServiceEntryController{ + IstioClient: istiofake.NewSimpleClientset(), + Cache: istio.NewServiceEntryCache(), + }, + DestinationRuleController: &istio.DestinationRuleController{ + IstioClient: istiofake.NewSimpleClientset(), + Cache: istio.NewDestinationRuleCache(), + }, + VirtualServiceController: &istio.VirtualServiceController{ + IstioClient: istiofake.NewSimpleClientset(), + VirtualServiceHandler: &test.MockVirtualServiceHandler{}, + }, + SidecarController: &istio.SidecarController{ + IstioClient: istiofake.NewSimpleClientset(), + SidecarHandler: &test.MockSidecarHandler{}, + }, + } +} + func createMockShard(shardName string, clusterName string, identityName string, identityEnv string) *admiralapiv1.Shard { identityItem := admiralapiv1.IdentityItem{ Name: identityName, @@ -78,70 +110,141 @@ func jsonPrint(v any) string { func TestShardHandler_Added(t *testing.T) { admiralParams := setupForShardTests() rr, _ := InitAdmiralOperator(context.Background(), admiralParams) - rc1 := &RemoteController{ - ClusterID: "cluster1", - ServiceEntryController: &istio.ServiceEntryController{ - IstioClient: istiofake.NewSimpleClientset(), - Cache: istio.NewServiceEntryCache(), - }, - } - rc2 := &RemoteController{ - ClusterID: "cluster-usw2-k8s", - ServiceEntryController: &istio.ServiceEntryController{ - IstioClient: istiofake.NewSimpleClientset(), - Cache: istio.NewServiceEntryCache(), - }, - } + rr.AdmiralDatabaseClient = &mockDatabaseClientWithError{} + rc1 := createRemoteControllerForShardTests("cluster1") + rc2 := createRemoteControllerForShardTests("cluster-usw2-k8s") rr.PutRemoteController("cluster1", rc1) rr.PutRemoteController("cluster-usw2-k8s", rc2) - sampleShard1 := createMockShard("shard-sample", "cluster1", "sample", "e2e") + //sampleShard1 := createMockShard("shard-sample", "cluster1", "sample", "e2e") sampleShard2 := createMockShard("blackhole-shard", "cluster-usw2-k8s", "ppdmeshtestblackhole", "ppd") - shardHandler := &ShardHandler{ - RemoteRegistry: rr, - } - se1 := &istioNetworkingV1Alpha3.ServiceEntry{ - Hosts: []string{"e2e.sample.mesh"}, - Ports: []*istioNetworkingV1Alpha3.ServicePort{{Number: 80, Protocol: "http", Name: "http"}}, - Location: istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL, - Resolution: istioNetworkingV1Alpha3.ServiceEntry_DNS, - Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{{Address: "app-1-spk-root-service.ns-1-usw2-e2e.svc.cluster.local.", Ports: map[string]uint32{"http": 8090}, Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "rollout"}, Locality: "us-west-2"}}, - ExportTo: []string{common.NamespaceIstioSystem, "ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}, - SubjectAltNames: []string{"spiffe://pre-prod.api.org.com/sample"}, + shardHandler := &ShardHandler{RemoteRegistry: rr} + defaultSidecar := &v1alpha3.Sidecar{ + ObjectMeta: v1.ObjectMeta{ + Name: "default", + }, + 
Spec: istioNetworkingV1Alpha3.Sidecar{ + Egress: []*istioNetworkingV1Alpha3.IstioEgressListener{ + { + Hosts: []string{"./*", common.NamespaceIstioSystem + "/*", common.GetOperatorSyncNamespace() + "/*"}, + }, + }, + }, } + //se1 := &istioNetworkingV1Alpha3.ServiceEntry{ + // Hosts: []string{"testDnsPrefix.e2e.sample.mesh"}, + // Ports: []*istioNetworkingV1Alpha3.ServicePort{{Number: 80, Protocol: "http", Name: "http"}}, + // Location: istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL, + // Resolution: istioNetworkingV1Alpha3.ServiceEntry_DNS, + // Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{ + // {Address: "app-1-spk-root-service.ns-1-usw2-e2e.svc.cluster.local.", + // Ports: map[string]uint32{"http": 8090}, + // Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "rollout"}, + // Locality: "us-west-2"}}, + // ExportTo: []string{common.NamespaceIstioSystem, "ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}, + // SubjectAltNames: []string{"spiffe://pre-prod.api.org.com/sample"}, + //} se2 := &istioNetworkingV1Alpha3.ServiceEntry{ - Hosts: []string{"ppd.ppdmeshtestblackhole.mesh"}, + Hosts: []string{"testDnsPrefix.ppd.ppdmeshtestblackhole.mesh"}, Ports: []*istioNetworkingV1Alpha3.ServicePort{{Number: 80, Protocol: "http", Name: "http"}}, Location: istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL, Resolution: istioNetworkingV1Alpha3.ServiceEntry_DNS, Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{ - {Address: "abc-elb.us-east-2.elb.amazonaws.com.", Ports: map[string]uint32{"http": 15443}, Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "deployment"}, Locality: "us-east-2"}, + {Address: "abc-elb.us-east-2.elb.amazonaws.com.", + Ports: map[string]uint32{"http": 15443}, + Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "deployment"}, + Locality: "us-east-2"}, }, ExportTo: []string{common.NamespaceIstioSystem, "services-inboundd268-usw2-dev"}, SubjectAltNames: []string{"spiffe://pre-prod.api.org.com/ppdmeshtestblackhole"}, } + sidecar2 := &istioNetworkingV1Alpha3.Sidecar{ + Egress: []*istioNetworkingV1Alpha3.IstioEgressListener{ + { + Hosts: []string{"./*", common.NamespaceIstioSystem + "/*", common.GetOperatorSyncNamespace() + "/*"}, + }, + }, + } + rc1.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars("ns-1-usw2-prf").Create(context.Background(), defaultSidecar, v1.CreateOptions{}) + rc1.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars("ns-1-usw2-e2e").Create(context.Background(), defaultSidecar, v1.CreateOptions{}) + rc1.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars("ns-1-usw2-qal").Create(context.Background(), defaultSidecar, v1.CreateOptions{}) + rc2.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars("services-blackholed268-usw2-dev").Create(context.Background(), defaultSidecar, v1.CreateOptions{}) + dr2 := &istioNetworkingV1Alpha3.DestinationRule{ + Host: "ppd.ppdmeshtestblackhole.mesh", + TrafficPolicy: &istioNetworkingV1Alpha3.TrafficPolicy{ + LoadBalancer: &istioNetworkingV1Alpha3.LoadBalancerSettings{ + LbPolicy: &istioNetworkingV1Alpha3.LoadBalancerSettings_Simple{ + Simple: istioNetworkingV1Alpha3.LoadBalancerSettings_LEAST_REQUEST, + }, + WarmupDurationSecs: &duration.Duration{Seconds: 0}, + }, + ConnectionPool: &istioNetworkingV1Alpha3.ConnectionPoolSettings{ + Http: &istioNetworkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{ + Http2MaxRequests: 1000, + MaxRequestsPerConnection: 5, + }, + }, + OutlierDetection: &istioNetworkingV1Alpha3.OutlierDetection{ 
+ ConsecutiveGatewayErrors: wrapperspb.UInt32(10), + Consecutive_5XxErrors: wrapperspb.UInt32(0), + Interval: &duration.Duration{Seconds: 10}, + BaseEjectionTime: &duration.Duration{Seconds: 0}, + MaxEjectionPercent: 33, + }, + Tls: &istioNetworkingV1Alpha3.ClientTLSSettings{ + Mode: istioNetworkingV1Alpha3.ClientTLSSettings_ISTIO_MUTUAL, + }, + }, + ExportTo: []string{common.NamespaceIstioSystem, "services-inboundd268-usw2-dev"}, + } + vs2 := &istioNetworkingV1Alpha3.VirtualService{ + Hosts: []string{"ppd.ppdmeshtestblackhole.org"}, + Http: []*istioNetworkingV1Alpha3.HTTPRoute{ + {Route: []*istioNetworkingV1Alpha3.HTTPRouteDestination{ + {Destination: &istioNetworkingV1Alpha3.Destination{ + Host: "ppd.ppdmeshtestblackhole.mesh", + Port: &istioNetworkingV1Alpha3.PortSelector{Number: 80}, + }, + }, + }, + }, + }, + } testCases := []struct { - name string - rc *RemoteController - shard *admiralapiv1.Shard - expectedSEName string - expectedSE *istioNetworkingV1Alpha3.ServiceEntry + name string + rc *RemoteController + shard *admiralapiv1.Shard + expectedSEName string + expectedSE *istioNetworkingV1Alpha3.ServiceEntry + expectedSidecarNS string + expectedSidecar *istioNetworkingV1Alpha3.Sidecar + expectedDRName string + expectedDR *istioNetworkingV1Alpha3.DestinationRule + expectedVSName string + expectedVS *istioNetworkingV1Alpha3.VirtualService }{ - { - name: "Given the server asset we want to write resources for is deployed on the client cluster " + - "And it is a client of itself " + - "Then an SE with local endpoint and istio-system in exportTo should be built", - rc: rc1, - shard: sampleShard1, - expectedSEName: "e2e.sample.mesh-se", - expectedSE: se1, - }, + //{ + // name: "Given the server asset we want to write resources for is deployed on the client cluster " + + // "And it is a client of itself " + + // "Then an SE with local endpoint and istio-system in exportTo should be built", + // rc: rc1, + // shard: sampleShard1, + // expectedSEName: "testdnsprefix.e2e.sample.mesh-se", + // expectedSE: se1, + // expectedSidecarNS: "ns-1-usw2-e2e", + //}, { name: "Given the server asset we want to write resources for is deployed on a remote cluster in env A and a client cluster in env B" + "Then an SE with only remote endpoint and istio-system in exportTo should be built for env B", - rc: rc2, - shard: sampleShard2, - expectedSEName: "ppd.ppdmeshtestblackhole.mesh-se", - expectedSE: se2, + rc: rc2, + shard: sampleShard2, + expectedSEName: "testdnsprefix.ppd.ppdmeshtestblackhole.mesh-se", + expectedSE: se2, + expectedSidecarNS: "services-blackholed268-usw2-dev", + expectedSidecar: sidecar2, + expectedDRName: "ppd.ppdmeshtestblackhole.mesh-default-dr", + expectedDR: dr2, + expectedVSName: "ppd.ppdmeshtestblackhole.org-vs", + expectedVS: vs2, }, //TODO: Given the server asset we want to write resources for is deployed remotely and locally in the same env, se should have local and remote endpoint and istio-system } @@ -150,17 +253,48 @@ func TestShardHandler_Added(t *testing.T) { t.Run(tt.name, func(t *testing.T) { shErr := shardHandler.Added(context.Background(), tt.shard) if shErr != nil { - t.Errorf("failed to produce SE with err: %v", shErr) + t.Errorf("failed to handle Shard with err: %v", shErr) } + // Check that expected SE matches the produced SE actualSE, seErr := tt.rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(common.GetOperatorSyncNamespace()).Get(context.Background(), tt.expectedSEName, v1.GetOptions{}) if seErr != nil { - t.Errorf("failed to get SE with 
err %v", seErr) + t.Errorf("failed to get ServiceEntry with err %v", seErr) } if actualSE == nil { - t.Errorf("expected se to not be nil") + t.Errorf("expected ServiceEntry to not be nil") } else if !compareServiceEntries(&actualSE.Spec, tt.expectedSE) { t.Errorf("got=%v, want=%v", jsonPrint(actualSE.Spec), jsonPrint(tt.expectedSE)) } + // Check that the expected Sidecar matches the produced Sidecar + actualSidecar, sidecarErr := tt.rc.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars(tt.expectedSidecarNS).Get(context.Background(), common.GetWorkloadSidecarName(), v1.GetOptions{}) + if actualSidecar == nil { + t.Errorf("expected Sidecar to not be nil") + } else if !cmp.Equal(&actualSidecar.Spec, tt.expectedSidecar, protocmp.Transform()) { + t.Errorf("got=%v, want=%v", jsonPrint(actualSidecar), jsonPrint(tt.expectedSidecar)) + } + if sidecarErr != nil { + t.Errorf("failed to get Sidecar with err %v", sidecarErr) + } + // Check that the expected DR matches the produced DR + actualDR, drErr := tt.rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(common.GetOperatorSyncNamespace()).Get(context.Background(), tt.expectedDRName, v1.GetOptions{}) + if actualDR == nil { + t.Errorf("expected DestinationRule to not be nil") + } else if !cmp.Equal(&actualDR.Spec, tt.expectedDR, protocmp.Transform()) { + t.Errorf("got=%v, want=%v", jsonPrint(actualDR), jsonPrint(tt.expectedDR)) + } + if drErr != nil { + t.Errorf("failed to get DestinationRule with err %v", drErr) + } + // Check that the expected VS matches the produced VS + actualVS, vsErr := tt.rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(common.GetOperatorSyncNamespace()).Get(context.Background(), tt.expectedVSName, v1.GetOptions{}) + if actualVS == nil { + t.Errorf("expected VirtualService to not be nil") + } else if !cmp.Equal(&actualVS.Spec, tt.expectedVS, protocmp.Transform()) { + t.Errorf("got=%v, want=%v", jsonPrint(actualVS), jsonPrint(tt.expectedVS)) + } + if vsErr != nil { + t.Errorf("failed to get VirtualService with err %v", vsErr) + } }) } } diff --git a/admiral/pkg/clusters/testdata/ppdmeshtestblackholeIdentityConfiguration.json b/admiral/pkg/clusters/testdata/ppdmeshtestblackholeIdentityConfiguration.json index 41f40fd2..b794c288 100644 --- a/admiral/pkg/clusters/testdata/ppdmeshtestblackholeIdentityConfiguration.json +++ b/admiral/pkg/clusters/testdata/ppdmeshtestblackholeIdentityConfiguration.json @@ -37,7 +37,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "ppdmeshtestblackhole" + }, + "annotations": { + "env": "prod" + } }, "spec": { "connectionPool": { @@ -51,7 +57,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "ppdmeshtestblackhole" + }, + "annotations": { + "env": "prod" + } }, "spec": { "policy": [ @@ -77,7 +89,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "ppdmeshtestblackhole" + }, + "annotations": { + "env": "prod" + } }, "spec": { "outlier_config": { @@ -126,7 +144,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "ppdmeshtestblackhole" + }, + "annotations": { + "env": "ppd" + } }, "spec": { "connectionPool": { @@ -140,7 +164,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + 
"identity": "ppdmeshtestblackhole" + }, + "annotations": { + "env": "ppd" + } }, "spec": { "policy": [ @@ -166,7 +196,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "ppdmeshtestblackhole" + }, + "annotations": { + "env": "ppd" + } }, "spec": { "outlier_config": { diff --git a/admiral/pkg/clusters/testdata/ppdmeshtestinboundsIdentityConfiguration.json b/admiral/pkg/clusters/testdata/ppdmeshtestinboundsIdentityConfiguration.json index 90d353b2..bc564a40 100644 --- a/admiral/pkg/clusters/testdata/ppdmeshtestinboundsIdentityConfiguration.json +++ b/admiral/pkg/clusters/testdata/ppdmeshtestinboundsIdentityConfiguration.json @@ -37,7 +37,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "ppdmeshtestinbounds" + }, + "annotations": { + "env": "ppd" + } }, "spec": { "connectionPool": { @@ -51,7 +57,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "ppdmeshtestinbounds" + }, + "annotations": { + "env": "ppd" + } }, "spec": { "policy": [ @@ -77,7 +89,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "ppdmeshtestinbounds" + }, + "annotations": { + "env": "ppd" + } }, "spec": { "outlier_config": { diff --git a/admiral/pkg/clusters/testdata/sampleIdentityConfiguration.json b/admiral/pkg/clusters/testdata/sampleIdentityConfiguration.json index 7bb4029f..59f6eebb 100644 --- a/admiral/pkg/clusters/testdata/sampleIdentityConfiguration.json +++ b/admiral/pkg/clusters/testdata/sampleIdentityConfiguration.json @@ -37,7 +37,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "prf" + } }, "spec": { "connectionPool": { @@ -51,7 +57,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "prf" + } }, "spec": { "policy": [ @@ -77,7 +89,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "prf" + } }, "spec": { "outlier_config": { @@ -115,7 +133,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "e2e" + } }, "spec": { "connectionPool": { @@ -129,7 +153,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "e2e" + } }, "spec": { "policy": [ @@ -155,7 +185,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "e2e" + } }, "spec": { "outlier_config": { @@ -193,7 +229,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "qal" + } }, "spec": { "connectionPool": { @@ -207,7 +249,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "qal" + } }, "spec": { "policy": [ @@ -233,7 +281,13 @@ }, "outlierDetection": { "metadata": { 
- "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "qal" + } }, "spec": { "outlier_config": { @@ -248,6 +302,6 @@ } }, "clientAssets": { - "sample": "sample" + "sample": "sample" } } \ No newline at end of file diff --git a/admiral/pkg/clusters/types.go b/admiral/pkg/clusters/types.go index 99c61500..135cf82a 100644 --- a/admiral/pkg/clusters/types.go +++ b/admiral/pkg/clusters/types.go @@ -188,6 +188,7 @@ func NewRemoteRegistry(ctx context.Context, params common.AdmiralParams) *Remote if common.IsAdmiralOperatorMode() { rr.RegistryClient = registry.NewRegistryClient(registry.WithRegistryEndpoint("PLACEHOLDER")) + rr.AdmiralCache.ClusterLocalityCache = common.NewMapOfMaps() } return rr diff --git a/admiral/pkg/controller/common/config.go b/admiral/pkg/controller/common/config.go index c4f29a99..52e91daa 100644 --- a/admiral/pkg/controller/common/config.go +++ b/admiral/pkg/controller/common/config.go @@ -470,3 +470,9 @@ func GetOperatorSyncNamespace() string { defer wrapper.RUnlock() return wrapper.params.OperatorSyncNamespace } + +func GetOperatorSecretFilterTags() string { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.OperatorSecretFilterTags +} diff --git a/admiral/pkg/controller/common/config_test.go b/admiral/pkg/controller/common/config_test.go index a4be2f55..eb22ce05 100644 --- a/admiral/pkg/controller/common/config_test.go +++ b/admiral/pkg/controller/common/config_test.go @@ -43,6 +43,7 @@ func setupForConfigTests() { ExportToMaxNamespaces: 35, AdmiralOperatorMode: false, OperatorSyncNamespace: "admiral-sync", + OperatorSecretFilterTags: "admiral/syncoperator", } ResetSync() initHappened = true @@ -162,6 +163,10 @@ func TestConfigManagement(t *testing.T) { if GetOperatorSyncNamespace() != "admiral-sync" { t.Errorf("operator sync namespace mismatch, expected admiral-sync, got %v", GetOperatorSyncNamespace()) } + + if GetOperatorSecretFilterTags() != "admiral/syncoperator" { + t.Errorf("operator secret filter tags mismatch, expected admiral/syncoperator, got %s", GetOperatorSecretFilterTags()) + } } func TestGetCRDIdentityLabelWithCRDIdentity(t *testing.T) { diff --git a/admiral/pkg/controller/common/types.go b/admiral/pkg/controller/common/types.go index af7c3bf4..a817d68c 100644 --- a/admiral/pkg/controller/common/types.go +++ b/admiral/pkg/controller/common/types.go @@ -113,8 +113,9 @@ type AdmiralParams struct { GatewayAssetAliases []string //Admiral 2.0 params - AdmiralOperatorMode bool - OperatorSyncNamespace string + AdmiralOperatorMode bool + OperatorSyncNamespace string + OperatorSecretFilterTags string } func (b AdmiralParams) String() string { diff --git a/admiral/pkg/registry/configCache.go b/admiral/pkg/registry/configCache.go index dc9e41eb..f077f099 100644 --- a/admiral/pkg/registry/configCache.go +++ b/admiral/pkg/registry/configCache.go @@ -52,7 +52,7 @@ func writeToFileLocal(writer ConfigWriter, config *IdentityConfig) error { return err } pathName := currentDir + "/testdata/" + shortAlias[len(shortAlias)-1] + "IdentityConfiguration.json" - if common.GetSecretFilterTags() == "admiral/syncrtay" { + if common.GetSecretFilterTags() == common.GetOperatorSecretFilterTags() && common.GetOperatorSecretFilterTags() != "" { pathName = "/etc/serviceregistry/config/" + shortAlias[len(shortAlias)-1] + "IdentityConfiguration.json" } fmt.Printf("[state syncer] file path=%s", pathName) diff --git a/admiral/pkg/registry/configSyncer.go b/admiral/pkg/registry/configSyncer.go index 
a21c656c..6fe548a1 100644 --- a/admiral/pkg/registry/configSyncer.go +++ b/admiral/pkg/registry/configSyncer.go @@ -11,7 +11,6 @@ import ( const ( serviceRegistryIdentityConfigMapName = "service-registry-identityconfig" admiralQaNs = "services-admiral-use2-qal" - secretLabel = "admiral/syncrtay" ) type ConfigSyncer interface { diff --git a/admiral/pkg/registry/configWriter.go b/admiral/pkg/registry/configWriter.go index 25d964a0..90e60ebe 100644 --- a/admiral/pkg/registry/configWriter.go +++ b/admiral/pkg/registry/configWriter.go @@ -38,7 +38,7 @@ func (c *configMapWriter) Write(name string, data []byte) error { err error ) defer util.LogElapsedTimeForTask(c.ctxLogger, task, name, "", "", "processingTime")() - if common.GetSecretFilterTags() != secretLabel { + if common.GetSecretFilterTags() != common.GetOperatorSecretFilterTags() || common.GetOperatorSecretFilterTags() == "" { c.ctxLogger.Infof(common.CtxLogFormat, task, name, "", "", "writing to local file") return writeToFile(name, data) } diff --git a/admiral/pkg/registry/registry.go b/admiral/pkg/registry/registry.go index e566262e..a6047776 100644 --- a/admiral/pkg/registry/registry.go +++ b/admiral/pkg/registry/registry.go @@ -55,7 +55,7 @@ func (c *registryClient) GetIdentityConfigByIdentityName(identityAlias string, c func readIdentityConfigFromFile(shortAlias []string) ([]byte, error) { pathName := "testdata/" + shortAlias[len(shortAlias)-1] + "IdentityConfiguration.json" - if common.GetSecretFilterTags() == "admiral/syncrtay" { + if common.GetSecretFilterTags() == common.GetOperatorSecretFilterTags() && common.GetOperatorSyncNamespace() != "" { pathName = "/etc/serviceregistry/config/" + shortAlias[len(shortAlias)-1] + "IdentityConfiguration.json" } return os.ReadFile(pathName) diff --git a/admiral/pkg/registry/registry_test.go b/admiral/pkg/registry/registry_test.go index ecc0a99c..84bdb9da 100644 --- a/admiral/pkg/registry/registry_test.go +++ b/admiral/pkg/registry/registry_test.go @@ -16,7 +16,7 @@ import ( ) func TestParseIdentityConfigJSON(t *testing.T) { - identityConfig := GetSampleIdentityConfig() + identityConfig := GetSampleIdentityConfig("sample") testCases := []struct { name string identityConfig IdentityConfig @@ -47,7 +47,7 @@ func TestParseIdentityConfigJSON(t *testing.T) { } func TestIdentityConfigGetByIdentityName(t *testing.T) { - sampleIdentityConfig := GetSampleIdentityConfig() + sampleIdentityConfig := GetSampleIdentityConfig("sample") registryClient := NewRegistryClient(WithRegistryEndpoint("endpoint")) var jsonErr *json.SyntaxError ctxLogger := log.WithContext(context.Background()) @@ -93,7 +93,7 @@ func TestIdentityConfigGetByIdentityName(t *testing.T) { } func TestGetIdentityConfigByClusterName(t *testing.T) { - sampleIdentityConfig := GetSampleIdentityConfig() + sampleIdentityConfig := GetSampleIdentityConfig("sample") registryClient := NewRegistryClient(WithRegistryEndpoint("endpoint")) var jsonErr *json.SyntaxError ctxLogger := log.WithContext(context.Background()) diff --git a/admiral/pkg/registry/testdata/sampleIdentityConfiguration.json b/admiral/pkg/registry/testdata/sampleIdentityConfiguration.json index 1da11848..59f6eebb 100644 --- a/admiral/pkg/registry/testdata/sampleIdentityConfiguration.json +++ b/admiral/pkg/registry/testdata/sampleIdentityConfiguration.json @@ -37,7 +37,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "prf" + } }, "spec": { 
"connectionPool": { @@ -51,7 +57,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "prf" + } }, "spec": { "policy": [ @@ -77,7 +89,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "prf" + } }, "spec": { "outlier_config": { @@ -115,7 +133,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "e2e" + } }, "spec": { "connectionPool": { @@ -129,7 +153,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "e2e" + } }, "spec": { "policy": [ @@ -155,7 +185,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "e2e" + } }, "spec": { "outlier_config": { @@ -193,7 +229,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "qal" + } }, "spec": { "connectionPool": { @@ -207,7 +249,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "qal" + } }, "spec": { "policy": [ @@ -233,7 +281,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "qal" + } }, "spec": { "outlier_config": { diff --git a/admiral/pkg/registry/testutils.go b/admiral/pkg/registry/testutils.go index 46085bcd..df3fed29 100644 --- a/admiral/pkg/registry/testutils.go +++ b/admiral/pkg/registry/testutils.go @@ -7,13 +7,13 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func GetSampleIdentityConfigEnvironment(env string, namespace string) *IdentityConfigEnvironment { +func GetSampleIdentityConfigEnvironment(env string, namespace string, identity string) *IdentityConfigEnvironment { identityConfigEnvironment := &IdentityConfigEnvironment{ Name: env, Namespace: namespace, ServiceName: "app-1-spk-root-service", Services: map[string]*RegistryServiceConfig{ - "app-1-spk-root-service": &RegistryServiceConfig{ + "app-1-spk-root-service": { Name: "app-1-spk-root-service", Weight: -1, Ports: map[string]uint32{ @@ -27,7 +27,9 @@ func GetSampleIdentityConfigEnvironment(env string, namespace string) *IdentityC TrafficPolicy: TrafficPolicy{ ClientConnectionConfig: v1alpha1.ClientConnectionConfig{ ObjectMeta: v1.ObjectMeta{ - Name: "sampleCCC", + Name: "sampleCCC", + Labels: map[string]string{"identity": identity}, + Annotations: map[string]string{"env": env}, }, Spec: v1alpha1.ClientConnectionConfigSpec{ ConnectionPool: model.ConnectionPool{Http: &model.ConnectionPool_HTTP{ @@ -39,7 +41,9 @@ func GetSampleIdentityConfigEnvironment(env string, namespace string) *IdentityC }, GlobalTrafficPolicy: v1alpha1.GlobalTrafficPolicy{ ObjectMeta: v1.ObjectMeta{ - Name: "sampleGTP", + Name: "sampleGTP", + Labels: map[string]string{"identity": identity}, + Annotations: map[string]string{"env": env}, }, Spec: model.GlobalTrafficPolicy{ Policy: []*model.TrafficPolicy{ @@ -67,7 +71,9 @@ func GetSampleIdentityConfigEnvironment(env string, namespace string) *IdentityC }, 
OutlierDetection: v1alpha1.OutlierDetection{ ObjectMeta: v1.ObjectMeta{ - Name: "sampleOD", + Name: "sampleOD", + Labels: map[string]string{"identity": identity}, + Annotations: map[string]string{"env": env}, }, Spec: model.OutlierDetection{ OutlierConfig: &model.OutlierConfig{ @@ -82,10 +88,10 @@ func GetSampleIdentityConfigEnvironment(env string, namespace string) *IdentityC return identityConfigEnvironment } -func GetSampleIdentityConfig() IdentityConfig { - prfEnv := GetSampleIdentityConfigEnvironment("prf", "ns-1-usw2-prf") - e2eEnv := GetSampleIdentityConfigEnvironment("e2e", "ns-1-usw2-e2e") - qalEnv := GetSampleIdentityConfigEnvironment("qal", "ns-1-usw2-qal") +func GetSampleIdentityConfig(identity string) IdentityConfig { + prfEnv := GetSampleIdentityConfigEnvironment("prf", "ns-1-usw2-prf", identity) + e2eEnv := GetSampleIdentityConfigEnvironment("e2e", "ns-1-usw2-e2e", identity) + qalEnv := GetSampleIdentityConfigEnvironment("qal", "ns-1-usw2-qal", identity) environments := map[string]*IdentityConfigEnvironment{ "prf": prfEnv, "e2e": e2eEnv, @@ -103,7 +109,7 @@ func GetSampleIdentityConfig() IdentityConfig { Environment: environments, } identityConfig := IdentityConfig{ - IdentityName: "sample", + IdentityName: identity, Clusters: map[string]*IdentityConfigCluster{ "cluster1": &cluster}, ClientAssets: clientAssets,
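
Editor's note on the retry logic added in addUpdateSidecar/retryUpdatingSidecar: the conflict-handling part of that loop is the standard Kubernetes read-modify-write pattern, which client-go also packages as retry.RetryOnConflict. Below is a minimal sketch of that pattern, assuming an Istio clientset equivalent to rc.SidecarController.IstioClient; the package name and the helper applyDesiredSidecarState are hypothetical and not part of this PR, and unlike the PR's helper this sketch only retries on IsConflict errors, not IsInvalid.

package example

import (
	"context"

	"istio.io/client-go/pkg/apis/networking/v1alpha3"
	versionedclient "istio.io/client-go/pkg/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"
)

// applyDesiredSidecarState re-reads the live Sidecar before every update attempt so the
// resourceVersion is always current, and lets RetryOnConflict re-run the closure on 409 conflicts.
func applyDesiredSidecarState(ctx context.Context, ic versionedclient.Interface, namespace string, desired *v1alpha3.Sidecar) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		live, err := ic.NetworkingV1alpha3().Sidecars(namespace).Get(ctx, desired.Name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		// Carry the desired spec and metadata onto the freshly fetched object,
		// keeping its current resourceVersion for the update.
		live.Spec = desired.Spec
		live.Annotations = desired.Annotations
		live.Labels = desired.Labels
		_, err = ic.NetworkingV1alpha3().Sidecars(namespace).Update(ctx, live, metav1.UpdateOptions{})
		return err
	})
}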