From 60d0a9306a1395d22d0e54801652bb6c8144933e Mon Sep 17 00:00:00 2001 From: Ryan Tay Date: Wed, 7 Aug 2024 11:38:01 -0700 Subject: [PATCH 1/9] correct root param values Signed-off-by: Ryan Tay --- admiral/cmd/admiral/cmd/root.go | 16 +-- admiral/pkg/controller/admiral/shard.go | 5 +- admiral/pkg/controller/common/config.go | 12 +++ admiral/pkg/controller/common/types.go | 129 ++++++++++++------------ 4 files changed, 92 insertions(+), 70 deletions(-) diff --git a/admiral/cmd/admiral/cmd/root.go b/admiral/cmd/admiral/cmd/root.go index 3af47cc3..482c13c1 100644 --- a/admiral/cmd/admiral/cmd/root.go +++ b/admiral/cmd/admiral/cmd/root.go @@ -191,12 +191,12 @@ func GetRootCmd(args []string) *cobra.Command { "The value of envoy filter is to add additional config to the filter config section") rootCmd.PersistentFlags().BoolVar(¶ms.EnableRoutingPolicy, "enable_routing_policy", false, "If Routing Policy feature needs to be enabled") - rootCmd.PersistentFlags().StringArrayVar(¶ms.ExcludedIdentityList, "excluded_identity_list", []string{}, + rootCmd.PersistentFlags().StringSliceVar(¶ms.ExcludedIdentityList, "excluded_identity_list", []string{}, "List of identities which should be excluded from getting processed") rootCmd.PersistentFlags().BoolVar(¶ms.EnableDiffCheck, "enable_diff_check", true, "Enable diff check") - rootCmd.PersistentFlags().StringArrayVar(¶ms.AdditionalEndpointSuffixes, "additional_endpoint_suffixes", []string{}, + rootCmd.PersistentFlags().StringSliceVar(¶ms.AdditionalEndpointSuffixes, "additional_endpoint_suffixes", []string{}, "Suffixes that Admiral should use to generate additional endpoints through VirtualServices") - rootCmd.PersistentFlags().StringArrayVar(¶ms.AdditionalEndpointLabelFilters, "additional_endpoint_label_filters", []string{}, + rootCmd.PersistentFlags().StringSliceVar(¶ms.AdditionalEndpointLabelFilters, "additional_endpoint_label_filters", []string{}, "Labels that admiral will check on deployment/rollout before creating additional endpoints. '*' would indicate generating additional endpoints for all deployment/rollouts") rootCmd.PersistentFlags().BoolVar(¶ms.EnableWorkloadDataStorage, "enable_workload_data_storage", false, "When true, workload data will be stored in a persistent storage") @@ -205,7 +205,7 @@ func GetRootCmd(args []string) *cobra.Command { rootCmd.PersistentFlags().BoolVar(¶ms.DisableIPGeneration, "disable_ip_generation", false, "When set to true, ips will not be generated and written to service entries") rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.IdentityPartitionKey, "identity_partition_key", "admiral.io/identityPartition", "The annotation on a deployment/rollout spec, which will be used to divide an asset based on user-specified partition. 
Defaults to `admiral.io/identityPartition`.") - rootCmd.PersistentFlags().StringArrayVar(¶ms.ExportToIdentityList, "exportto_identity_list", []string{"*"}, "List of identities to write ExportTo field for") + rootCmd.PersistentFlags().StringSliceVar(¶ms.ExportToIdentityList, "exportto_identity_list", []string{"*"}, "List of identities to write ExportTo field for") rootCmd.PersistentFlags().IntVar(¶ms.ExportToMaxNamespaces, "exportto_max_namespaces", 35, "Max number of namespaces to write in ExportTo field before just replacing with *") // Admiral HA flags @@ -230,11 +230,11 @@ func GetRootCmd(args []string) *cobra.Command { rootCmd.PersistentFlags().BoolVar(¶ms.EnableServiceEntryCache, "enable_serviceentry_cache", false, "Enable/Disable Caching serviceentries") rootCmd.PersistentFlags().BoolVar(¶ms.EnableDestinationRuleCache, "enable_destinationrule_cache", false, "Enable/Disable Caching destinationrules") rootCmd.PersistentFlags().BoolVar(¶ms.EnableAbsoluteFQDN, "enable_absolute_fqdn", true, "Enable/Disable Absolute FQDN") - rootCmd.PersistentFlags().StringArrayVar(¶ms.AlphaIdentityList, "alpha_identity_list", []string{}, + rootCmd.PersistentFlags().StringSliceVar(¶ms.AlphaIdentityList, "alpha_identity_list", []string{}, "Identities which can be used for testing of alpha features") rootCmd.PersistentFlags().BoolVar(¶ms.EnableAbsoluteFQDNForLocalEndpoints, "enable_absolute_fqdn_for_local_endpoints", false, "Enable/Disable Absolute FQDN for local endpoints") rootCmd.PersistentFlags().BoolVar(¶ms.EnableClientConnectionConfigProcessing, "enable_client_connection_config_processing", false, "Enable/Disable ClientConnectionConfig Processing") - rootCmd.PersistentFlags().StringArrayVar(¶ms.GatewayAssetAliases, "gateway_asset_aliases", []string{"Intuit.platform.servicesgateway.servicesgateway"}, "The asset aliases used for API Gateway") + rootCmd.PersistentFlags().StringSliceVar(¶ms.GatewayAssetAliases, "gateway_asset_aliases", []string{"Intuit.platform.servicesgateway.servicesgateway"}, "The asset aliases used for API Gateway") rootCmd.PersistentFlags().BoolVar(¶ms.EnableActivePassive, "enable_active_passive", false, "Enable/Disable Active-Passive behavior") rootCmd.PersistentFlags().BoolVar(¶ms.EnableSWAwareNSCaches, "enable_sw_aware_ns_caches", false, "Enable/Disable SW Aware NS Caches") rootCmd.PersistentFlags().BoolVar(¶ms.AdmiralStateSyncerMode, "admiral_state_syncer_mode", false, "Enable/Disable admiral to run as state syncer only") @@ -246,6 +246,10 @@ func GetRootCmd(args []string) *cobra.Command { rootCmd.PersistentFlags().BoolVar(¶ms.AdmiralOperatorMode, "admiral_operator_mode", false, "Enable/Disable admiral operator functionality") rootCmd.PersistentFlags().StringVar(¶ms.OperatorSyncNamespace, "operator_sync_namespace", "admiral-operator-sync", "Namespace in which Admiral Operator will put its generated configurations") + rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.OperatorIdentityLabel, "operator_identity_label", "admiral.io/operatorIdentity", "used to filter which shard Admiral Operator will watch") + rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.ShardIdentityLabel, "shard_identity_label", "admiral.io/shardIdentity", "used to filter which shard Admiral Operator will watch") + rootCmd.PersistentFlags().StringVar(¶ms.OperatorIdentityValue, "operator_identity_value", "", "Admiral operator should watch shards where operatorIdentityLabel == operatorIdentityValue") + rootCmd.PersistentFlags().StringVar(¶ms.ShardIdentityValue, "shard_identity_value", "", "Admiral operator should 
watch shards where shardIdentityLabel == shardIdentityValue") return rootCmd } diff --git a/admiral/pkg/controller/admiral/shard.go b/admiral/pkg/controller/admiral/shard.go index 649c793d..9f2f2d63 100644 --- a/admiral/pkg/controller/admiral/shard.go +++ b/admiral/pkg/controller/admiral/shard.go @@ -149,9 +149,10 @@ func NewShardController(stopCh <-chan struct{}, handler ShardHandler, configPath if err != nil { return nil, fmt.Errorf("failed to create shard controller crd client: %v", err) } - //TODO: should not be hardcoded, fetch actual expected operator and shard identities from env variables //labelOptions := informers.WithTweakListOptions(func(opts *metav1.ListOptions) { - // opts.LabelSelector = "admiral.io/operatorIdentity=operatorIdentity, admiral.io/shardIdentity=dev" + // opIdLabel, opIdValue := common.GetOperatorIdentityLabelValueSet() + // shardIdLabel, shardIdValue := common.GetShardIdentityLabelValueSet() + // opts.LabelSelector = fmt.Sprintf("%s=%s, %s=%s", opIdLabel, opIdValue, shardIdLabel, shardIdValue) //}) //informerFactory := informers.NewSharedInformerFactoryWithOptions(shardController.K8sClient, resyncPeriod, labelOptions) informerFactory := informers.NewSharedInformerFactoryWithOptions(shardController.K8sClient, resyncPeriod) diff --git a/admiral/pkg/controller/common/config.go b/admiral/pkg/controller/common/config.go index c4f29a99..cee4c07f 100644 --- a/admiral/pkg/controller/common/config.go +++ b/admiral/pkg/controller/common/config.go @@ -470,3 +470,15 @@ func GetOperatorSyncNamespace() string { defer wrapper.RUnlock() return wrapper.params.OperatorSyncNamespace } + +func GetOperatorIdentityLabelValueSet() (string, string) { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.LabelSet.OperatorIdentityLabel, wrapper.params.OperatorIdentityValue +} + +func GetShardIdentityLabelValueSet() (string, string) { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.LabelSet.ShardIdentityLabel, wrapper.params.ShardIdentityValue +} diff --git a/admiral/pkg/controller/common/types.go b/admiral/pkg/controller/common/types.go index af7c3bf4..375e75d9 100644 --- a/admiral/pkg/controller/common/types.go +++ b/admiral/pkg/controller/common/types.go @@ -39,66 +39,66 @@ type SidecarEgressMap struct { } type AdmiralParams struct { - ArgoRolloutsEnabled bool - KubeconfigPath string - SecretFilterTags string - CacheReconcileDuration time.Duration - SeAndDrCacheReconcileDuration time.Duration - ClusterRegistriesNamespace string - DependenciesNamespace string - DnsConfigFile string - DNSTimeoutMs int - DNSRetries int - TrafficConfigNamespace string - SyncNamespace string - EnableSAN bool - SANPrefix string - AdmiralConfig string - Profile string - LabelSet *LabelSet - LogLevel int - HostnameSuffix string - PreviewHostnamePrefix string - MetricsEnabled bool - ChannelCapacity int - WorkloadSidecarUpdate string - WorkloadSidecarName string - AdmiralStateCheckerName string - DRStateStoreConfigPath string - ServiceEntryIPPrefix string - EnvoyFilterVersion string - DeprecatedEnvoyFilterVersion string - EnvoyFilterAdditionalConfig string - EnableRoutingPolicy bool - ExcludedIdentityList []string - AdditionalEndpointSuffixes []string - AdditionalEndpointLabelFilters []string - HAMode string - EnableWorkloadDataStorage bool - EnableDiffCheck bool - EnableProxyEnvoyFilter bool - EnableDependencyProcessing bool - DeploymentOrRolloutWorkerConcurrency int - DependentClusterWorkerConcurrency int - SeAddressConfigmap string - DependencyWarmupMultiplier int - 
EnableOutlierDetection bool - EnableClientConnectionConfigProcessing bool - MaxRequestsPerConnection int32 - EnableAbsoluteFQDN bool - EnableAbsoluteFQDNForLocalEndpoints bool - DisableDefaultAutomaticFailover bool - EnableServiceEntryCache bool - AlphaIdentityList []string - EnableDestinationRuleCache bool - DisableIPGeneration bool - EnableActivePassive bool - EnableSWAwareNSCaches bool - ExportToIdentityList []string - ExportToMaxNamespaces int - AdmiralStateSyncerMode bool - DefaultWarmupDurationSecs int64 - EnableGenerationCheck bool + ArgoRolloutsEnabled bool + KubeconfigPath string + SecretFilterTags string + CacheReconcileDuration time.Duration + SeAndDrCacheReconcileDuration time.Duration + ClusterRegistriesNamespace string + DependenciesNamespace string + DnsConfigFile string + DNSTimeoutMs int + DNSRetries int + TrafficConfigNamespace string + SyncNamespace string + EnableSAN bool + SANPrefix string + AdmiralConfig string + Profile string + LabelSet *LabelSet + LogLevel int + HostnameSuffix string + PreviewHostnamePrefix string + MetricsEnabled bool + ChannelCapacity int + WorkloadSidecarUpdate string + WorkloadSidecarName string + AdmiralStateCheckerName string + DRStateStoreConfigPath string + ServiceEntryIPPrefix string + EnvoyFilterVersion string + DeprecatedEnvoyFilterVersion string + EnvoyFilterAdditionalConfig string + EnableRoutingPolicy bool + ExcludedIdentityList []string + AdditionalEndpointSuffixes []string + AdditionalEndpointLabelFilters []string + HAMode string + EnableWorkloadDataStorage bool + EnableDiffCheck bool + EnableProxyEnvoyFilter bool + EnableDependencyProcessing bool + DeploymentOrRolloutWorkerConcurrency int + DependentClusterWorkerConcurrency int + SeAddressConfigmap string + DependencyWarmupMultiplier int + EnableOutlierDetection bool + EnableClientConnectionConfigProcessing bool + MaxRequestsPerConnection int32 + EnableAbsoluteFQDN bool + EnableAbsoluteFQDNForLocalEndpoints bool + DisableDefaultAutomaticFailover bool + EnableServiceEntryCache bool + AlphaIdentityList []string + EnableDestinationRuleCache bool + DisableIPGeneration bool + EnableActivePassive bool + EnableSWAwareNSCaches bool + ExportToIdentityList []string + ExportToMaxNamespaces int + EnableSyncIstioResourcesToSourceClusters bool + DefaultWarmupDurationSecs int64 + EnableGenerationCheck bool // Cartographer specific params TrafficConfigPersona bool @@ -113,8 +113,11 @@ type AdmiralParams struct { GatewayAssetAliases []string //Admiral 2.0 params - AdmiralOperatorMode bool - OperatorSyncNamespace string + AdmiralOperatorMode bool + OperatorSyncNamespace string + AdmiralStateSyncerMode bool + OperatorIdentityValue string + ShardIdentityValue string } func (b AdmiralParams) String() string { @@ -153,6 +156,8 @@ type LabelSet struct { GatewayApp string //the value for `app` key that will be used to fetch the loadblancer for cross cluster calls, also referred to as east west gateway AdmiralCRDIdentityLabel string //Label Used to identify identity label for crd IdentityPartitionKey string //Label used for partitioning assets with same identity into groups + OperatorIdentityLabel string + ShardIdentityLabel string } type TrafficObject struct { From b5cb10bd15f60cc58d9d80b2f995178026d31cd2 Mon Sep 17 00:00:00 2001 From: Ryan Tay Date: Wed, 7 Aug 2024 12:27:18 -0700 Subject: [PATCH 2/9] add delete flow logs Signed-off-by: Ryan Tay --- admiral/pkg/clusters/shard_handler.go | 3 ++- admiral/pkg/clusters/shard_handler_test.go | 3 ++- admiral/pkg/controller/admiral/shard.go | 2 +- 3 files 
changed, 5 insertions(+), 3 deletions(-) diff --git a/admiral/pkg/clusters/shard_handler.go b/admiral/pkg/clusters/shard_handler.go index 7df447c6..5264bd88 100644 --- a/admiral/pkg/clusters/shard_handler.go +++ b/admiral/pkg/clusters/shard_handler.go @@ -38,11 +38,12 @@ func (sh *ShardHandler) Added(ctx context.Context, obj *admiralapiv1.Shard) erro } func (sh *ShardHandler) Deleted(ctx context.Context, obj *admiralapiv1.Shard) error { - // TODO: Not yet implemented //err := HandleEventForShard(ctx, admiral.Delete, obj, sh.RemoteRegistry) //if err != nil { // return fmt.Errorf(LogErrFormat, common.Delete, common.ShardResourceType, obj.Name, "", err) //} + ctxLogger := common.GetCtxLogger(ctx, obj.Name, "") + ctxLogger.Warnf(common.CtxLogFormat, "ShardHandlerDeleted", obj.Name, obj.Namespace, "", "", "Shard object was deleted") return nil } diff --git a/admiral/pkg/clusters/shard_handler_test.go b/admiral/pkg/clusters/shard_handler_test.go index 327f3be4..25f2bf7c 100644 --- a/admiral/pkg/clusters/shard_handler_test.go +++ b/admiral/pkg/clusters/shard_handler_test.go @@ -171,7 +171,8 @@ func TestShardHandler_Deleted(t *testing.T) { shardHandler := &ShardHandler{ RemoteRegistry: rr, } - err := shardHandler.Deleted(context.Background(), nil) + shard := createMockShard("testShard", "testCluster", "testIdentity", "testEnv") + err := shardHandler.Deleted(context.Background(), shard) if err != nil { t.Errorf("expected nil err for delete, for %v", err) } diff --git a/admiral/pkg/controller/admiral/shard.go b/admiral/pkg/controller/admiral/shard.go index 9f2f2d63..2aa81803 100644 --- a/admiral/pkg/controller/admiral/shard.go +++ b/admiral/pkg/controller/admiral/shard.go @@ -213,7 +213,7 @@ func (d *ShardController) Deleted(ctx context.Context, obj interface{}) error { if err == nil && len(key) > 0 { d.Cache.DeleteFromShardClusterCache(key, shard) } - return err + return d.ShardHandler.Deleted(ctx, shard) } func (d *ShardController) LogValueOfAdmiralIoIgnore(obj interface{}) { From 62877afcbbd6e71a848a9f67dfc044ec0874c1c8 Mon Sep 17 00:00:00 2001 From: kpharasi Date: Wed, 14 Aug 2024 10:32:40 -0700 Subject: [PATCH 3/9] Add links to Readme Signed-off-by: kpharasi Signed-off-by: Ryan Tay --- README.md | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 452dfe1f..9a687215 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,9 @@ Organizations below are **officially** using Admiral. Please send a PR with your ## Collaboration and Communication -[Admiral Slack Channel](https://istio.slack.com/archives/CT3F18T08) - `Note:` This channel is under Istio slack org, please fill out this [form](https://docs.google.com/forms/d/e/1FAIpQLSfdsupDfOWBtNVvVvXED6ULxtR4UIsYGCH_cQcRr0VcG1ZqQQ/viewform) to get access to Istio slack. +[Admiral Slack Channel](https://istio.slack.com/archives/CT3F18T08) + +`Note:` This channel is under Istio slack org, please fill out this [form](https://docs.google.com/forms/d/e/1FAIpQLSfdsupDfOWBtNVvVvXED6ULxtR4UIsYGCH_cQcRr0VcG1ZqQQ/viewform) to get access to Istio slack. 
## Local Development Refer to [Local Development Setup](./CONTRIBUTING.md#setting-up-for-local-development) @@ -61,15 +63,15 @@ Details can be found [here](./docs/Processes.md) ## Admiral Sequence Diagram ### Legend: -SE - Istio ServiceEntry +SE - [Istio ServiceEntry](https://istio.io/latest/docs/reference/config/networking/service-entry/) -VS - Istio VirtualService +VS - [Istio VirtualService](https://istio.io/latest/docs/reference/config/networking/virtual-service/) -DR - Istio DestinationRule +DR - [Istio DestinationRule](https://istio.io/latest/docs/reference/config/networking/destination-rule/) -K8sAPI - Kubernetes API Server +K8s API - [Kubernetes API Server](https://kubernetes.io/docs/concepts/overview/kubernetes-api/) -GTP - Admiral GlobalTrafficPolicy +GTP - [Admiral GlobalTrafficPolicy](https://github.com/istio-ecosystem/admiral/blob/master/docs/Architecture.md#global-traffic-policy) ```mermaid sequenceDiagram From fb5e44f889dcb07acb909e684c299632c320af61 Mon Sep 17 00:00:00 2001 From: kpharasi Date: Wed, 14 Aug 2024 11:18:05 -0700 Subject: [PATCH 4/9] Remove directory no longer needed Signed-off-by: kpharasi Signed-off-by: Ryan Tay --- integration/admiral_deployment.tmpl | 261 ---------------------------- integration/go.mod | 3 - integration/render_template.go | 47 ----- 3 files changed, 311 deletions(-) delete mode 100644 integration/admiral_deployment.tmpl delete mode 100644 integration/go.mod delete mode 100644 integration/render_template.go diff --git a/integration/admiral_deployment.tmpl b/integration/admiral_deployment.tmpl deleted file mode 100644 index 0c7803d3..00000000 --- a/integration/admiral_deployment.tmpl +++ /dev/null @@ -1,261 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: pr-build-{{ .AdmiralLabel }} - applications.argoproj.io/app-name: services-admiral-usw2-qal - assetId: "8287766806579881856" - buildType: maven - l1: services - l2: mesh - name: pr-build-{{ .AdmiralLabel }} - namespace: services-admiral-usw2-qal -spec: - progressDeadlineSeconds: 600 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - app: pr-build-{{ .AdmiralLabel }} - template: - metadata: - annotations: - iam.amazonaws.com/role: arn:aws:iam::857109464775:role/k8s-services-admiral-usw2-qal - labels: - app: pr-build-{{ .AdmiralLabel }} - applications.argoproj.io/app-name: services-admiral-usw2-qal - assetId: "8287766806579881856" - l1: services - l2: mesh - splunk-index: iks - spec: - containers: - - args: - - --dependency_namespace - - services-admiral-usw2-qal - - --secret_namespace - - services-admiral-usw2-qal - - --sync_namespace - - admiral-sync - - --secret_filter_tags - - {{ .SecretFilterTag }} - - --san_prefix - - pre-prod.api.intuit.com - - --secret_resolver - - intuit - - --secret_resolver_config_path - - /etc/admiral/config.yaml - - --hostname_suffix - - mesh - - --workload_identity_key - - alpha.istio.io/identity - - --admiral_crd_identity_label - - assetAlias - - --workload_sidecar_update - - enabled - - --workload_sidecar_name - - default - - --argo_rollouts=true - - --enable_san=true - - --sync_period - - 60s - - --log_level - - "5" - - --admiral_state_checker_name - - dynamodbbasedstatechecker - - --dr_state_store_config_path - - /etc/admiral/config.yaml - - --envoy_filter_version - - 1.13,1.17 - - --enable_routing_policy=true - - --envoy_filter_additional_config - - 'dynamicRouterHost: internal.intuit.services.mesh.dynamicroutingservice.mesh' - - --additional_endpoint_suffixes - - intuit - - 
--additional_endpoint_label_filters - - iks.intuit.com/express-version - - --enable_workload_data_storage - - --enable_dependency_processing=true - - --se_ip_prefix - - "244.0" - - --se_address_configmap - - se-address-configmap-qal - - --max_requests_per_connection=5 - - --disable_default_automatic_failover=true - - --log_to_file=true - - --log_file_path=/app/logs/admiral.log - - --enable_serviceentry_cache=true - - --enable_destinationrule_cache=true - - --enable_absolute_fqdn=true - - --alpha_identity_list=* - - --enable_absolute_fqdn_for_local_endpoints=true - - --enable_active_passive=true - - --disable_ip_generation=true - - --enable_client_connection_config_processing=true - - --enable_sw_aware_ns_caches=true - image: {{ .BuildImage }} - imagePullPolicy: IfNotPresent - name: admiral - ports: - - containerPort: 8082 - name: debug - protocol: TCP - - containerPort: 8080 - name: admiral - protocol: TCP - - containerPort: 6900 - name: admiral-metrics - protocol: TCP - resources: - limits: - cpu: "4" - memory: 12288M - requests: - cpu: "2" - memory: 10240M - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /etc/admiral - name: admiral-config-vol - - mountPath: /app/logs/ - name: logdir - - env: - - name: SPLUNK_SECRET_PATH - value: /etc/secrets/splunk - - name: SERVICE_LOG_CONTAINER_NAME - value: admiral - image: docker.intuit.com/cloud/logging/k8ssplunkforwarder/service/base_splunk_forwarder:9.0.16 - imagePullPolicy: IfNotPresent - name: splunk-forwarder - resources: - requests: - cpu: 300m - memory: 300M - startupProbe: - exec: - command: - - /opt/splunkforwarder/health.sh - failureThreshold: 30 - initialDelaySeconds: 2 - periodSeconds: 2 - successThreshold: 1 - timeoutSeconds: 1 - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /app/logs/ - name: logdir - - mountPath: /etc/secrets/splunk - name: splunk-secrets - - mountPath: /etc/podinfo - name: podinfo - - mountPath: /etc/splunk - name: splunk-indexers-volume - - mountPath: /opt/splunkforwarder/etc/system/local/inputs.conf - name: splunk-inputs-conf - subPath: splunk.conf - - args: - - /usr/bin/envoy - - -c - - /etc/envoy/envoy.yaml - - --log-level - - info - command: - - sudo - image: docker.intuit.com/oicp/standard/envoy/debian11-envoy1:1.0.31 - imagePullPolicy: IfNotPresent - livenessProbe: - failureThreshold: 5 - httpGet: - path: /health/ready - port: 443 - scheme: HTTPS - initialDelaySeconds: 50 - periodSeconds: 5 - successThreshold: 1 - timeoutSeconds: 1 - name: envoy - ports: - - containerPort: 443 - protocol: TCP - readinessProbe: - failureThreshold: 3 - httpGet: - path: /health/ready - port: 443 - scheme: HTTPS - initialDelaySeconds: 50 - periodSeconds: 5 - successThreshold: 3 - timeoutSeconds: 1 - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /etc/envoy - name: envoy-config-vol - - mountPath: /etc/envoy/ssl - name: envoy-tls-cert-vol - dnsPolicy: ClusterFirst - initContainers: - - env: - - name: MYSTIKO_CONFIG - value: /mystiko/config.yaml - image: docker.intuit.com/intgctls-platctls/mystiko-cli/service/mystiko:1.4.0 - imagePullPolicy: IfNotPresent - name: mystiko-splunk-certs - resources: {} - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - volumeMounts: - - mountPath: /etc/secrets/splunk - name: splunk-secrets - - mountPath: /mystiko - name: mystiko-config - nodeSelector: - 
node.kubernetes.io/instancegroup: services-admiral-usw2-qal-default - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - serviceAccount: admiral - serviceAccountName: admiral - terminationGracePeriodSeconds: 30 - volumes: - - configMap: - defaultMode: 420 - name: admiral-config-configmap - name: admiral-config-vol - - configMap: - defaultMode: 420 - name: envoy-config-configmap - name: envoy-config-vol - - name: envoy-tls-cert-vol - secret: - defaultMode: 438 - secretName: admiral-envoy-tls-cert - - configMap: - defaultMode: 420 - name: mystiko-config - name: mystiko-config - - emptyDir: {} - name: logdir - - configMap: - defaultMode: 420 - name: splunk-inputs-conf - name: splunk-inputs-conf - - configMap: - defaultMode: 420 - name: splunk-indexers - name: splunk-indexers-volume - - emptyDir: - medium: Memory - name: splunk-secrets - - downwardAPI: - defaultMode: 420 - items: - - fieldRef: - apiVersion: v1 - fieldPath: metadata.labels - path: labels - name: podinfo diff --git a/integration/go.mod b/integration/go.mod deleted file mode 100644 index 20a79f0d..00000000 --- a/integration/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module integration - -go 1.18 diff --git a/integration/render_template.go b/integration/render_template.go deleted file mode 100644 index b225916f..00000000 --- a/integration/render_template.go +++ /dev/null @@ -1,47 +0,0 @@ -package main - -import ( - "fmt" - "os" - "text/template" -) - -type yamlInputs struct { - BuildImage string - AdmiralLabel string - SecretFilterTag string -} - -func main() { - - yaml := yamlInputs{ - BuildImage: os.Getenv("ADMIRAL_BUILD_IMAGE"), - AdmiralLabel: os.Getenv("ADMIRAL_LABEL"), - SecretFilterTag: os.Getenv("SECRET_FILTER_TAG"), - } - - fmt.Println("Rendering template with the following inputs:") - fmt.Println("BuildImage: ", yaml.BuildImage) - fmt.Println("AdmiralLabel: ", yaml.AdmiralLabel) - fmt.Println("SecretFilterTag: ", yaml.SecretFilterTag) - - // Create the file - f, err := os.OpenFile("admiral_rendered_deployment.yaml", os.O_WRONLY|os.O_CREATE, 0600) - if err != nil { - panic(err) - } - defer f.Close() - - // Render the template - var tmplFile = "admiral_deployment.tmpl" - tmpl, err := template.New(tmplFile).ParseFiles(tmplFile) - if err != nil { - panic(err) - } - - // Execute the template to the file - err = tmpl.Execute(f, yaml) - if err != nil { - panic(err) - } -} From 618a3ff4f4f5657fbcf31f49e7d9fb368f1175fe Mon Sep 17 00:00:00 2001 From: Ryan Tay Date: Wed, 14 Aug 2024 10:29:45 -0700 Subject: [PATCH 5/9] Signed-off-by: Ryan Tay Co-authored-by: Anubhav Aeron Co-authored-by: nirvanagit Adding operator functionality --- admiral/cmd/admiral/cmd/root.go | 2 + admiral/pkg/clusters/configSyncer.go | 2 +- admiral/pkg/clusters/configwriter.go | 13 +- admiral/pkg/clusters/configwriter_test.go | 43 +++- admiral/pkg/clusters/serviceentry.go | 53 +++- admiral/pkg/clusters/serviceentry_test.go | 107 ++++++++ admiral/pkg/clusters/shard_handler.go | 132 ++++++---- admiral/pkg/clusters/shard_handler_test.go | 234 ++++++++++++++---- ...eshtestblackholeIdentityConfiguration.json | 48 +++- ...meshtestinboundsIdentityConfiguration.json | 24 +- .../testdata/sampleIdentityConfiguration.json | 74 +++++- admiral/pkg/clusters/types.go | 1 + admiral/pkg/controller/common/config.go | 6 + admiral/pkg/controller/common/config_test.go | 5 + admiral/pkg/controller/common/types.go | 1 + admiral/pkg/registry/configCache.go | 2 +- admiral/pkg/registry/configSyncer.go | 1 - admiral/pkg/registry/configWriter.go | 2 +- 
admiral/pkg/registry/registry.go | 2 +- admiral/pkg/registry/registry_test.go | 6 +- .../testdata/sampleIdentityConfiguration.json | 72 +++++- admiral/pkg/registry/testutils.go | 26 +- 22 files changed, 693 insertions(+), 163 deletions(-) diff --git a/admiral/cmd/admiral/cmd/root.go b/admiral/cmd/admiral/cmd/root.go index 482c13c1..7bf02701 100644 --- a/admiral/cmd/admiral/cmd/root.go +++ b/admiral/cmd/admiral/cmd/root.go @@ -250,6 +250,8 @@ func GetRootCmd(args []string) *cobra.Command { rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.ShardIdentityLabel, "shard_identity_label", "admiral.io/shardIdentity", "used to filter which shard Admiral Operator will watch") rootCmd.PersistentFlags().StringVar(¶ms.OperatorIdentityValue, "operator_identity_value", "", "Admiral operator should watch shards where operatorIdentityLabel == operatorIdentityValue") rootCmd.PersistentFlags().StringVar(¶ms.ShardIdentityValue, "shard_identity_value", "", "Admiral operator should watch shards where shardIdentityLabel == shardIdentityValue") + rootCmd.PersistentFlags().StringVar(¶ms.OperatorSecretFilterTags, "operator_secret_filter_tags", "admiral/syncoperator", + "Filter tags for the specific admiral operator namespace secret to watch") return rootCmd } diff --git a/admiral/pkg/clusters/configSyncer.go b/admiral/pkg/clusters/configSyncer.go index cab23a9b..866b5542 100644 --- a/admiral/pkg/clusters/configSyncer.go +++ b/admiral/pkg/clusters/configSyncer.go @@ -50,7 +50,7 @@ func updateRegistryConfigForClusterPerEnvironment(ctxLogger *logrus.Entry, remot task := "updateRegistryConfigForClusterPerEnvironment" defer util.LogElapsedTimeForTask(ctxLogger, task, registryConfig.IdentityName, "", "", "processingTime")() k8sClient, err := remoteRegistry.ClientLoader.LoadKubeClientFromPath(common.GetKubeconfigPath()) - if err != nil && common.GetSecretFilterTags() == "admiral/syncrtay" { + if err != nil && common.GetSecretFilterTags() == common.GetOperatorSecretFilterTags() { ctxLogger.Infof(common.CtxLogFormat, task, registryConfig.IdentityName, "", "", "unable to get kube client") return errors.Wrap(err, "unable to get kube client") } diff --git a/admiral/pkg/clusters/configwriter.go b/admiral/pkg/clusters/configwriter.go index 670f29d3..afd24133 100644 --- a/admiral/pkg/clusters/configwriter.go +++ b/admiral/pkg/clusters/configwriter.go @@ -1,6 +1,7 @@ package clusters import ( + "fmt" "sort" "strconv" "strings" @@ -12,6 +13,8 @@ import ( networkingV1Alpha3 "istio.io/api/networking/v1alpha3" ) +const typeLabel = "type" + // IstioSEBuilder is an interface to construct Service Entry objects // from IdentityConfig objects. It can construct multiple Service Entries // from an IdentityConfig or construct just one given a IdentityConfigEnvironment. 
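The builder above is constructed once per client cluster and emits every ServiceEntry that cluster needs for one identity; `ConsumeIdentityConfigs` in shard_handler.go drives it exactly this way. A minimal sketch of that calling pattern, using the types in this file (the `buildForClusters` helper itself is hypothetical, not part of this change):

```go
// buildForClusters fans one IdentityConfig out across the client clusters
// that need ServiceEntries for it. ServiceEntryBuilder and
// BuildServiceEntriesFromIdentityConfig are defined in this file; this
// helper is only illustrative.
func buildForClusters(ctxLogger *logrus.Entry, rr *RemoteRegistry,
	identityConfig registry.IdentityConfig, clientClusters []string,
) map[string][]*networkingV1Alpha3.ServiceEntry {
	out := make(map[string][]*networkingV1Alpha3.ServiceEntry, len(clientClusters))
	for _, cluster := range clientClusters {
		builder := ServiceEntryBuilder{ClientCluster: cluster, RemoteRegistry: rr}
		serviceEntries, err := builder.BuildServiceEntriesFromIdentityConfig(ctxLogger, identityConfig)
		if err != nil {
			// One cluster failing to build should not block the rest.
			ctxLogger.Warnf("building service entries for cluster %s: %v", cluster, err)
			continue
		}
		out[cluster] = serviceEntries
	}
	return out
}
```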
@@ -36,7 +39,7 @@ func (b *ServiceEntryBuilder) BuildServiceEntriesFromIdentityConfig(ctxLogger *l ) ctxLogger.Infof(common.CtxLogFormat, "buildServiceEntry", identity, common.GetSyncNamespace(), b.ClientCluster, "Beginning to build the SE spec") ingressEndpoints, err := getIngressEndpoints(identityConfig.Clusters) - if err != nil { + if err != nil || len(ingressEndpoints) == 0 { return serviceEntries, err } _, isServerOnClientCluster := ingressEndpoints[b.ClientCluster] @@ -110,7 +113,10 @@ func getServiceEntryEndpoint( var err error endpoint := ingressEndpoints[serverCluster] tmpEp := endpoint.DeepCopy() - tmpEp.Labels["type"] = identityConfigEnvironment.Type + tmpEp.Labels[typeLabel] = identityConfigEnvironment.Type + if len(identityConfigEnvironment.Services) == 0 { + return tmpEp, fmt.Errorf("there were no services for the asset in namespace %s on cluster %s", identityConfigEnvironment.Namespace, serverCluster) + } if clientCluster == serverCluster { for _, service := range identityConfigEnvironment.Services { if service.Weight == -1 { @@ -140,9 +146,8 @@ func getExportTo(ctxLogger *logrus.Entry, registryClient registry.IdentityConfig // For each client asset of cname, we fetch its identityConfig clientIdentityConfig, err = registryClient.GetIdentityConfigByIdentityName(clientAsset, ctxLogger) if err != nil { - // TODO: this should return an error. ctxLogger.Infof(common.CtxLogFormat, "buildServiceEntry", clientAsset, common.GetSyncNamespace(), "", "could not fetch IdentityConfig: "+err.Error()) - continue + return clientNamespaces, err } for _, clientIdentityConfigCluster := range clientIdentityConfig.Clusters { // For each cluster the client asset is deployed on, we check if that cluster is the client cluster we are writing to diff --git a/admiral/pkg/clusters/configwriter_test.go b/admiral/pkg/clusters/configwriter_test.go index c12d0665..79c47348 100644 --- a/admiral/pkg/clusters/configwriter_test.go +++ b/admiral/pkg/clusters/configwriter_test.go @@ -66,7 +66,7 @@ func createMockServiceEntry(env string, identity string, endpointAddress string, } func TestGetIngressEndpoints(t *testing.T) { - identityConfig := registry.GetSampleIdentityConfig() + identityConfig := registry.GetSampleIdentityConfig("sample") expectedIngressEndpoints := map[string]*networkingV1Alpha3.WorkloadEntry{ "cluster1": { Address: "abc-elb.us-west-2.elb.amazonaws.com.", @@ -104,7 +104,7 @@ func TestGetServiceEntryEndpoint(t *testing.T) { admiralParams := admiralParamsForConfigWriterTests() common.ResetSync() common.InitializeConfig(admiralParams) - e2eEnv := registry.GetSampleIdentityConfigEnvironment("e2e", "ns-1-usw2-e2e") + e2eEnv := registry.GetSampleIdentityConfigEnvironment("e2e", "ns-1-usw2-e2e", "sample") ingressEndpoints := map[string]*networkingV1Alpha3.WorkloadEntry{"cluster1": { Address: "abc-elb.us-west-2.elb.amazonaws.com.", Locality: "us-west-2", @@ -226,17 +226,24 @@ func TestBuildServiceEntriesFromIdentityConfig(t *testing.T) { common.InitializeConfig(admiralParams) rr, _ := InitAdmiralOperator(context.Background(), admiralParams) ctxLogger := common.GetCtxLogger(context.Background(), "test", "") - identityConfig := registry.GetSampleIdentityConfig() + identityConfig := registry.GetSampleIdentityConfig("sample") expectedLocalServiceEntryPRF := createMockServiceEntry("prf", "sample", "app-1-spk-root-service.ns-1-usw2-prf.svc.cluster.local.", 8090, []string{"istio-system", "ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}) expectedLocalServiceEntryE2E := createMockServiceEntry("e2e", "sample", 
"app-1-spk-root-service.ns-1-usw2-e2e.svc.cluster.local.", 8090, []string{"istio-system", "ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}) expectedLocalServiceEntryQAL := createMockServiceEntry("qal", "sample", "app-1-spk-root-service.ns-1-usw2-qal.svc.cluster.local.", 8090, []string{"istio-system", "ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}) expectedLocalServiceEntries := []*networkingV1Alpha3.ServiceEntry{&expectedLocalServiceEntryQAL, &expectedLocalServiceEntryPRF, &expectedLocalServiceEntryE2E} + identityConfigFailsIngressEndpoints := registry.GetSampleIdentityConfig("sample") + identityConfigFailsIngressEndpoints.Clusters = map[string]*registry.IdentityConfigCluster{} + identityConfigFailsExportTo := registry.GetSampleIdentityConfig("sample") + identityConfigFailsExportTo.ClientAssets["fake"] = "fake" + identityConfigFailsServiceEntryEndpoint := registry.GetSampleIdentityConfig("sample") + identityConfigFailsServiceEntryEndpoint.Clusters["cluster1"].Environment["e2e"].Services = make(map[string]*registry.RegistryServiceConfig) testCases := []struct { name string clientCluster string event admiral.EventType identityConfig registry.IdentityConfig expectedServiceEntries []*networkingV1Alpha3.ServiceEntry + expectedErr bool }{ { name: "Given information to build an se, " + @@ -246,13 +253,41 @@ func TestBuildServiceEntriesFromIdentityConfig(t *testing.T) { event: admiral.Add, identityConfig: identityConfig, expectedServiceEntries: expectedLocalServiceEntries, + expectedErr: false, + }, + { + name: "Given getIngressEndpoints fails with a non-nil error or is empty, " + + "Then there should be an empty array of returned service entries", + clientCluster: "cluster1", + event: admiral.Add, + identityConfig: identityConfigFailsIngressEndpoints, + expectedServiceEntries: make([]*networkingV1Alpha3.ServiceEntry, 0), + expectedErr: false, + }, + { + name: "Given getExportTo fails with a non-nil error, " + + "Then there should be an empty array of returned service entries", + clientCluster: "cluster1", + event: admiral.Add, + identityConfig: identityConfigFailsExportTo, + expectedServiceEntries: make([]*networkingV1Alpha3.ServiceEntry, 0), + expectedErr: true, + }, + { + name: "Given getServiceEntryEndpoint fails with a non-nil error, " + + "Then there should be an empty array of returned service entries", + clientCluster: "cluster1", + event: admiral.Add, + identityConfig: identityConfigFailsServiceEntryEndpoint, + expectedServiceEntries: make([]*networkingV1Alpha3.ServiceEntry, 0), + expectedErr: true, }, } for _, c := range testCases { t.Run(c.name, func(t *testing.T) { serviceEntryBuilder := ServiceEntryBuilder{ClientCluster: c.clientCluster, RemoteRegistry: rr} serviceEntries, err := serviceEntryBuilder.BuildServiceEntriesFromIdentityConfig(ctxLogger, c.identityConfig) - if err != nil { + if err != nil && !c.expectedErr { t.Errorf("want=%v, \ngot=%v", nil, err) } opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.ServiceEntry{}, networkingV1Alpha3.ServicePort{}, networkingV1Alpha3.WorkloadEntry{}) diff --git a/admiral/pkg/clusters/serviceentry.go b/admiral/pkg/clusters/serviceentry.go index aa01f46a..ba583a5b 100644 --- a/admiral/pkg/clusters/serviceentry.go +++ b/admiral/pkg/clusters/serviceentry.go @@ -1085,6 +1085,7 @@ func modifySidecarForLocalClusterCommunication( sidecar, err := sidecarConfig.IstioClient.NetworkingV1alpha3().Sidecars(sidecarNamespace).Get(ctx, common.GetWorkloadSidecarName(), v12.GetOptions{}) if err != nil { + ctxLogger.Warnf(common.CtxLogFormat, 
"modifySidecarForLocalClusterCommunication", sourceIdentity, sidecarNamespace, rc.ClusterID, err) return } if sidecar == nil || (sidecar.Spec.Egress == nil) { @@ -1132,12 +1133,40 @@ func addUpdateSidecar(ctxLogger *logrus.Entry, ctx context.Context, obj *v1alpha } _, err = rc.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars(namespace).Update(ctx, obj, v12.UpdateOptions{}) if err != nil { + err = retryUpdatingSidecar(ctxLogger, ctx, obj, exist, namespace, rc, err, "Update") ctxLogger.Infof(LogErrFormat, "Update", "Sidecar", obj.Name, rc.ClusterID, err) } else { ctxLogger.Infof(LogErrFormat, "Update", "Sidecar", obj.Name, rc.ClusterID, "Success") } } +func retryUpdatingSidecar(ctxLogger *logrus.Entry, ctx context.Context, obj *v1alpha3.Sidecar, exist *v1alpha3.Sidecar, namespace string, rc *RemoteController, err error, op string) error { + numRetries := 5 + if err != nil && (k8sErrors.IsConflict(err) || k8sErrors.IsInvalid(err)) { + for i := 0; i < numRetries; i++ { + ctxLogger.Errorf(common.CtxLogFormat, op, obj.Name, obj.Namespace, rc.ClusterID, err.Error()+". retrying sidecar update.") + + updatedSidecar, err := rc.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars(namespace).Get(ctx, exist.Name, v12.GetOptions{}) + // if old sidecar not found, just create a new sidecar instead + if err != nil { + ctxLogger.Infof(common.CtxLogFormat, op, exist.Name, exist.Namespace, rc.ClusterID, err.Error()+fmt.Sprintf(". Error getting old sidecar")) + continue + } + existingResourceVersion := updatedSidecar.GetResourceVersion() + ctxLogger.Infof(common.CtxLogFormat, op, obj.Name, obj.Namespace, rc.ClusterID, fmt.Sprintf("existingResourceVersion=%s resourceVersionUsedForUpdate=%s", updatedSidecar.ResourceVersion, obj.ResourceVersion)) + updatedSidecar.Spec = obj.Spec + updatedSidecar.Annotations = obj.Annotations + updatedSidecar.Labels = obj.Labels + updatedSidecar.SetResourceVersion(existingResourceVersion) + _, err = rc.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars(namespace).Update(ctx, updatedSidecar, v12.UpdateOptions{}) + if err == nil { + return nil + } + } + } + return err +} + func copySidecar(sidecar *v1alpha3.Sidecar) *v1alpha3.Sidecar { newSidecarObj := &v1alpha3.Sidecar{} newSidecarObj.Spec.WorkloadSelector = sidecar.Spec.WorkloadSelector @@ -1226,7 +1255,6 @@ func AddServiceEntriesWithDrWorker( //partitionedIdentity holds the originally passed in identity which could have a partition prefix partitionedIdentity := identityId //identityId is guaranteed to have the non-partitioned identity - // Operator Branch 1: since partition cache will not be filled, return identityId from getNonPartitionedIdentity identityId = getNonPartitionedIdentity(rr.AdmiralCache, identityId) // Operator: When calling this function make a channel with one cluster in it for cluster := range clusters { // TODO log cluster / service entry @@ -1238,6 +1266,10 @@ func AddServiceEntriesWithDrWorker( addSEorDRToAClusterError error ) + if common.IsAdmiralOperatorMode() { + syncNamespace = common.GetOperatorSyncNamespace() + } + rc := rr.GetRemoteController(cluster) if rc == nil { ctxLogger.Warnf(common.CtxLogFormat, "AddServiceEntriesWithDrWorker", "", "", cluster, "remote controller not found for the cluster") @@ -1252,7 +1284,6 @@ func AddServiceEntriesWithDrWorker( } //this get is within the loop to avoid race condition when one event could update destination rule on stale data - // TODO: Operator: Fill these caches in AdmiralCache in shardHandler globalTrafficPolicy, err := 
cache.GlobalTrafficCache.GetFromIdentity(partitionedIdentity, env) if err != nil { ctxLogger.Errorf(LogErrFormat, "GlobalTrafficCache", "", "", cluster, err.Error()) @@ -1452,12 +1483,11 @@ func AddServiceEntriesWithDrWorker( // build list of gateway clusters gwClusters := []string{} for _, gwAlias := range common.GetGatewayAssetAliases() { - // TODO: Operator fills this cache in produceIdentityConfigs dependents := rr.AdmiralCache.IdentityDependencyCache.Get(partitionedIdentity) if dependents != nil && dependents.Len() > 0 { dependents.Range(func(_ string, dependent string) { if strings.Contains(strings.ToLower(dependent), strings.ToLower(gwAlias)) { - gwClustersMap := getClusters(rr, dependent) + gwClustersMap := getClusters(rr, dependent, ctxLogger) if gwClustersMap != nil { for _, cluster := range gwClustersMap.GetKeys() { gwClusters = append(gwClusters, cluster) @@ -1532,7 +1562,6 @@ func AddServiceEntriesWithDrWorker( } } } - errors <- addSEorDRToAClusterError } } @@ -1547,10 +1576,18 @@ func getClusterRegion(rr *RemoteRegistry, cluster string, rc *RemoteController) return "", fmt.Errorf("failed to get region of cluster %v", cluster) } -func getClusters(rr *RemoteRegistry, dependent string) *common.Map { +func getClusters(rr *RemoteRegistry, dependent string, ctxLogger *logrus.Entry) *common.Map { if common.IsAdmiralOperatorMode() { - // TODO: go through registry client to pull dependent identity clusters and construct map... - return nil + // Any better way than calling service registry here? + dependentIdentityConfig, err := rr.RegistryClient.GetIdentityConfigByIdentityName(dependent, ctxLogger) + if err != nil { + return nil + } + gwClusterMap := common.NewMap() + for _, cluster := range dependentIdentityConfig.Clusters { + gwClusterMap.Put(cluster.Name, cluster.Name) + } + return gwClusterMap } return rr.AdmiralCache.IdentityClusterCache.Get(dependent) } diff --git a/admiral/pkg/clusters/serviceentry_test.go b/admiral/pkg/clusters/serviceentry_test.go index 3795a881..17d1894a 100644 --- a/admiral/pkg/clusters/serviceentry_test.go +++ b/admiral/pkg/clusters/serviceentry_test.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/util" "github.com/istio-ecosystem/admiral/admiral/pkg/registry" registryMocks "github.com/istio-ecosystem/admiral/admiral/pkg/registry/mocks" "github.com/stretchr/testify/mock" @@ -3010,6 +3011,78 @@ func TestModifyExistingSidecarForLocalClusterCommunication(t *testing.T) { } } +func TestRetryUpdatingSidecar(t *testing.T) { + ctxLogger := logrus.WithFields(logrus.Fields{"txId": "abc"}) + setupForServiceEntryTests() + var ( + assetIdentity = "test-identity" + identityNamespace = "test-sidecar-namespace" + sidecarName = "default" + assetHostsList = []string{"test-host"} + sidecar = &v1alpha3.Sidecar{ + ObjectMeta: metav1.ObjectMeta{ + Name: sidecarName, + Namespace: identityNamespace, + ResourceVersion: "12345", + }, + Spec: istioNetworkingV1Alpha3.Sidecar{ + Egress: []*istioNetworkingV1Alpha3.IstioEgressListener{ + { + Hosts: assetHostsList, + }, + }, + }, + } + sidecarController = &istio.SidecarController{} + remoteController = &RemoteController{} + sidecarCacheEgressMap = common.NewSidecarEgressMap() + ) + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + sidecarCacheEgressMap.Put( + assetIdentity, + "test-dependency-namespace", + "test-local-fqdn", + map[string]string{ + "test.myservice.global": "1", + }, + ) + 
remoteController.SidecarController = sidecarController + sidecarController.IstioClient = istiofake.NewSimpleClientset() + createdSidecar, _ := sidecarController.IstioClient.NetworkingV1alpha3().Sidecars(identityNamespace).Create(context.TODO(), sidecar, metav1.CreateOptions{}) + sidecarEgressMap := make(map[string]common.SidecarEgress) + cnameMap := common.NewMap() + cnameMap.Put("test.myservice.global", "1") + sidecarEgressMap["test-dependency-namespace"] = common.SidecarEgress{Namespace: "test-dependency-namespace", FQDN: "test-local-fqdn", CNAMEs: cnameMap} + newSidecar := copySidecar(createdSidecar) + egressHosts := make(map[string]string) + for _, sidecarEgress := range sidecarEgressMap { + egressHost := sidecarEgress.Namespace + "/" + sidecarEgress.FQDN + egressHosts[egressHost] = egressHost + sidecarEgress.CNAMEs.Range(func(k, v string) { + scopedCname := sidecarEgress.Namespace + "/" + k + egressHosts[scopedCname] = scopedCname + }) + } + for egressHost := range egressHosts { + if !util.Contains(newSidecar.Spec.Egress[0].Hosts, egressHost) { + newSidecar.Spec.Egress[0].Hosts = append(newSidecar.Spec.Egress[0].Hosts, egressHost) + } + } + newSidecarConfig := createSidecarSkeleton(newSidecar.Spec, common.GetWorkloadSidecarName(), identityNamespace) + err := retryUpdatingSidecar(ctxLogger, ctx, newSidecarConfig, createdSidecar, identityNamespace, remoteController, k8sErrors.NewConflict(schema.GroupResource{}, "", nil), "Add") + if err != nil { + t.Errorf("failed to retry updating sidecar, got err: %v", err) + } + updatedSidecar, err := sidecarController.IstioClient.NetworkingV1alpha3().Sidecars("test-sidecar-namespace").Get(ctx, "default", metav1.GetOptions{}) + if err != nil { + t.Errorf("failed to get updated sidecar, got err: %v", err) + } + if updatedSidecar.ResourceVersion != createdSidecar.ResourceVersion { + t.Errorf("resource version check failed, expected %v but got %v", createdSidecar.ResourceVersion, updatedSidecar.ResourceVersion) + } +} + func TestCreateServiceEntry(t *testing.T) { setupForServiceEntryTests() ctxLogger := logrus.WithFields(logrus.Fields{ @@ -9831,6 +9904,40 @@ func TestPartitionAwarenessExportToMultipleRemote(t *testing.T) { } } +func TestGetClusters(t *testing.T) { + admiralParams := admiralParamsForServiceEntryTests() + admiralParams.AdmiralOperatorMode = true + admiralParams.OperatorSecretFilterTags = "admiral/syncoperator" + admiralParams.SecretFilterTags = "admiral/sync" + common.ResetSync() + common.InitializeConfig(admiralParams) + rr, _ := InitAdmiral(context.Background(), admiralParams) + expectedgwClusterMap := common.NewMap() + expectedgwClusterMap.Put("cluster1", "cluster1") + ctxLogger := logrus.WithFields(logrus.Fields{"txId": "abc"}) + testCases := []struct { + name string + dependent string + expectedMap *common.Map + }{ + { + name: "Given that dependent is a valid identity, " + + "When we call getClusters on it, " + + "Then we get a map with the clusters it is deployed on", + dependent: "sample", + expectedMap: expectedgwClusterMap, + }, + } + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + gwClusterMap := getClusters(rr, c.dependent, ctxLogger) + if !reflect.DeepEqual(gwClusterMap, c.expectedMap) { + t.Errorf("got=%+v, want %+v", gwClusterMap, c.expectedMap) + } + }) + } +} + func TestStateSyncerConfiguration(t *testing.T) { var ( env = "test" diff --git a/admiral/pkg/clusters/shard_handler.go b/admiral/pkg/clusters/shard_handler.go index 5264bd88..c4a924b4 100644 --- a/admiral/pkg/clusters/shard_handler.go +++ 
b/admiral/pkg/clusters/shard_handler.go @@ -9,12 +9,9 @@ import ( admiralapiv1 "github.com/istio-ecosystem/admiral-api/pkg/apis/admiral/v1" "github.com/istio-ecosystem/admiral/admiral/pkg/registry" - log "github.com/sirupsen/logrus" - k8sErrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + log "github.com/sirupsen/logrus" ) type ShardHandler struct { @@ -85,6 +82,7 @@ func HandleEventForShard(ctx context.Context, event admiral.EventType, obj *admi // ProduceIdentityConfigsFromShard creates a registry client and uses it to get the identity configs // of the assets on the shard, and puts those into configWriterData which go into the job channel func ProduceIdentityConfigsFromShard(ctxLogger *log.Entry, shard admiralapiv1.Shard, configWriterData chan<- *ConfigWriterData, rr *RemoteRegistry, producerWG *sync.WaitGroup) { + cnames := make(map[string]string) for _, clusterShard := range shard.Spec.Clusters { for _, identityItem := range clusterShard.Identities { identityConfig, err := rr.RegistryClient.GetIdentityConfigByIdentityName(identityItem.Name, ctxLogger) @@ -92,12 +90,37 @@ func ProduceIdentityConfigsFromShard(ctxLogger *log.Entry, shard admiralapiv1.Sh ctxLogger.Warnf(common.CtxLogFormat, "ProduceIdentityConfig", identityItem.Name, shard.Namespace, clusterShard.Name, err) } ctxLogger.Infof(common.CtxLogFormat, "ProduceIdentityConfig", identityConfig.IdentityName, shard.Namespace, clusterShard.Name, "successfully produced IdentityConfig") - //TODO: Fill rr.AdmiralCache - //1. IdentityDependencyCache (identityConfig.IdentityName -> clientAssets) - //2. GlobalTrafficCache (id + env -> gtp) - //3. OutlierDetectionCache (id + env -> od) - //4. ClientConnectionConfigCache (id + env -> ccc) - //5. ClusterLocalityCache (cluster -> cluster -> locality) (don't care about set functionality, only one locality per cluster) + // Fill the IdentityDependencyCache + for _, clientAsset := range identityConfig.ClientAssets { + rr.AdmiralCache.IdentityDependencyCache.Put(identityConfig.IdentityName, clientAsset, clientAsset) + } + // Fill the GTP, OD, and CCC caches + for _, identityConfigCluster := range identityConfig.Clusters { + for _, identityConfigEnv := range identityConfigCluster.Environment { + err = rr.AdmiralCache.GlobalTrafficCache.Put(&identityConfigEnv.TrafficPolicy.GlobalTrafficPolicy) + if err != nil { + ctxLogger.Warnf(common.CtxLogFormat, "ProduceIdentityConfigGTPPut", identityItem.Name, shard.Namespace, clusterShard.Name, err) + } + err = rr.AdmiralCache.OutlierDetectionCache.Put(&identityConfigEnv.TrafficPolicy.OutlierDetection) + if err != nil { + ctxLogger.Warnf(common.CtxLogFormat, "ProduceIdentityConfigODPut", identityItem.Name, shard.Namespace, clusterShard.Name, err) + } + err = rr.AdmiralCache.ClientConnectionConfigCache.Put(&identityConfigEnv.TrafficPolicy.ClientConnectionConfig) + if err != nil { + ctxLogger.Warnf(common.CtxLogFormat, "ProduceIdentityConfigCCCPut", identityItem.Name, shard.Namespace, clusterShard.Name, err) + } + // Fill the DependencyNamespaceCache + for _, clientAsset := range identityConfig.ClientAssets { + //TODO: How to deal with multiple services here? 
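+					// The cname follows the usual Admiral mesh-host shape, <env>.<identity>.<hostnameSuffix>
+					// (e.g. e2e.sample.mesh), while localFqdn is the in-cluster address built from the
+					// service name, namespace, and local domain suffix (svc.cluster.local.). Both feed the
+					// DependencyNamespaceCache that modifySidecarForLocalClusterCommunication later reads
+					// when expanding sidecar egress hosts.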
+ cname := common.GetCnameVal([]string{identityConfigEnv.Name, strings.ToLower(identityConfig.IdentityName), common.GetHostnameSuffix()}) + cnames[cname] = "1" + localFqdn := identityConfigEnv.ServiceName + common.Sep + identityConfigEnv.Namespace + common.GetLocalDomainSuffix() + rr.AdmiralCache.DependencyNamespaceCache.Put(clientAsset, identityConfigEnv.Namespace, localFqdn, cnames) + } + } + // Fill the ClusterLocalityCache + rr.AdmiralCache.ClusterLocalityCache.Put(identityConfigCluster.Name, identityConfigCluster.Name, identityConfigCluster.Locality) + } configWriterData <- &ConfigWriterData{ IdentityConfig: &identityConfig, ClusterName: clusterShard.Name, @@ -118,58 +141,65 @@ func ConsumeIdentityConfigs(ctxLogger *log.Entry, ctx context.Context, configWri assetName := identityConfig.IdentityName clientCluster := data.ClusterName ctxLogger.Infof(common.CtxLogFormat, "ConsumeIdentityConfig", assetName, "", clientCluster, "starting to consume identityConfig") - //TODO: doesn't make much sense to have this as a struct, easier to just pass in the cluster and remote registry serviceEntryBuilder := ServiceEntryBuilder{ClientCluster: clientCluster, RemoteRegistry: rr} serviceEntries, err := serviceEntryBuilder.BuildServiceEntriesFromIdentityConfig(ctxLogger, *identityConfig) if err != nil { ctxLogger.Warnf(common.CtxLogFormat, "ConsumeIdentityConfig", assetName, "", clientCluster, err) data.Result = err.Error() } - // service deployed in cluster 1 with 2 env qal, e2e, cluster 2 with 3 env qal, e2e, prod - // write SEs to cluster 1 - // env -> list of cluster - // env -> se - // check if any of the clusters are a source cluster -> rethink this, won't work if one env is on a cluster but not on another - //isServiceEntryModifyCalledForSourceCluster := false - //for _, cluster := range identityConfig.Clusters { - // if cluster.Name == clientCluster { - // isServiceEntryModifyCalledForSourceCluster = true - // break - // } - //} - //ctx = context.WithValue(ctx, common.EventResourceType, identityConfig.Clusters[0].Environment[0].Type) - for _, se := range serviceEntries { - //clusters := make(chan string, 1) - //errors := make(chan error, 1) - //clusters <- clientCluster - //AddServiceEntriesWithDrWorker(ctxLogger, ctx, rr, - // true, //doGenerateAdditionalEndpoints() - // isServiceEntryModifyCalledForSourceCluster, - // assetName, - // strings.Split(se.Hosts[0], common.Sep)[0], - // se, - // clusters, - // errors) - rc := rr.GetRemoteController(clientCluster) - seName := strings.ToLower(se.Hosts[0]) + "-se" - sec := rc.ServiceEntryController - //TODO: se reconciliation cache - oldServiceEntry := sec.Cache.Get(seName, clientCluster) - if oldServiceEntry == nil { - ctxLogger.Infof(common.CtxLogFormat, "ConsumeIdentityConfig", seName, "", clientCluster, "starting to write se to cluster") - oldServiceEntry, err = rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(common.GetOperatorSyncNamespace()).Get(ctx, seName, metav1.GetOptions{}) - // if old service entry not find, just create a new service entry instead - if err != nil && k8sErrors.IsNotFound(err) { - ctxLogger.Infof(common.CtxLogFormat, "ConsumeIdentityConfig", seName, "", clientCluster, fmt.Sprintf("failed fetching old service entry, error=%v", err)) - oldServiceEntry = nil + isServiceEntryModifyCalledForSourceCluster := false + sourceClusterEnvironmentNamespaces := map[string]string{} + for _, cluster := range identityConfig.Clusters { + if cluster.Name == clientCluster { + isServiceEntryModifyCalledForSourceCluster = 
true + for _, environment := range cluster.Environment { + sourceClusterEnvironmentNamespaces[environment.Name] = environment.Namespace } + break } - newServiceEntry := createServiceEntrySkeleton(*se, seName, common.GetOperatorSyncNamespace()) - err = addUpdateServiceEntry(ctxLogger, ctx, newServiceEntry, oldServiceEntry, common.GetOperatorSyncNamespace(), rc) + } + // Get any type from the identityConfig + for _, cv := range identityConfig.Clusters { + for _, ev := range cv.Environment { + ctx = context.WithValue(ctx, common.EventResourceType, ev.Type) + break + } + break + } + for _, se := range serviceEntries { + isServiceEntryModifyCalledForSourceClusterAndEnv := false + env := strings.Split(se.Hosts[0], common.Sep)[0] + if _, ok := sourceClusterEnvironmentNamespaces[env]; ok && isServiceEntryModifyCalledForSourceCluster { + isServiceEntryModifyCalledForSourceClusterAndEnv = true + } + clusters := make(chan string, 1) + errors := make(chan error, 1) + go AddServiceEntriesWithDrWorker(ctxLogger, ctx, rr, + true, //TODO: doGenerateAdditionalEndpoints() + isServiceEntryModifyCalledForSourceClusterAndEnv, + assetName, + strings.Split(se.Hosts[0], common.Sep)[0], + copyServiceEntry(se), + clusters, + errors) + clusters <- clientCluster + close(clusters) + err := <-errors if err != nil { - ctxLogger.Warnf(common.CtxLogFormat, "ConsumeIdentityConfig", seName, "", clientCluster, err) + ctxLogger.Warnf(common.CtxLogFormat, "ConsumeIdentityConfig", strings.ToLower(se.Hosts[0])+"-se", "", clientCluster, err) data.Result = err.Error() } + if isServiceEntryModifyCalledForSourceClusterAndEnv { + ctxLogger.Infof(common.CtxLogFormat, "ConsumeIdentityConfig", strings.ToLower(se.Hosts[0])+"-se", "", clientCluster, "modifying Sidecar for local cluster communication") + err = modifySidecarForLocalClusterCommunication( + ctxLogger, + ctx, sourceClusterEnvironmentNamespaces[env], assetName, + rr.AdmiralCache.DependencyNamespaceCache, rr.GetRemoteController(clientCluster)) + if err != nil { + ctxLogger.Errorf(common.CtxLogFormat, "modifySidecarForLocalClusterCommunication", + assetName, sourceClusterEnvironmentNamespaces[env], "", err) + } + } } configWriterDataResults <- data } diff --git a/admiral/pkg/clusters/shard_handler_test.go b/admiral/pkg/clusters/shard_handler_test.go index 25f2bf7c..2f21ab53 100644 --- a/admiral/pkg/clusters/shard_handler_test.go +++ b/admiral/pkg/clusters/shard_handler_test.go @@ -4,12 +4,18 @@ import ( "context" "encoding/json" "fmt" + "github.com/golang/protobuf/ptypes/duration" + "github.com/google/go-cmp/cmp" + "google.golang.org/protobuf/testing/protocmp" + "google.golang.org/protobuf/types/known/wrapperspb" + "istio.io/client-go/pkg/apis/networking/v1alpha3" "sync" "testing" admiralapiv1 "github.com/istio-ecosystem/admiral-api/pkg/apis/admiral/v1" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio" + "github.com/istio-ecosystem/admiral/admiral/pkg/test" "github.com/sirupsen/logrus" istioNetworkingV1Alpha3 "istio.io/api/networking/v1alpha3" istiofake "istio.io/client-go/pkg/clientset/versioned/fake" @@ -23,10 +29,14 @@ func setupForShardTests() common.AdmiralParams { admiralParams := admiralParamsForServiceEntryTests() admiralParams.EnableAbsoluteFQDN = true admiralParams.EnableAbsoluteFQDNForLocalEndpoints = true + admiralParams.DisableIPGeneration = true admiralParams.SANPrefix = "pre-prod.api.org.com" admiralParams.ExportToMaxNamespaces 
= 35 admiralParams.AdmiralOperatorMode = true - admiralParams.OperatorSyncNamespace = "shard-namespace" + admiralParams.OperatorSyncNamespace = "admiral-operator-sync" + admiralParams.AdditionalEndpointSuffixes = []string{"org"} + admiralParams.SecretFilterTags = "admiral/sync" + admiralParams.OperatorSecretFilterTags = "admiral/syncoperator" shardTestSingleton.Do(func() { common.ResetSync() initHappened = true @@ -40,6 +50,28 @@ func setupForShardTests() common.AdmiralParams { return admiralParams } +func createRemoteControllerForShardTests(cluster string) *RemoteController { + return &RemoteController{ + ClusterID: cluster, + ServiceEntryController: &istio.ServiceEntryController{ + IstioClient: istiofake.NewSimpleClientset(), + Cache: istio.NewServiceEntryCache(), + }, + DestinationRuleController: &istio.DestinationRuleController{ + IstioClient: istiofake.NewSimpleClientset(), + Cache: istio.NewDestinationRuleCache(), + }, + VirtualServiceController: &istio.VirtualServiceController{ + IstioClient: istiofake.NewSimpleClientset(), + VirtualServiceHandler: &test.MockVirtualServiceHandler{}, + }, + SidecarController: &istio.SidecarController{ + IstioClient: istiofake.NewSimpleClientset(), + SidecarHandler: &test.MockSidecarHandler{}, + }, + } +} + func createMockShard(shardName string, clusterName string, identityName string, identityEnv string) *admiralapiv1.Shard { identityItem := admiralapiv1.IdentityItem{ Name: identityName, @@ -78,70 +110,141 @@ func jsonPrint(v any) string { func TestShardHandler_Added(t *testing.T) { admiralParams := setupForShardTests() rr, _ := InitAdmiralOperator(context.Background(), admiralParams) - rc1 := &RemoteController{ - ClusterID: "cluster1", - ServiceEntryController: &istio.ServiceEntryController{ - IstioClient: istiofake.NewSimpleClientset(), - Cache: istio.NewServiceEntryCache(), - }, - } - rc2 := &RemoteController{ - ClusterID: "cluster-usw2-k8s", - ServiceEntryController: &istio.ServiceEntryController{ - IstioClient: istiofake.NewSimpleClientset(), - Cache: istio.NewServiceEntryCache(), - }, - } + rr.AdmiralDatabaseClient = &mockDatabaseClientWithError{} + rc1 := createRemoteControllerForShardTests("cluster1") + rc2 := createRemoteControllerForShardTests("cluster-usw2-k8s") rr.PutRemoteController("cluster1", rc1) rr.PutRemoteController("cluster-usw2-k8s", rc2) - sampleShard1 := createMockShard("shard-sample", "cluster1", "sample", "e2e") + //sampleShard1 := createMockShard("shard-sample", "cluster1", "sample", "e2e") sampleShard2 := createMockShard("blackhole-shard", "cluster-usw2-k8s", "ppdmeshtestblackhole", "ppd") - shardHandler := &ShardHandler{ - RemoteRegistry: rr, - } - se1 := &istioNetworkingV1Alpha3.ServiceEntry{ - Hosts: []string{"e2e.sample.mesh"}, - Ports: []*istioNetworkingV1Alpha3.ServicePort{{Number: 80, Protocol: "http", Name: "http"}}, - Location: istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL, - Resolution: istioNetworkingV1Alpha3.ServiceEntry_DNS, - Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{{Address: "app-1-spk-root-service.ns-1-usw2-e2e.svc.cluster.local.", Ports: map[string]uint32{"http": 8090}, Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "rollout"}, Locality: "us-west-2"}}, - ExportTo: []string{common.NamespaceIstioSystem, "ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}, - SubjectAltNames: []string{"spiffe://pre-prod.api.org.com/sample"}, + shardHandler := &ShardHandler{RemoteRegistry: rr} + defaultSidecar := &v1alpha3.Sidecar{ + ObjectMeta: v1.ObjectMeta{ + Name: "default", + }, + 
Spec: istioNetworkingV1Alpha3.Sidecar{ + Egress: []*istioNetworkingV1Alpha3.IstioEgressListener{ + { + Hosts: []string{"./*", common.NamespaceIstioSystem + "/*", common.GetOperatorSyncNamespace() + "/*"}, + }, + }, + }, } + //se1 := &istioNetworkingV1Alpha3.ServiceEntry{ + // Hosts: []string{"testDnsPrefix.e2e.sample.mesh"}, + // Ports: []*istioNetworkingV1Alpha3.ServicePort{{Number: 80, Protocol: "http", Name: "http"}}, + // Location: istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL, + // Resolution: istioNetworkingV1Alpha3.ServiceEntry_DNS, + // Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{ + // {Address: "app-1-spk-root-service.ns-1-usw2-e2e.svc.cluster.local.", + // Ports: map[string]uint32{"http": 8090}, + // Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "rollout"}, + // Locality: "us-west-2"}}, + // ExportTo: []string{common.NamespaceIstioSystem, "ns-1-usw2-e2e", "ns-1-usw2-prf", "ns-1-usw2-qal"}, + // SubjectAltNames: []string{"spiffe://pre-prod.api.org.com/sample"}, + //} se2 := &istioNetworkingV1Alpha3.ServiceEntry{ - Hosts: []string{"ppd.ppdmeshtestblackhole.mesh"}, + Hosts: []string{"testDnsPrefix.ppd.ppdmeshtestblackhole.mesh"}, Ports: []*istioNetworkingV1Alpha3.ServicePort{{Number: 80, Protocol: "http", Name: "http"}}, Location: istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL, Resolution: istioNetworkingV1Alpha3.ServiceEntry_DNS, Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{ - {Address: "abc-elb.us-east-2.elb.amazonaws.com.", Ports: map[string]uint32{"http": 15443}, Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "deployment"}, Locality: "us-east-2"}, + {Address: "abc-elb.us-east-2.elb.amazonaws.com.", + Ports: map[string]uint32{"http": 15443}, + Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "deployment"}, + Locality: "us-east-2"}, }, ExportTo: []string{common.NamespaceIstioSystem, "services-inboundd268-usw2-dev"}, SubjectAltNames: []string{"spiffe://pre-prod.api.org.com/ppdmeshtestblackhole"}, } + sidecar2 := &istioNetworkingV1Alpha3.Sidecar{ + Egress: []*istioNetworkingV1Alpha3.IstioEgressListener{ + { + Hosts: []string{"./*", common.NamespaceIstioSystem + "/*", common.GetOperatorSyncNamespace() + "/*"}, + }, + }, + } + rc1.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars("ns-1-usw2-prf").Create(context.Background(), defaultSidecar, v1.CreateOptions{}) + rc1.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars("ns-1-usw2-e2e").Create(context.Background(), defaultSidecar, v1.CreateOptions{}) + rc1.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars("ns-1-usw2-qal").Create(context.Background(), defaultSidecar, v1.CreateOptions{}) + rc2.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars("services-blackholed268-usw2-dev").Create(context.Background(), defaultSidecar, v1.CreateOptions{}) + dr2 := &istioNetworkingV1Alpha3.DestinationRule{ + Host: "ppd.ppdmeshtestblackhole.mesh", + TrafficPolicy: &istioNetworkingV1Alpha3.TrafficPolicy{ + LoadBalancer: &istioNetworkingV1Alpha3.LoadBalancerSettings{ + LbPolicy: &istioNetworkingV1Alpha3.LoadBalancerSettings_Simple{ + Simple: istioNetworkingV1Alpha3.LoadBalancerSettings_LEAST_REQUEST, + }, + WarmupDurationSecs: &duration.Duration{Seconds: 0}, + }, + ConnectionPool: &istioNetworkingV1Alpha3.ConnectionPoolSettings{ + Http: &istioNetworkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{ + Http2MaxRequests: 1000, + MaxRequestsPerConnection: 5, + }, + }, + OutlierDetection: &istioNetworkingV1Alpha3.OutlierDetection{ 
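+				// Outlier-detection values the handler is expected to emit: eject after 10
+				// consecutive gateway errors (a 5xx count of 0 means 5xx alone never ejects),
+				// re-evaluate every 10s with no base ejection delay, and eject at most 33% of hosts.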
+ ConsecutiveGatewayErrors: wrapperspb.UInt32(10), + Consecutive_5XxErrors: wrapperspb.UInt32(0), + Interval: &duration.Duration{Seconds: 10}, + BaseEjectionTime: &duration.Duration{Seconds: 0}, + MaxEjectionPercent: 33, + }, + Tls: &istioNetworkingV1Alpha3.ClientTLSSettings{ + Mode: istioNetworkingV1Alpha3.ClientTLSSettings_ISTIO_MUTUAL, + }, + }, + ExportTo: []string{common.NamespaceIstioSystem, "services-inboundd268-usw2-dev"}, + } + vs2 := &istioNetworkingV1Alpha3.VirtualService{ + Hosts: []string{"ppd.ppdmeshtestblackhole.org"}, + Http: []*istioNetworkingV1Alpha3.HTTPRoute{ + {Route: []*istioNetworkingV1Alpha3.HTTPRouteDestination{ + {Destination: &istioNetworkingV1Alpha3.Destination{ + Host: "ppd.ppdmeshtestblackhole.mesh", + Port: &istioNetworkingV1Alpha3.PortSelector{Number: 80}, + }, + }, + }, + }, + }, + } testCases := []struct { - name string - rc *RemoteController - shard *admiralapiv1.Shard - expectedSEName string - expectedSE *istioNetworkingV1Alpha3.ServiceEntry + name string + rc *RemoteController + shard *admiralapiv1.Shard + expectedSEName string + expectedSE *istioNetworkingV1Alpha3.ServiceEntry + expectedSidecarNS string + expectedSidecar *istioNetworkingV1Alpha3.Sidecar + expectedDRName string + expectedDR *istioNetworkingV1Alpha3.DestinationRule + expectedVSName string + expectedVS *istioNetworkingV1Alpha3.VirtualService }{ - { - name: "Given the server asset we want to write resources for is deployed on the client cluster " + - "And it is a client of itself " + - "Then an SE with local endpoint and istio-system in exportTo should be built", - rc: rc1, - shard: sampleShard1, - expectedSEName: "e2e.sample.mesh-se", - expectedSE: se1, - }, + //{ + // name: "Given the server asset we want to write resources for is deployed on the client cluster " + + // "And it is a client of itself " + + // "Then an SE with local endpoint and istio-system in exportTo should be built", + // rc: rc1, + // shard: sampleShard1, + // expectedSEName: "testdnsprefix.e2e.sample.mesh-se", + // expectedSE: se1, + // expectedSidecarNS: "ns-1-usw2-e2e", + //}, { name: "Given the server asset we want to write resources for is deployed on a remote cluster in env A and a client cluster in env B" + "Then an SE with only remote endpoint and istio-system in exportTo should be built for env B", - rc: rc2, - shard: sampleShard2, - expectedSEName: "ppd.ppdmeshtestblackhole.mesh-se", - expectedSE: se2, + rc: rc2, + shard: sampleShard2, + expectedSEName: "testdnsprefix.ppd.ppdmeshtestblackhole.mesh-se", + expectedSE: se2, + expectedSidecarNS: "services-blackholed268-usw2-dev", + expectedSidecar: sidecar2, + expectedDRName: "ppd.ppdmeshtestblackhole.mesh-default-dr", + expectedDR: dr2, + expectedVSName: "ppd.ppdmeshtestblackhole.org-vs", + expectedVS: vs2, }, //TODO: Given the server asset we want to write resources for is deployed remotely and locally in the same env, se should have local and remote endpoint and istio-system } @@ -150,17 +253,48 @@ func TestShardHandler_Added(t *testing.T) { t.Run(tt.name, func(t *testing.T) { shErr := shardHandler.Added(context.Background(), tt.shard) if shErr != nil { - t.Errorf("failed to produce SE with err: %v", shErr) + t.Errorf("failed to handle Shard with err: %v", shErr) } + // Check that expected SE matches the produced SE actualSE, seErr := tt.rc.ServiceEntryController.IstioClient.NetworkingV1alpha3().ServiceEntries(common.GetOperatorSyncNamespace()).Get(context.Background(), tt.expectedSEName, v1.GetOptions{}) if seErr != nil { - t.Errorf("failed to get SE with 
err %v", seErr) + t.Errorf("failed to get ServiceEntry with err %v", seErr) } if actualSE == nil { - t.Errorf("expected se to not be nil") + t.Errorf("expected ServiceEntry to not be nil") } else if !compareServiceEntries(&actualSE.Spec, tt.expectedSE) { t.Errorf("got=%v, want=%v", jsonPrint(actualSE.Spec), jsonPrint(tt.expectedSE)) } + // Check that the expected Sidecar matches the produced Sidecar + actualSidecar, sidecarErr := tt.rc.SidecarController.IstioClient.NetworkingV1alpha3().Sidecars(tt.expectedSidecarNS).Get(context.Background(), common.GetWorkloadSidecarName(), v1.GetOptions{}) + if actualSidecar == nil { + t.Errorf("expected Sidecar to not be nil") + } else if !cmp.Equal(&actualSidecar.Spec, tt.expectedSidecar, protocmp.Transform()) { + t.Errorf("got=%v, want=%v", jsonPrint(actualSidecar), jsonPrint(tt.expectedSidecar)) + } + if sidecarErr != nil { + t.Errorf("failed to get Sidecar with err %v", sidecarErr) + } + // Check that the expected DR matches the produced DR + actualDR, drErr := tt.rc.DestinationRuleController.IstioClient.NetworkingV1alpha3().DestinationRules(common.GetOperatorSyncNamespace()).Get(context.Background(), tt.expectedDRName, v1.GetOptions{}) + if actualDR == nil { + t.Errorf("expected DestinationRule to not be nil") + } else if !cmp.Equal(&actualDR.Spec, tt.expectedDR, protocmp.Transform()) { + t.Errorf("got=%v, want=%v", jsonPrint(actualDR), jsonPrint(tt.expectedDR)) + } + if drErr != nil { + t.Errorf("failed to get DestinationRule with err %v", drErr) + } + // Check that the expected VS matches the produced VS + actualVS, vsErr := tt.rc.VirtualServiceController.IstioClient.NetworkingV1alpha3().VirtualServices(common.GetOperatorSyncNamespace()).Get(context.Background(), tt.expectedVSName, v1.GetOptions{}) + if actualVS == nil { + t.Errorf("expected VirtualService to not be nil") + } else if !cmp.Equal(&actualVS.Spec, tt.expectedVS, protocmp.Transform()) { + t.Errorf("got=%v, want=%v", jsonPrint(actualVS), jsonPrint(tt.expectedVS)) + } + if vsErr != nil { + t.Errorf("failed to get VirtualService with err %v", vsErr) + } }) } } diff --git a/admiral/pkg/clusters/testdata/ppdmeshtestblackholeIdentityConfiguration.json b/admiral/pkg/clusters/testdata/ppdmeshtestblackholeIdentityConfiguration.json index 41f40fd2..b794c288 100644 --- a/admiral/pkg/clusters/testdata/ppdmeshtestblackholeIdentityConfiguration.json +++ b/admiral/pkg/clusters/testdata/ppdmeshtestblackholeIdentityConfiguration.json @@ -37,7 +37,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "ppdmeshtestblackhole" + }, + "annotations": { + "env": "prod" + } }, "spec": { "connectionPool": { @@ -51,7 +57,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "ppdmeshtestblackhole" + }, + "annotations": { + "env": "prod" + } }, "spec": { "policy": [ @@ -77,7 +89,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "ppdmeshtestblackhole" + }, + "annotations": { + "env": "prod" + } }, "spec": { "outlier_config": { @@ -126,7 +144,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "ppdmeshtestblackhole" + }, + "annotations": { + "env": "ppd" + } }, "spec": { "connectionPool": { @@ -140,7 +164,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + 
"identity": "ppdmeshtestblackhole" + }, + "annotations": { + "env": "ppd" + } }, "spec": { "policy": [ @@ -166,7 +196,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "ppdmeshtestblackhole" + }, + "annotations": { + "env": "ppd" + } }, "spec": { "outlier_config": { diff --git a/admiral/pkg/clusters/testdata/ppdmeshtestinboundsIdentityConfiguration.json b/admiral/pkg/clusters/testdata/ppdmeshtestinboundsIdentityConfiguration.json index 90d353b2..bc564a40 100644 --- a/admiral/pkg/clusters/testdata/ppdmeshtestinboundsIdentityConfiguration.json +++ b/admiral/pkg/clusters/testdata/ppdmeshtestinboundsIdentityConfiguration.json @@ -37,7 +37,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "ppdmeshtestinbounds" + }, + "annotations": { + "env": "ppd" + } }, "spec": { "connectionPool": { @@ -51,7 +57,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "ppdmeshtestinbounds" + }, + "annotations": { + "env": "ppd" + } }, "spec": { "policy": [ @@ -77,7 +89,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "ppdmeshtestinbounds" + }, + "annotations": { + "env": "ppd" + } }, "spec": { "outlier_config": { diff --git a/admiral/pkg/clusters/testdata/sampleIdentityConfiguration.json b/admiral/pkg/clusters/testdata/sampleIdentityConfiguration.json index 7bb4029f..59f6eebb 100644 --- a/admiral/pkg/clusters/testdata/sampleIdentityConfiguration.json +++ b/admiral/pkg/clusters/testdata/sampleIdentityConfiguration.json @@ -37,7 +37,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "prf" + } }, "spec": { "connectionPool": { @@ -51,7 +57,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "prf" + } }, "spec": { "policy": [ @@ -77,7 +89,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "prf" + } }, "spec": { "outlier_config": { @@ -115,7 +133,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "e2e" + } }, "spec": { "connectionPool": { @@ -129,7 +153,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "e2e" + } }, "spec": { "policy": [ @@ -155,7 +185,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "e2e" + } }, "spec": { "outlier_config": { @@ -193,7 +229,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "qal" + } }, "spec": { "connectionPool": { @@ -207,7 +249,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "qal" + } }, "spec": { "policy": [ @@ -233,7 +281,13 @@ }, "outlierDetection": { "metadata": { 
- "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "qal" + } }, "spec": { "outlier_config": { @@ -248,6 +302,6 @@ } }, "clientAssets": { - "sample": "sample" + "sample": "sample" } } \ No newline at end of file diff --git a/admiral/pkg/clusters/types.go b/admiral/pkg/clusters/types.go index 99c61500..135cf82a 100644 --- a/admiral/pkg/clusters/types.go +++ b/admiral/pkg/clusters/types.go @@ -188,6 +188,7 @@ func NewRemoteRegistry(ctx context.Context, params common.AdmiralParams) *Remote if common.IsAdmiralOperatorMode() { rr.RegistryClient = registry.NewRegistryClient(registry.WithRegistryEndpoint("PLACEHOLDER")) + rr.AdmiralCache.ClusterLocalityCache = common.NewMapOfMaps() } return rr diff --git a/admiral/pkg/controller/common/config.go b/admiral/pkg/controller/common/config.go index cee4c07f..fff85022 100644 --- a/admiral/pkg/controller/common/config.go +++ b/admiral/pkg/controller/common/config.go @@ -482,3 +482,9 @@ func GetShardIdentityLabelValueSet() (string, string) { defer wrapper.RUnlock() return wrapper.params.LabelSet.ShardIdentityLabel, wrapper.params.ShardIdentityValue } + +func GetOperatorSecretFilterTags() string { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.OperatorSecretFilterTags +} diff --git a/admiral/pkg/controller/common/config_test.go b/admiral/pkg/controller/common/config_test.go index a4be2f55..eb22ce05 100644 --- a/admiral/pkg/controller/common/config_test.go +++ b/admiral/pkg/controller/common/config_test.go @@ -43,6 +43,7 @@ func setupForConfigTests() { ExportToMaxNamespaces: 35, AdmiralOperatorMode: false, OperatorSyncNamespace: "admiral-sync", + OperatorSecretFilterTags: "admiral/syncoperator", } ResetSync() initHappened = true @@ -162,6 +163,10 @@ func TestConfigManagement(t *testing.T) { if GetOperatorSyncNamespace() != "admiral-sync" { t.Errorf("operator sync namespace mismatch, expected admiral-sync, got %v", GetOperatorSyncNamespace()) } + + if GetOperatorSecretFilterTags() != "admiral/syncoperator" { + t.Errorf("operator secret filter tags mismatch, expected admiral/syncoperator, got %s", GetOperatorSecretFilterTags()) + } } func TestGetCRDIdentityLabelWithCRDIdentity(t *testing.T) { diff --git a/admiral/pkg/controller/common/types.go b/admiral/pkg/controller/common/types.go index 375e75d9..5be6c8b3 100644 --- a/admiral/pkg/controller/common/types.go +++ b/admiral/pkg/controller/common/types.go @@ -118,6 +118,7 @@ type AdmiralParams struct { AdmiralStateSyncerMode bool OperatorIdentityValue string ShardIdentityValue string + OperatorSecretFilterTags string } func (b AdmiralParams) String() string { diff --git a/admiral/pkg/registry/configCache.go b/admiral/pkg/registry/configCache.go index dc9e41eb..f077f099 100644 --- a/admiral/pkg/registry/configCache.go +++ b/admiral/pkg/registry/configCache.go @@ -52,7 +52,7 @@ func writeToFileLocal(writer ConfigWriter, config *IdentityConfig) error { return err } pathName := currentDir + "/testdata/" + shortAlias[len(shortAlias)-1] + "IdentityConfiguration.json" - if common.GetSecretFilterTags() == "admiral/syncrtay" { + if common.GetSecretFilterTags() == common.GetOperatorSecretFilterTags() && common.GetOperatorSecretFilterTags() != "" { pathName = "/etc/serviceregistry/config/" + shortAlias[len(shortAlias)-1] + "IdentityConfiguration.json" } fmt.Printf("[state syncer] file path=%s", pathName) diff --git a/admiral/pkg/registry/configSyncer.go b/admiral/pkg/registry/configSyncer.go index a21c656c..6fe548a1 100644 --- 
a/admiral/pkg/registry/configSyncer.go
+++ b/admiral/pkg/registry/configSyncer.go
@@ -11,7 +11,6 @@ import (
 const (
 	serviceRegistryIdentityConfigMapName = "service-registry-identityconfig"
 	admiralQaNs                          = "services-admiral-use2-qal"
-	secretLabel                          = "admiral/syncrtay"
 )
 
 type ConfigSyncer interface {
diff --git a/admiral/pkg/registry/configWriter.go b/admiral/pkg/registry/configWriter.go
index 25d964a0..90e60ebe 100644
--- a/admiral/pkg/registry/configWriter.go
+++ b/admiral/pkg/registry/configWriter.go
@@ -38,7 +38,7 @@ func (c *configMapWriter) Write(name string, data []byte) error {
 		err error
 	)
 	defer util.LogElapsedTimeForTask(c.ctxLogger, task, name, "", "", "processingTime")()
-	if common.GetSecretFilterTags() != secretLabel {
+	if common.GetSecretFilterTags() != common.GetOperatorSecretFilterTags() || common.GetOperatorSecretFilterTags() == "" {
 		c.ctxLogger.Infof(common.CtxLogFormat, task, name, "", "", "writing to local file")
 		return writeToFile(name, data)
 	}
diff --git a/admiral/pkg/registry/registry.go b/admiral/pkg/registry/registry.go
index e566262e..a6047776 100644
--- a/admiral/pkg/registry/registry.go
+++ b/admiral/pkg/registry/registry.go
@@ -55,7 +55,7 @@ func (c *registryClient) GetIdentityConfigByIdentityName(identityAlias string, c
 
 func readIdentityConfigFromFile(shortAlias []string) ([]byte, error) {
 	pathName := "testdata/" + shortAlias[len(shortAlias)-1] + "IdentityConfiguration.json"
-	if common.GetSecretFilterTags() == "admiral/syncrtay" {
+	if common.GetSecretFilterTags() == common.GetOperatorSecretFilterTags() && common.GetOperatorSecretFilterTags() != "" {
 		pathName = "/etc/serviceregistry/config/" + shortAlias[len(shortAlias)-1] + "IdentityConfiguration.json"
 	}
 	return os.ReadFile(pathName)
diff --git a/admiral/pkg/registry/registry_test.go b/admiral/pkg/registry/registry_test.go
index ecc0a99c..84bdb9da 100644
--- a/admiral/pkg/registry/registry_test.go
+++ b/admiral/pkg/registry/registry_test.go
@@ -16,7 +16,7 @@ import (
 )
 
 func TestParseIdentityConfigJSON(t *testing.T) {
-	identityConfig := GetSampleIdentityConfig()
+	identityConfig := GetSampleIdentityConfig("sample")
 	testCases := []struct {
 		name           string
 		identityConfig IdentityConfig
@@ -47,7 +47,7 @@ func TestParseIdentityConfigJSON(t *testing.T) {
 }
 
 func TestIdentityConfigGetByIdentityName(t *testing.T) {
-	sampleIdentityConfig := GetSampleIdentityConfig()
+	sampleIdentityConfig := GetSampleIdentityConfig("sample")
 	registryClient := NewRegistryClient(WithRegistryEndpoint("endpoint"))
 	var jsonErr *json.SyntaxError
 	ctxLogger := log.WithContext(context.Background())
@@ -93,7 +93,7 @@ func TestIdentityConfigGetByIdentityName(t *testing.T) {
 }
 
 func TestGetIdentityConfigByClusterName(t *testing.T) {
-	sampleIdentityConfig := GetSampleIdentityConfig()
+	sampleIdentityConfig := GetSampleIdentityConfig("sample")
 	registryClient := NewRegistryClient(WithRegistryEndpoint("endpoint"))
 	var jsonErr *json.SyntaxError
 	ctxLogger := log.WithContext(context.Background())
diff --git a/admiral/pkg/registry/testdata/sampleIdentityConfiguration.json b/admiral/pkg/registry/testdata/sampleIdentityConfiguration.json
index 1da11848..59f6eebb 100644
--- a/admiral/pkg/registry/testdata/sampleIdentityConfiguration.json
+++ b/admiral/pkg/registry/testdata/sampleIdentityConfiguration.json
@@ -37,7 +37,13 @@
     "trafficPolicy": {
       "clientConnectionConfig": {
         "metadata": {
-          "name": "sampleCCC"
+          "name": "sampleCCC",
+          "labels": {
+            "identity": "sample"
+          },
+          "annotations": {
+            "env": "prf"
+          }
         },
         "spec": {
           "connectionPool": {
@@ -51,7 +57,13 @@
       },
"globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "prf" + } }, "spec": { "policy": [ @@ -77,7 +89,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "prf" + } }, "spec": { "outlier_config": { @@ -115,7 +133,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "e2e" + } }, "spec": { "connectionPool": { @@ -129,7 +153,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "e2e" + } }, "spec": { "policy": [ @@ -155,7 +185,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "e2e" + } }, "spec": { "outlier_config": { @@ -193,7 +229,13 @@ "trafficPolicy": { "clientConnectionConfig": { "metadata": { - "name": "sampleCCC" + "name": "sampleCCC", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "qal" + } }, "spec": { "connectionPool": { @@ -207,7 +249,13 @@ }, "globalTrafficPolicy": { "metadata": { - "name": "sampleGTP" + "name": "sampleGTP", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "qal" + } }, "spec": { "policy": [ @@ -233,7 +281,13 @@ }, "outlierDetection": { "metadata": { - "name": "sampleOD" + "name": "sampleOD", + "labels": { + "identity": "sample" + }, + "annotations": { + "env": "qal" + } }, "spec": { "outlier_config": { diff --git a/admiral/pkg/registry/testutils.go b/admiral/pkg/registry/testutils.go index 46085bcd..df3fed29 100644 --- a/admiral/pkg/registry/testutils.go +++ b/admiral/pkg/registry/testutils.go @@ -7,13 +7,13 @@ import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func GetSampleIdentityConfigEnvironment(env string, namespace string) *IdentityConfigEnvironment { +func GetSampleIdentityConfigEnvironment(env string, namespace string, identity string) *IdentityConfigEnvironment { identityConfigEnvironment := &IdentityConfigEnvironment{ Name: env, Namespace: namespace, ServiceName: "app-1-spk-root-service", Services: map[string]*RegistryServiceConfig{ - "app-1-spk-root-service": &RegistryServiceConfig{ + "app-1-spk-root-service": { Name: "app-1-spk-root-service", Weight: -1, Ports: map[string]uint32{ @@ -27,7 +27,9 @@ func GetSampleIdentityConfigEnvironment(env string, namespace string) *IdentityC TrafficPolicy: TrafficPolicy{ ClientConnectionConfig: v1alpha1.ClientConnectionConfig{ ObjectMeta: v1.ObjectMeta{ - Name: "sampleCCC", + Name: "sampleCCC", + Labels: map[string]string{"identity": identity}, + Annotations: map[string]string{"env": env}, }, Spec: v1alpha1.ClientConnectionConfigSpec{ ConnectionPool: model.ConnectionPool{Http: &model.ConnectionPool_HTTP{ @@ -39,7 +41,9 @@ func GetSampleIdentityConfigEnvironment(env string, namespace string) *IdentityC }, GlobalTrafficPolicy: v1alpha1.GlobalTrafficPolicy{ ObjectMeta: v1.ObjectMeta{ - Name: "sampleGTP", + Name: "sampleGTP", + Labels: map[string]string{"identity": identity}, + Annotations: map[string]string{"env": env}, }, Spec: model.GlobalTrafficPolicy{ Policy: []*model.TrafficPolicy{ @@ -67,7 +71,9 @@ func GetSampleIdentityConfigEnvironment(env string, namespace string) *IdentityC }, OutlierDetection: v1alpha1.OutlierDetection{ 
ObjectMeta: v1.ObjectMeta{ - Name: "sampleOD", + Name: "sampleOD", + Labels: map[string]string{"identity": identity}, + Annotations: map[string]string{"env": env}, }, Spec: model.OutlierDetection{ OutlierConfig: &model.OutlierConfig{ @@ -82,10 +88,10 @@ func GetSampleIdentityConfigEnvironment(env string, namespace string) *IdentityC return identityConfigEnvironment } -func GetSampleIdentityConfig() IdentityConfig { - prfEnv := GetSampleIdentityConfigEnvironment("prf", "ns-1-usw2-prf") - e2eEnv := GetSampleIdentityConfigEnvironment("e2e", "ns-1-usw2-e2e") - qalEnv := GetSampleIdentityConfigEnvironment("qal", "ns-1-usw2-qal") +func GetSampleIdentityConfig(identity string) IdentityConfig { + prfEnv := GetSampleIdentityConfigEnvironment("prf", "ns-1-usw2-prf", identity) + e2eEnv := GetSampleIdentityConfigEnvironment("e2e", "ns-1-usw2-e2e", identity) + qalEnv := GetSampleIdentityConfigEnvironment("qal", "ns-1-usw2-qal", identity) environments := map[string]*IdentityConfigEnvironment{ "prf": prfEnv, "e2e": e2eEnv, @@ -103,7 +109,7 @@ func GetSampleIdentityConfig() IdentityConfig { Environment: environments, } identityConfig := IdentityConfig{ - IdentityName: "sample", + IdentityName: identity, Clusters: map[string]*IdentityConfigCluster{ "cluster1": &cluster}, ClientAssets: clientAssets, From 0331095faaed8fa7249626ead38c0f0c09ea1b59 Mon Sep 17 00:00:00 2001 From: Adil Fulara Date: Tue, 13 Aug 2024 17:23:43 -0700 Subject: [PATCH 6/9] MESH-5373: Node locality is missing occasionally. - Add support for update event on NodeController Signed-off-by: Adil Fulara Signed-off-by: Ryan Tay --- admiral/pkg/controller/admiral/node.go | 22 +++++++++++++------ admiral/pkg/controller/admiral/node_test.go | 24 +++++++-------------- 2 files changed, 24 insertions(+), 22 deletions(-) diff --git a/admiral/pkg/controller/admiral/node.go b/admiral/pkg/controller/admiral/node.go index a19a43a7..0d261ce6 100644 --- a/admiral/pkg/controller/admiral/node.go +++ b/admiral/pkg/controller/admiral/node.go @@ -57,19 +57,29 @@ func NewNodeController(stopCh <-chan struct{}, handler NodeHandler, config *rest return &nodeController, nil } -func (p *NodeController) Added(ctx context.Context, obj interface{}) error { +func process(ctx context.Context, obj interface{}) (string, error) { node, ok := obj.(*k8sV1.Node) if !ok { - return fmt.Errorf("type assertion failed, %v is not of type *v1.Node", obj) + return "", fmt.Errorf("type assertion failed, %v is not of type *v1.Node", obj) } - if p.Locality == nil { - p.Locality = &Locality{Region: common.GetNodeLocality(node)} + return common.GetNodeLocality(node), nil +} + +func (d *NodeController) Added(ctx context.Context, obj interface{}) error { + region, err := process(ctx, obj) + if err != nil { + return err } + d.Locality = &Locality{Region: region} return nil } -func (p *NodeController) Updated(ctx context.Context, obj interface{}, oldObj interface{}) error { - //ignore +func (d *NodeController) Updated(ctx context.Context, obj interface{}, oldObj interface{}) error { + region, err := process(ctx, obj) + if err != nil { + return err + } + d.Locality = &Locality{Region: region} return nil } diff --git a/admiral/pkg/controller/admiral/node_test.go b/admiral/pkg/controller/admiral/node_test.go index 502b3a7b..2048b581 100644 --- a/admiral/pkg/controller/admiral/node_test.go +++ b/admiral/pkg/controller/admiral/node_test.go @@ -102,31 +102,23 @@ func TestNodeAddUpdateDelete(t *testing.T) { if nodeController == nil { t.Errorf("Node controller should never be nil without an error 
thrown") + return } region := "us-west-2" nodeObj := &k8sV1.Node{Spec: k8sV1.NodeSpec{}, ObjectMeta: v1.ObjectMeta{Labels: map[string]string{common.NodeRegionLabel: region}}} ctx := context.Background() - nodeController.Added(ctx, nodeObj) + _ = nodeController.Added(ctx, nodeObj) + assert.Equal(t, "us-west-2", nodeController.Locality.Region, "region expected %v, got: %v", region, nodeController.Locality.Region) - locality := nodeController.Locality + nodeObj.Labels[common.NodeRegionLabel] = "us-east-2" + _ = nodeController.Updated(ctx, nodeObj, nodeObj) + assert.Equal(t, "us-east-2", nodeController.Locality.Region, "region expected %v, got: %v", region, nodeController.Locality.Region) - if locality.Region != region { - t.Errorf("region expected %v, got: %v", region, locality.Region) - } - - nodeController.Updated(ctx, nodeObj, nodeObj) - //update should make no difference - if locality.Region != region { - t.Errorf("region expected %v, got: %v", region, locality.Region) - } - - nodeController.Deleted(ctx, nodeObj) + _ = nodeController.Deleted(ctx, nodeObj) //delete should make no difference - if locality.Region != region { - t.Errorf("region expected %v, got: %v", region, locality.Region) - } + assert.Equal(t, "us-east-2", nodeController.Locality.Region, "region expected %v, got: %v", region, nodeController.Locality.Region) } // TODO: This is just a placeholder for when we add diff check for other types From 5cd8ec53c12e4a1fadd9a2514d34256f455e5bbe Mon Sep 17 00:00:00 2001 From: Ryan Tay Date: Thu, 15 Aug 2024 15:05:33 -0700 Subject: [PATCH 7/9] address comments Signed-off-by: Ryan Tay --- admiral/cmd/admiral/cmd/root.go | 8 ++++---- admiral/pkg/controller/admiral/shard.go | 14 +++++++------- admiral/pkg/controller/admiral/shard_test.go | 3 +++ admiral/pkg/controller/common/config.go | 8 ++++---- admiral/pkg/controller/common/types.go | 14 +++++++------- 5 files changed, 25 insertions(+), 22 deletions(-) diff --git a/admiral/cmd/admiral/cmd/root.go b/admiral/cmd/admiral/cmd/root.go index 7bf02701..a1c23548 100644 --- a/admiral/cmd/admiral/cmd/root.go +++ b/admiral/cmd/admiral/cmd/root.go @@ -246,10 +246,10 @@ func GetRootCmd(args []string) *cobra.Command { rootCmd.PersistentFlags().BoolVar(¶ms.AdmiralOperatorMode, "admiral_operator_mode", false, "Enable/Disable admiral operator functionality") rootCmd.PersistentFlags().StringVar(¶ms.OperatorSyncNamespace, "operator_sync_namespace", "admiral-operator-sync", "Namespace in which Admiral Operator will put its generated configurations") - rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.OperatorIdentityLabel, "operator_identity_label", "admiral.io/operatorIdentity", "used to filter which shard Admiral Operator will watch") - rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.ShardIdentityLabel, "shard_identity_label", "admiral.io/shardIdentity", "used to filter which shard Admiral Operator will watch") - rootCmd.PersistentFlags().StringVar(¶ms.OperatorIdentityValue, "operator_identity_value", "", "Admiral operator should watch shards where operatorIdentityLabel == operatorIdentityValue") - rootCmd.PersistentFlags().StringVar(¶ms.ShardIdentityValue, "shard_identity_value", "", "Admiral operator should watch shards where shardIdentityLabel == shardIdentityValue") + rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.OperatorIdentityLabelKey, "operator_identity_label_key", "admiral.io/operatorIdentity", "used to filter which shard Admiral Operator will watch") + rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.ShardIdentityLabelKey, 
"shard_identity_label_key", "admiral.io/shardIdentity", "used to filter which shard Admiral Operator will watch") + rootCmd.PersistentFlags().StringVar(¶ms.OperatorIdentityValue, "operator_identity_value", "", "Admiral operator should watch shards where operatorIdentityLabelKey == operatorIdentityValue") + rootCmd.PersistentFlags().StringVar(¶ms.ShardIdentityValue, "shard_identity_value", "", "Admiral operator should watch shards where shardIdentityLabelKey == shardIdentityValue") rootCmd.PersistentFlags().StringVar(¶ms.OperatorSecretFilterTags, "operator_secret_filter_tags", "admiral/syncoperator", "Filter tags for the specific admiral operator namespace secret to watch") return rootCmd diff --git a/admiral/pkg/controller/admiral/shard.go b/admiral/pkg/controller/admiral/shard.go index 2aa81803..a0745fcb 100644 --- a/admiral/pkg/controller/admiral/shard.go +++ b/admiral/pkg/controller/admiral/shard.go @@ -149,13 +149,13 @@ func NewShardController(stopCh <-chan struct{}, handler ShardHandler, configPath if err != nil { return nil, fmt.Errorf("failed to create shard controller crd client: %v", err) } - //labelOptions := informers.WithTweakListOptions(func(opts *metav1.ListOptions) { - // opIdLabel, opIdValue := common.GetOperatorIdentityLabelValueSet() - // shardIdLabel, shardIdValue := common.GetShardIdentityLabelValueSet() - // opts.LabelSelector = fmt.Sprintf("%s=%s, %s=%s", opIdLabel, opIdValue, shardIdLabel, shardIdValue) - //}) - //informerFactory := informers.NewSharedInformerFactoryWithOptions(shardController.K8sClient, resyncPeriod, labelOptions) - informerFactory := informers.NewSharedInformerFactoryWithOptions(shardController.K8sClient, resyncPeriod) + labelOptions := informers.WithTweakListOptions(func(opts *metav1.ListOptions) { + opIdLabel, opIdValue := common.GetOperatorIdentityLabelKeyValueSet() + shardIdLabel, shardIdValue := common.GetShardIdentityLabelKeyValueSet() + opts.LabelSelector = fmt.Sprintf("%s=%s, %s=%s", opIdLabel, opIdValue, shardIdLabel, shardIdValue) + }) + informerFactory := informers.NewSharedInformerFactoryWithOptions(shardController.K8sClient, resyncPeriod, labelOptions) + //informerFactory := informers.NewSharedInformerFactoryWithOptions(shardController.K8sClient, resyncPeriod) informerFactory.Start(stopCh) shardController.informer = v1.NewShardInformer(shardController.CrdClient, namespace, diff --git a/admiral/pkg/controller/admiral/shard_test.go b/admiral/pkg/controller/admiral/shard_test.go index 67e4e816..379588bd 100644 --- a/admiral/pkg/controller/admiral/shard_test.go +++ b/admiral/pkg/controller/admiral/shard_test.go @@ -42,6 +42,9 @@ func GetMockShard() *admiralapiv1.Shard { LastUpdatedTime: v1.Time{}, }, } + opIdLabel, opIdValue := common.GetOperatorIdentityLabelKeyValueSet() + shardIdLabel, shardIdValue := common.GetShardIdentityLabelKeyValueSet() + shard.Labels = map[string]string{opIdLabel: opIdValue, shardIdLabel: shardIdValue} return &shard } diff --git a/admiral/pkg/controller/common/config.go b/admiral/pkg/controller/common/config.go index fff85022..8be9caca 100644 --- a/admiral/pkg/controller/common/config.go +++ b/admiral/pkg/controller/common/config.go @@ -471,16 +471,16 @@ func GetOperatorSyncNamespace() string { return wrapper.params.OperatorSyncNamespace } -func GetOperatorIdentityLabelValueSet() (string, string) { +func GetOperatorIdentityLabelKeyValueSet() (string, string) { wrapper.RLock() defer wrapper.RUnlock() - return wrapper.params.LabelSet.OperatorIdentityLabel, wrapper.params.OperatorIdentityValue + return 
wrapper.params.LabelSet.OperatorIdentityLabelKey, wrapper.params.OperatorIdentityValue } -func GetShardIdentityLabelValueSet() (string, string) { +func GetShardIdentityLabelKeyValueSet() (string, string) { wrapper.RLock() defer wrapper.RUnlock() - return wrapper.params.LabelSet.ShardIdentityLabel, wrapper.params.ShardIdentityValue + return wrapper.params.LabelSet.ShardIdentityLabelKey, wrapper.params.ShardIdentityValue } func GetOperatorSecretFilterTags() string { diff --git a/admiral/pkg/controller/common/types.go b/admiral/pkg/controller/common/types.go index 5be6c8b3..4d213e8a 100644 --- a/admiral/pkg/controller/common/types.go +++ b/admiral/pkg/controller/common/types.go @@ -113,11 +113,11 @@ type AdmiralParams struct { GatewayAssetAliases []string //Admiral 2.0 params - AdmiralOperatorMode bool - OperatorSyncNamespace string - AdmiralStateSyncerMode bool - OperatorIdentityValue string - ShardIdentityValue string + AdmiralOperatorMode bool + OperatorSyncNamespace string + AdmiralStateSyncerMode bool + OperatorIdentityValue string + ShardIdentityValue string OperatorSecretFilterTags string } @@ -157,8 +157,8 @@ type LabelSet struct { GatewayApp string //the value for `app` key that will be used to fetch the loadblancer for cross cluster calls, also referred to as east west gateway AdmiralCRDIdentityLabel string //Label Used to identify identity label for crd IdentityPartitionKey string //Label used for partitioning assets with same identity into groups - OperatorIdentityLabel string - ShardIdentityLabel string + OperatorIdentityLabelKey string + ShardIdentityLabelKey string } type TrafficObject struct { From f86c135bebc9e7e2a4faa7ba7c21b546e64da973 Mon Sep 17 00:00:00 2001 From: Ryan Tay Date: Thu, 15 Aug 2024 15:05:54 -0700 Subject: [PATCH 8/9] remove dead code Signed-off-by: Ryan Tay --- admiral/pkg/controller/admiral/shard.go | 1 - 1 file changed, 1 deletion(-) diff --git a/admiral/pkg/controller/admiral/shard.go b/admiral/pkg/controller/admiral/shard.go index a0745fcb..1af0c215 100644 --- a/admiral/pkg/controller/admiral/shard.go +++ b/admiral/pkg/controller/admiral/shard.go @@ -155,7 +155,6 @@ func NewShardController(stopCh <-chan struct{}, handler ShardHandler, configPath opts.LabelSelector = fmt.Sprintf("%s=%s, %s=%s", opIdLabel, opIdValue, shardIdLabel, shardIdValue) }) informerFactory := informers.NewSharedInformerFactoryWithOptions(shardController.K8sClient, resyncPeriod, labelOptions) - //informerFactory := informers.NewSharedInformerFactoryWithOptions(shardController.K8sClient, resyncPeriod) informerFactory.Start(stopCh) shardController.informer = v1.NewShardInformer(shardController.CrdClient, namespace, From afd5f8a16d3bf791b67329a792eff89ed24148d8 Mon Sep 17 00:00:00 2001 From: Ryan Tay Date: Fri, 16 Aug 2024 09:58:48 -0700 Subject: [PATCH 9/9] fix key value Signed-off-by: Ryan Tay --- admiral/cmd/admiral/cmd/root.go | 1 - admiral/pkg/clusters/shard_handler.go | 4 ---- admiral/pkg/controller/admiral/shard.go | 6 ++++-- admiral/pkg/controller/admiral/shard_test.go | 4 ++-- admiral/pkg/controller/common/config.go | 4 ++-- admiral/pkg/controller/common/types.go | 1 - 6 files changed, 8 insertions(+), 12 deletions(-) diff --git a/admiral/cmd/admiral/cmd/root.go b/admiral/cmd/admiral/cmd/root.go index a1c23548..b9fd1e7a 100644 --- a/admiral/cmd/admiral/cmd/root.go +++ b/admiral/cmd/admiral/cmd/root.go @@ -246,7 +246,6 @@ func GetRootCmd(args []string) *cobra.Command { rootCmd.PersistentFlags().BoolVar(¶ms.AdmiralOperatorMode, "admiral_operator_mode", false, "Enable/Disable 
admiral operator functionality") rootCmd.PersistentFlags().StringVar(¶ms.OperatorSyncNamespace, "operator_sync_namespace", "admiral-operator-sync", "Namespace in which Admiral Operator will put its generated configurations") - rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.OperatorIdentityLabelKey, "operator_identity_label_key", "admiral.io/operatorIdentity", "used to filter which shard Admiral Operator will watch") rootCmd.PersistentFlags().StringVar(¶ms.LabelSet.ShardIdentityLabelKey, "shard_identity_label_key", "admiral.io/shardIdentity", "used to filter which shard Admiral Operator will watch") rootCmd.PersistentFlags().StringVar(¶ms.OperatorIdentityValue, "operator_identity_value", "", "Admiral operator should watch shards where operatorIdentityLabelKey == operatorIdentityValue") rootCmd.PersistentFlags().StringVar(¶ms.ShardIdentityValue, "shard_identity_value", "", "Admiral operator should watch shards where shardIdentityLabelKey == shardIdentityValue") diff --git a/admiral/pkg/clusters/shard_handler.go b/admiral/pkg/clusters/shard_handler.go index c4a924b4..fde2999a 100644 --- a/admiral/pkg/clusters/shard_handler.go +++ b/admiral/pkg/clusters/shard_handler.go @@ -35,10 +35,6 @@ func (sh *ShardHandler) Added(ctx context.Context, obj *admiralapiv1.Shard) erro } func (sh *ShardHandler) Deleted(ctx context.Context, obj *admiralapiv1.Shard) error { - //err := HandleEventForShard(ctx, admiral.Delete, obj, sh.RemoteRegistry) - //if err != nil { - // return fmt.Errorf(LogErrFormat, common.Delete, common.ShardResourceType, obj.Name, "", err) - //} ctxLogger := common.GetCtxLogger(ctx, obj.Name, "") ctxLogger.Warnf(common.CtxLogFormat, "ShardHandlerDeleted", obj.Name, obj.Namespace, "", "", "Shard object was deleted") return nil diff --git a/admiral/pkg/controller/admiral/shard.go b/admiral/pkg/controller/admiral/shard.go index 1af0c215..d25d65e3 100644 --- a/admiral/pkg/controller/admiral/shard.go +++ b/admiral/pkg/controller/admiral/shard.go @@ -18,6 +18,8 @@ import ( "time" ) +const OperatorIdentityLabelKey = "admiral.io/operatorIdentity" + type ShardHandler interface { Added(ctx context.Context, obj *admiralapiv1.Shard) error Deleted(ctx context.Context, obj *admiralapiv1.Shard) error @@ -150,9 +152,9 @@ func NewShardController(stopCh <-chan struct{}, handler ShardHandler, configPath return nil, fmt.Errorf("failed to create shard controller crd client: %v", err) } labelOptions := informers.WithTweakListOptions(func(opts *metav1.ListOptions) { - opIdLabel, opIdValue := common.GetOperatorIdentityLabelKeyValueSet() + opIdValue := common.GetOperatorIdentityLabelValue() shardIdLabel, shardIdValue := common.GetShardIdentityLabelKeyValueSet() - opts.LabelSelector = fmt.Sprintf("%s=%s, %s=%s", opIdLabel, opIdValue, shardIdLabel, shardIdValue) + opts.LabelSelector = fmt.Sprintf("%s=%s, %s=%s", OperatorIdentityLabelKey, opIdValue, shardIdLabel, shardIdValue) }) informerFactory := informers.NewSharedInformerFactoryWithOptions(shardController.K8sClient, resyncPeriod, labelOptions) informerFactory.Start(stopCh) diff --git a/admiral/pkg/controller/admiral/shard_test.go b/admiral/pkg/controller/admiral/shard_test.go index 379588bd..52818a39 100644 --- a/admiral/pkg/controller/admiral/shard_test.go +++ b/admiral/pkg/controller/admiral/shard_test.go @@ -42,9 +42,9 @@ func GetMockShard() *admiralapiv1.Shard { LastUpdatedTime: v1.Time{}, }, } - opIdLabel, opIdValue := common.GetOperatorIdentityLabelKeyValueSet() + opIdValue := common.GetOperatorIdentityLabelValue() shardIdLabel, shardIdValue := 
common.GetShardIdentityLabelKeyValueSet() - shard.Labels = map[string]string{opIdLabel: opIdValue, shardIdLabel: shardIdValue} + shard.Labels = map[string]string{OperatorIdentityLabelKey: opIdValue, shardIdLabel: shardIdValue} return &shard } diff --git a/admiral/pkg/controller/common/config.go b/admiral/pkg/controller/common/config.go index 8be9caca..55154690 100644 --- a/admiral/pkg/controller/common/config.go +++ b/admiral/pkg/controller/common/config.go @@ -471,10 +471,10 @@ func GetOperatorSyncNamespace() string { return wrapper.params.OperatorSyncNamespace } -func GetOperatorIdentityLabelKeyValueSet() (string, string) { +func GetOperatorIdentityLabelValue() string { wrapper.RLock() defer wrapper.RUnlock() - return wrapper.params.LabelSet.OperatorIdentityLabelKey, wrapper.params.OperatorIdentityValue + return wrapper.params.OperatorIdentityValue } func GetShardIdentityLabelKeyValueSet() (string, string) { diff --git a/admiral/pkg/controller/common/types.go b/admiral/pkg/controller/common/types.go index 4d213e8a..661480c5 100644 --- a/admiral/pkg/controller/common/types.go +++ b/admiral/pkg/controller/common/types.go @@ -157,7 +157,6 @@ type LabelSet struct { GatewayApp string //the value for `app` key that will be used to fetch the loadblancer for cross cluster calls, also referred to as east west gateway AdmiralCRDIdentityLabel string //Label Used to identify identity label for crd IdentityPartitionKey string //Label used for partitioning assets with same identity into groups - OperatorIdentityLabelKey string ShardIdentityLabelKey string }
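
Taken together, patches 7 through 9 move Shard filtering from the handler into the informer itself: the operator identity key becomes the fixed OperatorIdentityLabelKey constant, the shard identity key stays configurable, and both pairs are compiled into a list-options label selector. Below is a minimal sketch of that wiring, using hypothetical flag values for illustration (the real ones come from --shard_identity_label_key, --operator_identity_value, and --shard_identity_value):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
)

// Fixed key, matching the OperatorIdentityLabelKey const added to shard.go.
const operatorIdentityLabelKey = "admiral.io/operatorIdentity"

// buildShardLabelOptions mirrors the WithTweakListOptions filter in
// NewShardController: only Shards carrying both label pairs are listed and watched.
func buildShardLabelOptions(shardIdLabel, opIdValue, shardIdValue string) informers.SharedInformerOption {
	return informers.WithTweakListOptions(func(opts *metav1.ListOptions) {
		opts.LabelSelector = fmt.Sprintf("%s=%s, %s=%s",
			operatorIdentityLabelKey, opIdValue, shardIdLabel, shardIdValue)
	})
}

func main() {
	// Hypothetical values for illustration only.
	opt := buildShardLabelOptions("admiral.io/shardIdentity", "operator-east", "shard-east")
	_ = opt // would be passed to informers.NewSharedInformerFactoryWithOptions(client, resyncPeriod, opt)
	fmt.Printf("selector: %s=%s, %s=%s\n",
		operatorIdentityLabelKey, "operator-east", "admiral.io/shardIdentity", "shard-east")
}

Because the selector is applied server-side, Shards labeled for other operator instances never enter the informer cache, so ShardHandler.Added fires only for work this instance owns; this is also why GetMockShard in shard_test.go now sets both labels explicitly.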