diff --git a/admiral/pkg/controller/common/config.go b/admiral/pkg/controller/common/config.go index 6cf0c732..7e460dd4 100644 --- a/admiral/pkg/controller/common/config.go +++ b/admiral/pkg/controller/common/config.go @@ -1,33 +1,47 @@ package common import ( - "github.com/istio-ecosystem/admiral/admiral/pkg/monitoring" + "strings" + "sync" "time" + "github.com/istio-ecosystem/admiral/admiral/pkg/monitoring" + "github.com/istio-ecosystem/admiral/admiral/pkg/util" "github.com/matryer/resync" log "github.com/sirupsen/logrus" ) -var admiralParams = AdmiralParams{ - LabelSet: &LabelSet{}, +type admiralParamsWrapper struct { + params AdmiralParams + sync.RWMutex + resync.Once } -var once resync.Once +// Singleton +var wrapper = admiralParamsWrapper{ + params: AdmiralParams{ + LabelSet: &LabelSet{}, + }, +} func ResetSync() { - once.Reset() + wrapper.Reset() } func InitializeConfig(params AdmiralParams) { var initHappened = false - once.Do(func() { - admiralParams = params - initHappened = true - InitializeMetrics() + wrapper.Do(func() { + wrapper.Lock() + defer wrapper.Unlock() + wrapper.params = params + if wrapper.params.LabelSet == nil { + wrapper.params.LabelSet = &LabelSet{} + } err := monitoring.InitializeMonitoring() if err != nil { log.Errorf("failed to setup monitoring: %v", err) } + initHappened = true }) if initHappened { log.Info("InitializeConfig was called.") @@ -37,108 +51,410 @@ func InitializeConfig(params AdmiralParams) { } func GetAdmiralParams() AdmiralParams { - return admiralParams + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params +} + +func GetAdmiralProfile() string { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.Profile } func GetArgoRolloutsEnabled() bool { - return admiralParams.ArgoRolloutsEnabled + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.ArgoRolloutsEnabled +} + +func GetSecretFilterTags() string { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.SecretFilterTags } func GetKubeconfigPath() string { - return admiralParams.KubeconfigPath + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.KubeconfigPath } func GetCacheRefreshDuration() time.Duration { - return admiralParams.CacheRefreshDuration + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.CacheReconcileDuration } func GetClusterRegistriesNamespace() string { - return admiralParams.ClusterRegistriesNamespace + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.ClusterRegistriesNamespace } func GetDependenciesNamespace() string { - return admiralParams.DependenciesNamespace + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.DependenciesNamespace } func GetSyncNamespace() string { - return admiralParams.SyncNamespace + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.SyncNamespace } func GetEnableSAN() bool { - return admiralParams.EnableSAN + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.EnableSAN } func GetSANPrefix() string { - return admiralParams.SANPrefix + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.SANPrefix } -func GetSecretResolver() string { - return admiralParams.SecretResolver +func GetAdmiralConfigPath() string { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.AdmiralConfig } func GetLabelSet() *LabelSet { - return admiralParams.LabelSet + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.LabelSet } func 
GetAdditionalEndpointSuffixes() []string { - return admiralParams.AdditionalEndpointSuffixes + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.AdditionalEndpointSuffixes } func GetAdditionalEndpointLabelFilters() []string { - return admiralParams.AdditionalEndpointLabelFilters + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.AdditionalEndpointLabelFilters +} + +func GetEnableWorkloadDataStorage() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.EnableWorkloadDataStorage } func GetHostnameSuffix() string { - return admiralParams.HostnameSuffix + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.HostnameSuffix } func GetWorkloadIdentifier() string { - return admiralParams.LabelSet.WorkloadIdentityKey + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.LabelSet.WorkloadIdentityKey } -func GetGlobalTrafficDeploymentLabel() string { - return admiralParams.LabelSet.GlobalTrafficDeploymentLabel +func GetPartitionIdentifier() string { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.LabelSet.IdentityPartitionKey +} + +func GetTrafficConfigIdentifier() string { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.LabelSet.TrafficConfigIdentityKey +} + +func GetAdmiralCRDIdentityLabel() string { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.LabelSet.AdmiralCRDIdentityLabel } func GetRoutingPolicyLabel() string { - return admiralParams.LabelSet.WorkloadIdentityKey + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.LabelSet.WorkloadIdentityKey } func GetWorkloadSidecarUpdate() string { - return admiralParams.WorkloadSidecarUpdate + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.WorkloadSidecarUpdate } -func GetEnvoyFilterVersion() string { - return admiralParams.EnvoyFilterVersion +func GetEnvoyFilterVersion() []string { + wrapper.RLock() + defer wrapper.RUnlock() + if len(strings.TrimSpace(wrapper.params.EnvoyFilterVersion)) == 0 { + return []string{} + } + return strings.Split(wrapper.params.EnvoyFilterVersion, ",") +} + +func GetDeprecatedEnvoyFilterVersion() []string { + wrapper.RLock() + defer wrapper.RUnlock() + if len(strings.TrimSpace(wrapper.params.DeprecatedEnvoyFilterVersion)) == 0 { + return []string{} + } + return strings.Split(wrapper.params.DeprecatedEnvoyFilterVersion, ",") } func GetEnvoyFilterAdditionalConfig() string { - return admiralParams.EnvoyFilterAdditionalConfig + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.EnvoyFilterAdditionalConfig } func GetEnableRoutingPolicy() bool { - return admiralParams.EnableRoutingPolicy + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.EnableRoutingPolicy } func GetWorkloadSidecarName() string { - return admiralParams.WorkloadSidecarName + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.WorkloadSidecarName } func GetEnvKey() string { - return admiralParams.LabelSet.EnvKey + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.LabelSet.EnvKey } func GetMetricsEnabled() bool { - return admiralParams.MetricsEnabled + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.MetricsEnabled } -///Setters - be careful +func IsPersonaTrafficConfig() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.TrafficConfigPersona +} + +// This function is used to determine if a feature is enabled or not. +// If the feature is not present in the list, it is assumed to be enabled. 
+// Also any value other than "disabled" is assumed to be enabled. +func IsCartographerFeatureDisabled(featureName string) bool { + wrapper.RLock() + defer wrapper.RUnlock() + + if wrapper.params.CartographerFeatures == nil { + return false + } + // If the feature exists in the list and is set to disabled, return true + if val, ok := wrapper.params.CartographerFeatures[featureName]; ok { + return val == "disabled" + } else { + return false + } +} + +func IsDefaultPersona() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return !wrapper.params.TrafficConfigPersona +} + +func GetHAMode() string { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.HAMode +} + +func GetDiffCheckEnabled() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.EnableDiffCheck +} + +func IsProxyEnvoyFilterEnabled() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.EnableProxyEnvoyFilter +} + +func IsDependencyProcessingEnabled() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.EnableDependencyProcessing +} + +func GetSeAddressConfigMap() string { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.SeAddressConfigmap +} + +func DeploymentOrRolloutWorkerConcurrency() int { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.DeploymentOrRolloutWorkerConcurrency +} + +func DependentClusterWorkerConcurrency() int { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.DependentClusterWorkerConcurrency +} + +func DependencyWarmupMultiplier() int { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.DependencyWarmupMultiplier +} + +func MaxRequestsPerConnection() int32 { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.MaxRequestsPerConnection +} + +func IsAbsoluteFQDNEnabled() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.EnableAbsoluteFQDN +} + +func IsClientConnectionConfigProcessingEnabled() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.EnableClientConnectionConfigProcessing +} + +func IsAbsoluteFQDNEnabledForLocalEndpoints() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.EnableAbsoluteFQDNForLocalEndpoints +} + +func DisableDefaultAutomaticFailover() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.DisableDefaultAutomaticFailover +} + +func EnableServiceEntryCache() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.EnableServiceEntryCache +} + +func EnableDestinationRuleCache() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.EnableDestinationRuleCache +} + +func AlphaIdentityList() []string { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.AlphaIdentityList +} func SetKubeconfigPath(path string) { - admiralParams.KubeconfigPath = path + wrapper.Lock() + defer wrapper.Unlock() + wrapper.params.KubeconfigPath = path } -// for unit test only func SetEnablePrometheus(value bool) { - admiralParams.MetricsEnabled = value + wrapper.Lock() + defer wrapper.Unlock() + wrapper.params.MetricsEnabled = value +} + +func SetArgoRolloutsEnabled(value bool) { + wrapper.Lock() + defer wrapper.Unlock() + wrapper.params.ArgoRolloutsEnabled = value +} + +func SetCartographerFeature(featureName string, val string) { + wrapper.Lock() + defer wrapper.Unlock() + if wrapper.params.CartographerFeatures == nil { + wrapper.params.CartographerFeatures = make(map[string]string) + } + 
wrapper.params.CartographerFeatures[featureName] = val +} + +func GetGatewayAssetAliases() []string { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.GatewayAssetAliases +} + +func DisableIPGeneration() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.DisableIPGeneration +} + +func EnableActivePassive() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.EnableActivePassive +} + +func EnableExportTo(identityOrCname string) bool { + wrapper.RLock() + defer wrapper.RUnlock() + if wrapper.params.ExportToIdentityList != nil { + for _, identity := range wrapper.params.ExportToIdentityList { + if identity != "" && (identity == "*" || strings.Contains(strings.ToLower(identityOrCname), strings.ToLower(identity))) && wrapper.params.EnableSWAwareNSCaches { + return true + } + } + } + return false +} + +func EnableSWAwareNSCaches() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.EnableSWAwareNSCaches +} + +func DoSyncIstioResourcesToSourceClusters() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.EnableSyncIstioResourcesToSourceClusters +} + +func GetResyncIntervals() util.ResyncIntervals { + wrapper.RLock() + defer wrapper.RUnlock() + return util.ResyncIntervals{ + UniversalReconcileInterval: wrapper.params.CacheReconcileDuration, + SeAndDrReconcileInterval: wrapper.params.SeAndDrCacheReconcileDuration, + } +} + +func GetExportToMaxNamespaces() int { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.ExportToMaxNamespaces +} + +func IsAdmiralStateSyncerMode() bool { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.AdmiralStateSyncerMode +} + +func GetDefaultWarmupDurationSecs() int64 { + wrapper.RLock() + defer wrapper.RUnlock() + return wrapper.params.DefaultWarmupDurationSecs } diff --git a/admiral/pkg/controller/common/config_test.go b/admiral/pkg/controller/common/config_test.go index 23a11be2..ce271aaf 100644 --- a/admiral/pkg/controller/common/config_test.go +++ b/admiral/pkg/controller/common/config_test.go @@ -1,42 +1,60 @@ package common import ( + "sync" "testing" "time" + + "github.com/stretchr/testify/assert" + + log "github.com/sirupsen/logrus" ) -func TestConfigManagement(t *testing.T) { +var configTestSingleton sync.Once + +func setupForConfigTests() { + var initHappened bool + configTestSingleton.Do(func() { + p := AdmiralParams{ + KubeconfigPath: "testdata/fake.config", + LabelSet: &LabelSet{ + WorkloadIdentityKey: "identity", + AdmiralCRDIdentityLabel: "identity", + IdentityPartitionKey: "admiral.io/identityPartition", + }, + EnableSAN: true, + SANPrefix: "prefix", + HostnameSuffix: "mesh", + SyncNamespace: "admiral-sync", + SecretFilterTags: "admiral/sync", + CacheReconcileDuration: time.Minute, + ClusterRegistriesNamespace: "default", + DependenciesNamespace: "default", + Profile: "default", + WorkloadSidecarName: "default", + WorkloadSidecarUpdate: "disabled", + MetricsEnabled: true, + DeprecatedEnvoyFilterVersion: "1.10,1.17", + EnvoyFilterVersion: "1.10,1.13,1.17", + CartographerFeatures: map[string]string{"throttle_filter_gen": "disabled"}, + DisableIPGeneration: false, + EnableSWAwareNSCaches: true, + ExportToIdentityList: []string{"*"}, + ExportToMaxNamespaces: 35, + } + ResetSync() + initHappened = true + InitializeConfig(p) + }) + if !initHappened { + log.Warn("InitializeConfig was NOT called from setupForConfigTests") + } else { + log.Info("InitializeConfig was called from setupForConfigTests") + } +}
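The config singleton above is worth a second look: resync.Once gives one-shot initialization that tests can rewind via ResetSync, while the embedded RWMutex guards every accessor. A minimal standalone sketch of the same pattern (names here are illustrative, not Admiral's):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/matryer/resync"
)

type params struct{ SyncNamespace string }

// wrapper mirrors admiralParamsWrapper: one-shot init plus reader/writer locking.
type wrapper struct {
	p params
	sync.RWMutex
	resync.Once
}

var w wrapper

func initialize(p params) {
	w.Do(func() { // runs at most once until Reset is called
		w.Lock()
		defer w.Unlock()
		w.p = p
	})
}

func syncNamespace() string {
	w.RLock()
	defer w.RUnlock()
	return w.p.SyncNamespace
}

func main() {
	initialize(params{SyncNamespace: "admiral-sync"})
	initialize(params{SyncNamespace: "ignored"}) // no-op: Once already fired
	fmt.Println(syncNamespace())                 // admiral-sync
	w.Reset()                                    // what ResetSync does for tests
	initialize(params{SyncNamespace: "fresh"})
	fmt.Println(syncNamespace())                 // fresh
}
```

This is also why setupForConfigTests wraps InitializeConfig in its own sync.Once: the process-wide singleton is populated exactly once per test binary, with ResetSync clearing any state a prior suite left behind.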
- //Initial state comes from the init method in configInitializer.go - //p := AdmiralParams{ - // KubeconfigPath: "testdata/fake.config", - // LabelSet: &LabelSet{}, - // EnableSAN: true, - // SANPrefix: "prefix", - // HostnameSuffix: "mesh", - // SyncNamespace: "ns", - //} - // - //p.LabelSet.WorkloadIdentityKey="identity" - - //trying to initialize again. If the singleton pattern works, none of these will have changed - p := AdmiralParams{ - KubeconfigPath: "DIFFERENT", - LabelSet: &LabelSet{}, - EnableSAN: false, - SANPrefix: "BAD_PREFIX", - HostnameSuffix: "NOT_MESH", - SyncNamespace: "NOT_A_NAMESPACE", - CacheRefreshDuration: time.Hour, - ClusterRegistriesNamespace: "NOT_DEFAULT", - DependenciesNamespace: "NOT_DEFAULT", - SecretResolver: "INSECURE_RESOLVER", - } - - p.LabelSet.WorkloadIdentityKey = "BAD_LABEL" - p.LabelSet.GlobalTrafficDeploymentLabel = "ANOTHER_BAD_LABEL" - - InitializeConfig(p) +func TestConfigManagement(t *testing.T) { + setupForConfigTests() if GetWorkloadIdentifier() != "identity" { t.Errorf("Workload identifier mismatch, expected identity, got %v", GetWorkloadIdentifier()) @@ -44,20 +62,23 @@ func TestConfigManagement(t *testing.T) { if GetKubeconfigPath() != "testdata/fake.config" { t.Errorf("Kubeconfig path mismatch, expected testdata/fake.config, got %v", GetKubeconfigPath()) } + if GetSecretFilterTags() != "admiral/sync" { + t.Errorf("Filter tags mismatch, expected admiral/sync, got %v", GetSecretFilterTags()) + } if GetSANPrefix() != "prefix" { t.Errorf("San prefix mismatch, expected prefix, got %v", GetSANPrefix()) } if GetHostnameSuffix() != "mesh" { t.Errorf("Hostname suffix mismatch, expected mesh, got %v", GetHostnameSuffix()) } - if GetSyncNamespace() != "ns" { + if GetSyncNamespace() != "admiral-sync" { t.Errorf("Sync namespace mismatch, expected admiral-sync, got %v", GetSyncNamespace()) } if GetEnableSAN() != true { t.Errorf("Enable SAN mismatch, expected true, got %v", GetEnableSAN()) } if GetCacheRefreshDuration() != time.Minute { - t.Errorf("Cachee refresh duration mismatch, expected %v, got %v", time.Minute, GetCacheRefreshDuration()) + t.Errorf("Cache refresh duration mismatch, expected %v, got %v", time.Minute, GetCacheRefreshDuration()) } if GetClusterRegistriesNamespace() != "default" { t.Errorf("Cluster registry namespace mismatch, expected default, got %v", GetClusterRegistriesNamespace()) @@ -65,14 +86,11 @@ func TestConfigManagement(t *testing.T) { if GetDependenciesNamespace() != "default" { t.Errorf("Dependency namespace mismatch, expected default, got %v", GetDependenciesNamespace()) } - if GetSecretResolver() != "" { - t.Errorf("Secret resolver mismatch, expected empty string, got %v", GetSecretResolver()) + if GetAdmiralProfile() != "default" { + t.Errorf("Admiral profile mismatch, expected default, got %v", GetAdmiralProfile()) } - if GetGlobalTrafficDeploymentLabel() != "identity" { - t.Fatalf("GTP Deployment label mismatch. Expected identity, got %v", GetGlobalTrafficDeploymentLabel()) - } - if GetGlobalTrafficDeploymentLabel() != "identity" { - t.Fatalf("GTP Deployment label mismatch. Expected identity, got %v", GetGlobalTrafficDeploymentLabel()) + if GetAdmiralCRDIdentityLabel() != "identity" { + t.Fatalf("Admiral CRD Identity label mismatch. Expected identity, got %v", GetAdmiralCRDIdentityLabel()) } if GetWorkloadSidecarName() != "default" { t.Fatalf("Workload Sidecar Name mismatch. 
Expected default, got %v", GetWorkloadSidecarName()) } @@ -91,4 +109,85 @@ t.Errorf("Enable Prometheus mismatch, expected false, got %v", GetMetricsEnabled()) } + if IsPersonaTrafficConfig() != false { + t.Errorf("Enable Traffic Persona mismatch, expected false, got %v", IsPersonaTrafficConfig()) + } + + if IsDefaultPersona() != true { + t.Errorf("Enable Default Persona mismatch, expected true, got %v", IsDefaultPersona()) + } + + if len(GetDeprecatedEnvoyFilterVersion()) != 2 { + t.Errorf("Get deprecated envoy filter version by splitting with ',', expected 2, got %v", len(GetDeprecatedEnvoyFilterVersion())) + } + + if len(GetEnvoyFilterVersion()) != 3 { + t.Errorf("Get envoy filter version by splitting with ',', expected 3, got %v", len(GetEnvoyFilterVersion())) + } + + if IsCartographerFeatureDisabled("router_filter_gen") { + t.Errorf("A feature missing from the list should be assumed enabled, expected false, got %v", IsCartographerFeatureDisabled("router_filter_gen")) + } + + if !IsCartographerFeatureDisabled("throttle_filter_gen") { + t.Errorf("A feature present in the list with value disabled should be reported as disabled, expected true, got %v", IsCartographerFeatureDisabled("throttle_filter_gen")) + } + + if DisableIPGeneration() { + t.Errorf("Disable IP Address Generation mismatch, expected false, got %v", DisableIPGeneration()) + } + + if GetPartitionIdentifier() != "admiral.io/identityPartition" { + t.Errorf("Get identity partition mismatch, expected admiral.io/identityPartition, got %v", GetPartitionIdentifier()) + } + + if !EnableSWAwareNSCaches() { + t.Errorf("enable SW aware namespace caches mismatch, expected true, got %v", EnableSWAwareNSCaches()) + } + + if !EnableExportTo("fakeIdentity") { + t.Errorf("enable exportTo mismatch, expected true, got %v", EnableExportTo("fakeIdentity")) + } + + if GetExportToMaxNamespaces() != 35 { + t.Errorf("exportTo max namespaces mismatch, expected 35, got %v", GetExportToMaxNamespaces()) + } } + +func TestGetCRDIdentityLabelWithCRDIdentity(t *testing.T) { + + admiralParams := GetAdmiralParams() + backOldIdentity := admiralParams.LabelSet.AdmiralCRDIdentityLabel + admiralParams.LabelSet.AdmiralCRDIdentityLabel = "identityOld" + + assert.Equalf(t, "identityOld", GetAdmiralCRDIdentityLabel(), "GetCRDIdentityLabel()") + + admiralParams.LabelSet.AdmiralCRDIdentityLabel = backOldIdentity +} + +//func TestGetCRDIdentityLabelWithLabel(t *testing.T) { +// +// admiralParams := GetAdmiralParams() +// backOldIdentity := admiralParams.LabelSet.AdmiralCRDIdentityLabel +// backOldGTPLabel := admiralParams.LabelSet.GlobalTrafficDeploymentLabel +// admiralParams.LabelSet.GlobalTrafficDeploymentLabel = "identityGTP" +// +// assert.Equalf(t, "identityGTP", GetAdmiralCRDIdentityLabel(), "GetAdmiralCRDIdentityLabel()") +// +// admiralParams.LabelSet.CRDIdentityLabel = backOldIdentity +// admiralParams.LabelSet.GlobalTrafficDeploymentLabel = backOldGTPLabel +//} + +//func TestGetCRDIdentityLabelWithEmptyLabel(t *testing.T) { +// +// admiralParams := GetAdmiralParams() +// backOldIdentity := admiralParams.LabelSet.CRDIdentityLabel +// backOldGTPLabel := admiralParams.LabelSet.GlobalTrafficDeploymentLabel +// admiralParams.LabelSet.GlobalTrafficDeploymentLabel = "" +// +// assert.Equalf(t, "", GetCRDIdentityLabel(), "GetCRDIdentityLabel()") +// +// admiralParams.LabelSet.GlobalTrafficDeploymentLabel = "" +// admiralParams.LabelSet.CRDIdentityLabel = backOldIdentity +// 
admiralParams.LabelSet.GlobalTrafficDeploymentLabel = backOldGTPLabel +//} diff --git a/admiral/pkg/controller/common/metrics.go b/admiral/pkg/controller/common/metrics.go index c022cfcf..6da097c8 100644 --- a/admiral/pkg/controller/common/metrics.go +++ b/admiral/pkg/controller/common/metrics.go @@ -1,100 +1,32 @@ package common -import ( - "github.com/prometheus/client_golang/prometheus" - "sync" -) +import "github.com/prometheus/client_golang/prometheus" -const ( - ClustersMonitoredMetricName = "clusters_monitored" - EventsProcessedTotalMetricName = "events_processed_total" - - AddEventLabelValue = "add" - UpdateEventLabelValue = "update" - DeleteEventLabelValue = "delete" -) - -var ( - metricsOnce sync.Once - RemoteClustersMetric Gauge - EventsProcessed Counter -) +const ClustersMonitoredMetricName = "clusters_monitored" +const DependencyProxyServiceCacheSizeMetricName = "dependency_proxy_service_cache_size" type Gauge interface { - With(labelValues ...string) Gauge Set(value float64) } -type Counter interface { - With(labelValues ...string) Counter - Inc() -} - -/* -InitializeMetrics depends on AdmiralParams for metrics enablement. -*/ -func InitializeMetrics() { - metricsOnce.Do(func() { - RemoteClustersMetric = NewGaugeFrom(ClustersMonitoredMetricName, "Gauge for the clusters monitored by Admiral", []string{}) - EventsProcessed = NewCounterFrom(EventsProcessedTotalMetricName, "Counter for the events processed by Admiral", []string{"cluster", "object_type", "event_type"}) - }) -} - -func NewGaugeFrom(name string, help string, labelNames []string) Gauge { +func NewGaugeFrom(name string, help string) Gauge { if !GetMetricsEnabled() { - return &NoopGauge{} + return &Noop{} } opts := prometheus.GaugeOpts{Name: name, Help: help} - g := prometheus.NewGaugeVec(opts, labelNames) + g := prometheus.NewGauge(opts) prometheus.MustRegister(g) - return &PromGauge{g, labelNames} + return &PromGauge{g} } -func NewCounterFrom(name string, help string, labelNames []string) Counter { - if !GetMetricsEnabled() { - return &NoopCounter{} - } - opts := prometheus.CounterOpts{Name: name, Help: help} - c := prometheus.NewCounterVec(opts, labelNames) - prometheus.MustRegister(c) - return &PromCounter{c, labelNames} -} - -type NoopGauge struct{} -type NoopCounter struct{} +type Noop struct{} type PromGauge struct { - g *prometheus.GaugeVec - lvs []string -} - -type PromCounter struct { - c *prometheus.CounterVec - lvs []string -} - -func (g *PromGauge) With(labelValues ...string) Gauge { - g.lvs = append([]string{}, labelValues...) - - return g + g prometheus.Gauge } func (g *PromGauge) Set(value float64) { - g.g.WithLabelValues(g.lvs...).Set(value) -} - -func (c *PromCounter) With(labelValues ...string) Counter { - c.lvs = append([]string{}, labelValues...) 
- - return c + g.g.Set(value) } -func (c *PromCounter) Inc() { - c.c.WithLabelValues(c.lvs...).Inc() -} - -func (g *NoopGauge) Set(float64) {} -func (g *NoopGauge) With(...string) Gauge { return g } - -func (g *NoopCounter) Inc() {} -func (g *NoopCounter) With(...string) Counter { return g } +func (g *Noop) Set(value float64) {} diff --git a/admiral/pkg/controller/common/metrics_test.go b/admiral/pkg/controller/common/metrics_test.go index 0f881246..18e9b602 100644 --- a/admiral/pkg/controller/common/metrics_test.go +++ b/admiral/pkg/controller/common/metrics_test.go @@ -1,132 +1,40 @@ package common import ( + "testing" + "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/stretchr/testify/assert" - "io/ioutil" - "net/http" - "net/http/httptest" - "regexp" - "strconv" - "testing" ) func TestNewGaugeFrom(t *testing.T) { type args struct { - prom bool - name string - help string - value int64 - labelNames []string - labelValues []string - } - tc := []struct { - name string - args args - wantMetric bool - wantValue int64 - }{ - { - name: "Should return a Prometheus gauge", - args: args{true, "mygauge", "", 10, []string{"l1", "l2"}, []string{"v1", "v2"}}, - wantMetric: true, - wantValue: 10, - }, - { - name: "Should return a Noop gauge", - args: args{false, "mygauge", "", 10, []string{}, []string{}}, - wantMetric: false, - }, - } - - for _, tt := range tc { - t.Run(tt.name, func(t *testing.T) { - SetEnablePrometheus(tt.args.prom) - - // exercise metric - actual := NewGaugeFrom(tt.args.name, tt.args.help, tt.args.labelNames) - actual.With(tt.args.labelValues...).Set(float64(tt.args.value)) - - // query metrics endpoint - s := httptest.NewServer(promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{})) - defer s.Close() - - // parse response - resp, _ := http.Get(s.URL) - buf, _ := ioutil.ReadAll(resp.Body) - actualString := string(buf) - - // verify - if tt.wantMetric { - pattern := tt.args.name + `{l1="v1",l2="v2"} ([0-9]+)` - re := regexp.MustCompile(pattern) - matches := re.FindStringSubmatch(actualString) - f, _ := strconv.ParseInt(matches[1], 0, 64) - assert.Equal(t, tt.wantValue, f) - } - assert.Equal(t, 200, resp.StatusCode) - }) - } -} - -func TestNewCounterFrom(t *testing.T) { - type args struct { - prom bool - name string - help string - value int64 - labelNames []string - labelValues []string + prom bool + Name string + Help string } tc := []struct { - name string - args args - wantMetric bool - wantValue int64 + name string + args args + want Gauge }{ { - name: "Should return a Noop counter", - args: args{false, "mycounter", "", 10, []string{}, []string{}}, - wantMetric: false, + "Should return a Prometheus gauge", + args{true, "gauge", ""}, + &PromGauge{prometheus.NewGauge(prometheus.GaugeOpts{Name: "gauge", Help: ""})}, }, { - name: "Should return a Prometheus counter", - args: args{true, "mycounter", "", 1, []string{"l1", "l2"}, []string{"v1", "v2"}}, - wantMetric: true, - wantValue: 1, + "Should return a Noop gauge", + args{false, "gauge", ""}, + &Noop{}, }, } for _, tt := range tc { t.Run(tt.name, func(t *testing.T) { SetEnablePrometheus(tt.args.prom) - - // exercise metric - actual := NewCounterFrom(tt.args.name, tt.args.help, tt.args.labelNames) - var i int64 - for i = 0; i < tt.args.value; i++ { - actual.With(tt.args.labelValues...).Inc() - } - - // query metrics endpoint - s := httptest.NewServer(promhttp.HandlerFor(prometheus.DefaultGatherer, 
promhttp.HandlerOpts{})) - defer s.Close() - - // parse response - resp, _ := http.Get(s.URL) - buf, _ := ioutil.ReadAll(resp.Body) - actualString := string(buf) - - // verify - if tt.wantMetric { - pattern := tt.args.name + `{l1="v1",l2="v2"} ([0-9]+)` - re := regexp.MustCompile(pattern) - s2 := re.FindStringSubmatch(actualString)[1] - f, _ := strconv.ParseInt(s2, 0, 64) - assert.Equal(t, tt.wantValue, f) - } - assert.Equal(t, 200, resp.StatusCode) + actual := NewGaugeFrom(tt.args.Name, tt.args.Help) + assert.Equal(t, tt.want, actual, "want: %#v, got: %#v", tt.want, actual) }) } } diff --git a/admiral/pkg/controller/common/rolloutcommon.go b/admiral/pkg/controller/common/rolloutcommon.go index 5500d03d..a88eb821 100644 --- a/admiral/pkg/controller/common/rolloutcommon.go +++ b/admiral/pkg/controller/common/rolloutcommon.go @@ -1,11 +1,12 @@ package common import ( - argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1" - log "github.com/sirupsen/logrus" "sort" "strings" + + argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1" + log "github.com/sirupsen/logrus" ) // GetCname returns cname in the format <env>.<alias>.global, Ex: stage.Admiral.services.registry.global @@ -13,11 +14,11 @@ func GetCnameForRollout(rollout *argo.Rollout, identifier string, nameSuffix str var environment = GetEnvForRollout(rollout) alias := GetValueForKeyFromRollout(identifier, rollout) if len(alias) == 0 { - log.Warnf("%v label missing on deployment %v in namespace %v. Falling back to annotation to create cname.", identifier, rollout.Name, rollout.Namespace) + log.Warnf("%v label missing on rollout %v in namespace %v. Falling back to annotation to create cname.", identifier, rollout.Name, rollout.Namespace) alias = rollout.Spec.Template.Annotations[identifier] } if len(alias) == 0 { - log.Errorf("Unable to get cname for deployment with name %v in namespace %v as it doesn't have the %v annotation", rollout.Name, rollout.Namespace, identifier) + log.Errorf("Unable to get cname for rollout with name %v in namespace %v as it doesn't have the %v annotation", rollout.Name, rollout.Namespace, identifier) return "" } cname := environment + Sep + alias + Sep + nameSuffix @@ -45,16 +46,16 @@ func GetSANForRollout(domain string, rollout *argo.Rollout, identifier string) s func GetValueForKeyFromRollout(key string, rollout *argo.Rollout) string { value := rollout.Spec.Template.Labels[key] if len(value) == 0 { - log.Warnf("%v label missing on deployment %v in namespace %v. Falling back to annotation.", key, rollout.Name, rollout.Namespace) + log.Warnf("%v label missing on rollout %v in namespace %v. Falling back to annotation.", key, rollout.Name, rollout.Namespace) value = rollout.Spec.Template.Annotations[key] } return value }
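To make the cname construction in GetCnameForRollout concrete: with the env resolving to "stage" and a hypothetical identity alias "greeting", the pieces join on common.Sep. A sketch under those assumed values:

```go
package main

import (
	"fmt"

	"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
)

func main() {
	// Assumed inputs: env resolves to "stage", the identity label on the pod
	// template is "greeting" (illustrative), and nameSuffix is "mesh".
	environment, alias, nameSuffix := "stage", "greeting", "mesh"
	cname := environment + common.Sep + alias + common.Sep + nameSuffix
	fmt.Println(cname) // stage.greeting.mesh
}
```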
-//Returns the list of rollouts to which this GTP should apply. It is assumed that all inputs already are an identity match -//If the GTP has an identity label, it should match all rollouts which share that label -//If the GTP does not have an identity label, it should return all rollouts without an identity label -//IMPORTANT: If an environment label is specified on either the GTP or the rollout, the same value must be specified on the other for them to match +// Returns the list of rollouts to which this GTP should apply. It is assumed that all inputs already are an identity match +// If the GTP has an identity label, it should match all rollouts which share that label +// If the GTP does not have an identity label, it should return all rollouts without an identity label +// IMPORTANT: If an environment label is specified on either the GTP or the rollout, the same value must be specified on the other for them to match func MatchRolloutsToGTP(gtp *v1.GlobalTrafficPolicy, rollouts []argo.Rollout) []argo.Rollout { if gtp == nil || gtp.Name == "" { log.Warn("Nil or empty GlobalTrafficPolicy provided for rollout match. Returning nil.") @@ -92,15 +93,36 @@ func GetRolloutGlobalIdentifier(rollout *argo.Rollout) string { //TODO can this be removed now? This was for backward compatibility identity = rollout.Spec.Template.Annotations[GetWorkloadIdentifier()] } + if EnableSWAwareNSCaches() && len(identity) > 0 && len(GetRolloutIdentityPartition(rollout)) > 0 { + identity = GetRolloutIdentityPartition(rollout) + Sep + strings.ToLower(identity) + } return identity } -//Find the GTP that best matches the rollout. -//It's assumed that the set of GTPs passed in has already been matched via the GtprolloutLabel. Now it's our job to choose the best one. -//In order: +func GetRolloutOriginalIdentifier(rollout *argo.Rollout) string { + identity := rollout.Spec.Template.Labels[GetWorkloadIdentifier()] + if len(identity) == 0 { + //TODO can this be removed now? This was for backward compatibility + identity = rollout.Spec.Template.Annotations[GetWorkloadIdentifier()] + } + return identity +} + +func GetRolloutIdentityPartition(rollout *argo.Rollout) string { + identityPartition := rollout.Spec.Template.Annotations[GetPartitionIdentifier()] + if len(identityPartition) == 0 { + //In case partition is accidentally applied as Label + identityPartition = rollout.Spec.Template.Labels[GetPartitionIdentifier()] + } + return identityPartition +} + +// Find the GTP that best matches the rollout. +// It's assumed that the set of GTPs passed in has already been matched via the GtprolloutLabel. Now it's our job to choose the best one. +// In order: // - If one and only one GTP matches the env label of the rollout - use that one. Use "default" as the default env label for all GTPs and rollout. // - If multiple GTPs match the rollout label, use the oldest one (Using an old one has less chance of new behavior which could impact workflows) -//IMPORTANT: If an environment label is specified on either the GTP or the rollout, the same value must be specified on the other for them to match +// IMPORTANT: If an environment label is specified on either the GTP or the rollout, the same value must be specified on the other for them to match func MatchGTPsToRollout(gtpList []v1.GlobalTrafficPolicy, rollout *argo.Rollout) *v1.GlobalTrafficPolicy { if rollout == nil || rollout.Name == "" { log.Warn("Nil or empty rollout provided for GTP match. 
Returning nil.") diff --git a/admiral/pkg/controller/common/rolloutcommon_test.go b/admiral/pkg/controller/common/rolloutcommon_test.go index 0306bba7..482840b3 100644 --- a/admiral/pkg/controller/common/rolloutcommon_test.go +++ b/admiral/pkg/controller/common/rolloutcommon_test.go @@ -1,41 +1,59 @@ package common import ( - argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - "github.com/google/go-cmp/cmp" - v12 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1" "reflect" "strings" + "sync" "testing" "time" + + argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + "github.com/google/go-cmp/cmp" + v12 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1" + log "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func init() { - p := AdmiralParams{ - KubeconfigPath: "testdata/fake.config", - LabelSet: &LabelSet{}, - EnableSAN: true, - SANPrefix: "prefix", - HostnameSuffix: "mesh", - SyncNamespace: "ns", - CacheRefreshDuration: time.Minute, - ClusterRegistriesNamespace: "default", - DependenciesNamespace: "default", - SecretResolver: "", - WorkloadSidecarName: "default", - WorkloadSidecarUpdate: "disabled", +var rolloutCommonTestSingleton sync.Once + +func setupForRolloutCommonTests() { + var initHappened bool + rolloutCommonTestSingleton.Do(func() { + p := AdmiralParams{ + KubeconfigPath: "testdata/fake.config", + LabelSet: &LabelSet{ + WorkloadIdentityKey: "identity", + AdmiralCRDIdentityLabel: "identity", + EnvKey: "admiral.io/env", + IdentityPartitionKey: "admiral.io/identityPartition", + }, + EnableSAN: true, + SANPrefix: "prefix", + HostnameSuffix: "mesh", + SyncNamespace: "ns", + CacheReconcileDuration: time.Minute, + ClusterRegistriesNamespace: "default", + DependenciesNamespace: "default", + WorkloadSidecarName: "default", + WorkloadSidecarUpdate: "disabled", + EnableSWAwareNSCaches: true, + ExportToIdentityList: []string{"*"}, + } + + ResetSync() + initHappened = true + InitializeConfig(p) + }) + if !initHappened { + log.Warn("InitializeConfig was NOT called from setupForRolloutCommonTests") + } else { + log.Info("InitializeConfig was called setupForRolloutCommonTests") } - - p.LabelSet.WorkloadIdentityKey = "identity" - p.LabelSet.GlobalTrafficDeploymentLabel = "identity" - - InitializeConfig(p) } func TestGetEnvForRollout(t *testing.T) { - + setupForRolloutCommonTests() testCases := []struct { name string rollout argo.Rollout @@ -52,8 +70,17 @@ func TestGetEnvForRollout(t *testing.T) { expected: "stage2", }, { - name: "should return valid env from new env annotation", - rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{"admiral.io/env": "stage1"}, Labels: map[string]string{"env": "stage2"}}}}}, + name: "should return valid env from new env annotation", + rollout: argo.Rollout{ + Spec: argo.RolloutSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"admiral.io/env": "stage1"}, + Labels: map[string]string{"env": "stage2"}, + }, + }, + }, + }, expected: "stage1", }, { @@ -232,7 +259,7 @@ func TestMatchGTPsToRollout(t *testing.T) { } func TestGetRolloutGlobalIdentifier(t *testing.T) { - + setupForRolloutCommonTests() identifier := "identity" identifierVal := "company.platform.server" @@ -240,21 +267,31 @@ 
func TestGetRolloutGlobalIdentifier(t *testing.T) { name string rollout argo.Rollout expected string + original string }{ { name: "should return valid identifier from label", rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{identifier: identifierVal, "env": "stage"}}}}}, expected: identifierVal, + original: identifierVal, }, { name: "should return valid identifier from annotations", rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{identifier: identifierVal, "env": "stage"}}}}}, expected: identifierVal, + original: identifierVal, + }, + { + name: "should return partitioned identifier", + rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{identifier: identifierVal, "env": "stage", "admiral.io/identityPartition": "pid"}}}}}, + expected: "pid." + identifierVal, + original: identifierVal, }, { name: "should return empty identifier", rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{}, Annotations: map[string]string{}}}}}, expected: "", + original: "", }, } @@ -264,6 +301,47 @@ func TestGetRolloutGlobalIdentifier(t *testing.T) { if !(iVal == c.expected) { t.Errorf("Wanted identity value: %s, got: %s", c.expected, iVal) } + oiVal := GetRolloutOriginalIdentifier(&c.rollout) + if !(oiVal == c.original) { + t.Errorf("Wanted original identity value: %s, got: %s", c.original, oiVal) + } + }) + } +} + +func TestGetRolloutIdentityPartition(t *testing.T) { + setupForRolloutCommonTests() + partitionIdentifier := "admiral.io/identityPartition" + identifierVal := "swX" + + testCases := []struct { + name string + rollout argo.Rollout + expected string + }{ + { + name: "should return valid identifier from label", + rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{partitionIdentifier: identifierVal, "env": "stage"}}}}}, + expected: identifierVal, + }, + { + name: "should return valid identifier from annotations", + rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{partitionIdentifier: identifierVal, "env": "stage"}}}}}, + expected: identifierVal, + }, + { + name: "should return empty identifier", + rollout: argo.Rollout{Spec: argo.RolloutSpec{Template: corev1.PodTemplateSpec{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{}, Annotations: map[string]string{}}}}}, + expected: "", + }, + } + + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + iVal := GetRolloutIdentityPartition(&c.rollout) + if !(iVal == c.expected) { + t.Errorf("Wanted identityPartition value: %s, got: %s", c.expected, iVal) + } }) } } diff --git a/admiral/pkg/controller/common/types.go b/admiral/pkg/controller/common/types.go index 210ba4f6..fb624bab 100644 --- a/admiral/pkg/controller/common/types.go +++ b/admiral/pkg/controller/common/types.go @@ -1,9 +1,14 @@ package common import ( + "context" "fmt" "sync" "time" + + log "github.com/sirupsen/logrus" + + v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1" ) type Map struct { @@ -13,63 +18,122 @@ type Map struct { type MapOfMaps struct { cache map[string]*Map - mutex *sync.Mutex + mutex *sync.RWMutex +} + +type MapOfMapOfMaps struct { + cache 
map[string]*MapOfMaps + mutex *sync.RWMutex } type SidecarEgress struct { Namespace string FQDN string - CNAMEs map[string]string + CNAMEs *Map } -//maintains a map from workload identity -> map[namespace]SidecarEgress +// maintains a map from workload identity -> map[namespace]SidecarEgress type SidecarEgressMap struct { cache map[string]map[string]SidecarEgress mutex *sync.Mutex } type AdmiralParams struct { - ArgoRolloutsEnabled bool - KubeconfigPath string - CacheRefreshDuration time.Duration - ClusterRegistriesNamespace string - DependenciesNamespace string - SyncNamespace string - EnableSAN bool - SANPrefix string - SecretResolver string - LabelSet *LabelSet - LogLevel int - HostnameSuffix string - PreviewHostnamePrefix string - MetricsEnabled bool - WorkloadSidecarUpdate string - WorkloadSidecarName string - AdmiralStateCheckerName string - DRStateStoreConfigPath string - ServiceEntryIPPrefix string - EnvoyFilterVersion string - EnvoyFilterAdditionalConfig string - EnableRoutingPolicy bool - ExcludedIdentityList []string - AdditionalEndpointSuffixes []string - AdditionalEndpointLabelFilters []string + ArgoRolloutsEnabled bool + KubeconfigPath string + SecretFilterTags string + CacheReconcileDuration time.Duration + SeAndDrCacheReconcileDuration time.Duration + ClusterRegistriesNamespace string + DependenciesNamespace string + DnsConfigFile string + DNSTimeoutMs int + DNSRetries int + TrafficConfigNamespace string + SyncNamespace string + EnableSAN bool + SANPrefix string + AdmiralConfig string + Profile string + LabelSet *LabelSet + LogLevel int + HostnameSuffix string + PreviewHostnamePrefix string + MetricsEnabled bool + ChannelCapacity int + WorkloadSidecarUpdate string + WorkloadSidecarName string + AdmiralStateCheckerName string + DRStateStoreConfigPath string + ServiceEntryIPPrefix string + EnvoyFilterVersion string + DeprecatedEnvoyFilterVersion string + EnvoyFilterAdditionalConfig string + EnableRoutingPolicy bool + ExcludedIdentityList []string + AdditionalEndpointSuffixes []string + AdditionalEndpointLabelFilters []string + HAMode string + EnableWorkloadDataStorage bool + EnableDiffCheck bool + EnableProxyEnvoyFilter bool + EnableDependencyProcessing bool + DeploymentOrRolloutWorkerConcurrency int + DependentClusterWorkerConcurrency int + SeAddressConfigmap string + DependencyWarmupMultiplier int + EnableOutlierDetection bool + EnableClientConnectionConfigProcessing bool + MaxRequestsPerConnection int32 + EnableAbsoluteFQDN bool + EnableAbsoluteFQDNForLocalEndpoints bool + DisableDefaultAutomaticFailover bool + EnableServiceEntryCache bool + AlphaIdentityList []string + EnableDestinationRuleCache bool + DisableIPGeneration bool + EnableActivePassive bool + EnableSWAwareNSCaches bool + ExportToIdentityList []string + ExportToMaxNamespaces int + EnableSyncIstioResourcesToSourceClusters bool + AdmiralStateSyncerMode bool + DefaultWarmupDurationSecs int64 + + // Cartographer specific params + TrafficConfigPersona bool + TrafficConfigIgnoreAssets []string // used to ignore applying of client side envoy filters + CartographerFeatures map[string]string + TrafficConfigScope string + LogToFile bool + LogFilePath string + LogFileSizeInMBs int + + // Air specific + GatewayAssetAliases []string } func (b AdmiralParams) String() string { return fmt.Sprintf("KubeconfigPath=%v ", b.KubeconfigPath) + - fmt.Sprintf("CacheRefreshDuration=%v ", b.CacheRefreshDuration) + + fmt.Sprintf("CacheRefreshDuration=%v ", b.CacheReconcileDuration) + + fmt.Sprintf("SEAndDRCacheRefreshDuration=%v ", 
b.SeAndDrCacheReconcileDuration) + fmt.Sprintf("ClusterRegistriesNamespace=%v ", b.ClusterRegistriesNamespace) + fmt.Sprintf("DependenciesNamespace=%v ", b.DependenciesNamespace) + fmt.Sprintf("EnableSAN=%v ", b.EnableSAN) + fmt.Sprintf("SANPrefix=%v ", b.SANPrefix) + fmt.Sprintf("LabelSet=%v ", b.LabelSet) + - fmt.Sprintf("SecretResolver=%v ", b.SecretResolver) + - fmt.Sprintf("AdmiralStateCheckername=%v ", b.AdmiralStateCheckerName) + + fmt.Sprintf("Profile=%v ", b.Profile) + + fmt.Sprintf("AdmiralStateCheckerName=%v ", b.AdmiralStateCheckerName) + fmt.Sprintf("DRStateStoreConfigPath=%v ", b.DRStateStoreConfigPath) + fmt.Sprintf("ServiceEntryIPPrefix=%v ", b.ServiceEntryIPPrefix) + fmt.Sprintf("EnvoyFilterVersion=%v ", b.EnvoyFilterVersion) + - fmt.Sprintf("EnableRoutingPolicy=%v ", b.EnableRoutingPolicy) + fmt.Sprintf("DeprecatedEnvoyFilterVersion=%v ", b.DeprecatedEnvoyFilterVersion) + + fmt.Sprintf("EnableRoutingPolicy=%v ", b.EnableRoutingPolicy) + + fmt.Sprintf("TrafficConfigNamespace=%v ", b.TrafficConfigNamespace) + + fmt.Sprintf("TrafficConfigPersona=%v ", b.TrafficConfigPersona) + + fmt.Sprintf("CartographerFeatures=%v ", b.CartographerFeatures) + + fmt.Sprintf("DefaultWarmupDuration=%v ", b.DefaultWarmupDurationSecs) } type LabelSet struct { @@ -80,9 +144,24 @@ type LabelSet struct { AdmiralIgnoreLabel string PriorityKey string WorkloadIdentityKey string //Should always be used for both label and annotation (using label as the primary, and falling back to annotation if the label is not found) - GlobalTrafficDeploymentLabel string //label used to tie together deployments and globaltrafficpolicy objects. Configured separately from the identity key because this one _must_ be a label + TrafficConfigIdentityKey string //Should always be used for both label and annotation (using label as the primary, and falling back to annotation if the label is not found) EnvKey string //key used to group deployments by env. The order would be to use annotation `EnvKey`, then label `EnvKey`, and then fall back to the `env` label GatewayApp string //the value for `app` key that will be used to fetch the load balancer for cross cluster calls, also referred to as east west gateway + AdmiralCRDIdentityLabel string //Label used to identify the identity label for CRDs + IdentityPartitionKey string //Label used for partitioning assets with same identity into groups +}
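A sketch of the env-resolution order the EnvKey comment describes (hypothetical helper for illustration; Admiral's real lookup lives in GetEnvForRollout and its deployment counterpart):

```go
// resolveEnv is illustrative only: the annotation under EnvKey wins, then the
// EnvKey label, then the legacy "env" label, then Admiral's "default" env.
func resolveEnv(annotations, labels map[string]string, envKey string) string {
	if v := annotations[envKey]; v != "" {
		return v
	}
	if v := labels[envKey]; v != "" {
		return v
	}
	if v := labels["env"]; v != "" {
		return v
	}
	return "default"
}
```

That ordering is what the rolloutcommon_test case earlier relies on when an admiral.io/env annotation ("stage1") wins over an env label ("stage2").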
+ +type TrafficObject struct { + TrafficConfig *v1.TrafficConfig + ClusterID string + Ctx *Context + Event string +} + +type Context struct { + Ctx context.Context + Log *log.Entry + Property map[string]string } func NewSidecarEgressMap() *SidecarEgressMap { @@ -102,7 +181,14 @@ func NewMap() *Map { func NewMapOfMaps() *MapOfMaps { n := new(MapOfMaps) n.cache = make(map[string]*Map) - n.mutex = &sync.Mutex{} + n.mutex = &sync.RWMutex{} + return n +} + +func NewMapOfMapOfMaps() *MapOfMapOfMaps { + n := new(MapOfMapOfMaps) + n.cache = make(map[string]*MapOfMaps) + n.mutex = &sync.RWMutex{} return n } @@ -113,9 +199,26 @@ func (s *Map) Put(key string, value string) { } func (s *Map) Get(key string) string { + defer s.mutex.Unlock() + s.mutex.Lock() return s.cache[key] } +func (s *Map) CheckIfPresent(key string) bool { + defer s.mutex.Unlock() + s.mutex.Lock() + if _, ok := s.cache[key]; ok { + return true + } + return false +} + +func (s *Map) Len() int { + defer s.mutex.Unlock() + s.mutex.Lock() + return len(s.cache) +} + func (s *Map) Delete(key string) { defer s.mutex.Unlock() s.mutex.Lock() @@ -136,6 +239,18 @@ func (s *Map) Copy() map[string]string { } } +func (s *Map) CopyJustValues() []string { + var copy []string + if s != nil { + defer s.mutex.Unlock() + s.mutex.Lock() + for _, v := range s.cache { + copy = append(copy, v) + } + } + return copy +} + func (s *Map) Range(fn func(k string, v string)) { s.mutex.Lock() for k, v := range s.cache { @@ -155,6 +270,17 @@ func (s *MapOfMaps) Put(pkey string, key string, value string) { s.cache[pkey] = mapVal } +func (s *MapOfMaps) DeleteMap(pkey string, key string) { + defer s.mutex.Unlock() + s.mutex.Lock() + var mapVal = s.cache[pkey] + if mapVal == nil { + return + } + mapVal.Delete(key) + s.cache[pkey] = mapVal +} + func (s *MapOfMaps) PutMap(pkey string, inputMap *Map) { defer s.mutex.Unlock() s.mutex.Lock() @@ -174,10 +300,6 @@ func (s *MapOfMaps) Delete(key string) { delete(s.cache, key) } -func (s *MapOfMaps) Map() map[string]*Map { - return s.cache -} - func (s *MapOfMaps) Range(fn func(k string, v *Map)) { s.mutex.Lock() for k, v := range s.cache { @@ -186,6 +308,62 @@ func (s *MapOfMaps) Range(fn func(k string, v *Map)) { s.mutex.Unlock() } +func (s *MapOfMaps) Len() int { + defer s.mutex.Unlock() + s.mutex.Lock() + return len(s.cache) +} + +func (s *MapOfMaps) GetKeys() []string { + defer s.mutex.RUnlock() + s.mutex.RLock() + keys := []string{} + for k := range s.cache { + keys = append(keys, k) + } + return keys +} + +func (s *MapOfMapOfMaps) Put(pkey string, skey string, key, value string) { + defer s.mutex.Unlock() + s.mutex.Lock() + var mapOfMapsVal = s.cache[pkey] + if mapOfMapsVal == nil { + mapOfMapsVal = NewMapOfMaps() + } + mapOfMapsVal.Put(skey, key, value) + s.cache[pkey] = mapOfMapsVal +} + +func (s *MapOfMapOfMaps) PutMapofMaps(key string, value *MapOfMaps) { + defer s.mutex.Unlock() + s.mutex.Lock() + s.cache[key] = value +} + +func (s *MapOfMapOfMaps) Get(key string) *MapOfMaps { + s.mutex.RLock() + val := s.cache[key] + s.mutex.RUnlock() + return val +} + +func (s *MapOfMapOfMaps) 
Len() int { + defer s.mutex.RUnlock() + s.mutex.RLock() + return len(s.cache) +} + +func (s *Map) GetKeys() []string { + defer s.mutex.Unlock() + s.mutex.Lock() + keys := make([]string, 0) + for k := range s.cache { + keys = append(keys, k) + } + return keys +} + func (s *SidecarEgressMap) Put(identity string, namespace string, fqdn string, cnames map[string]string) { defer s.mutex.Unlock() s.mutex.Lock() @@ -193,7 +371,11 @@ func (s *SidecarEgressMap) Put(identity string, namespace string, fqdn string, c if mapVal == nil { mapVal = make(map[string]SidecarEgress) } - mapVal[namespace] = SidecarEgress{Namespace: namespace, FQDN: fqdn, CNAMEs: cnames} + cnameMap := NewMap() + for k, v := range cnames { + cnameMap.Put(k, v) + } + mapVal[namespace] = SidecarEgress{Namespace: namespace, FQDN: fqdn, CNAMEs: cnameMap} s.cache[identity] = mapVal } @@ -217,3 +399,46 @@ func (s *SidecarEgressMap) Range(fn func(k string, v map[string]SidecarEgress)) fn(k, v) } } + +type ProxyFilterRequestObject struct { + Identity string + ProxiedServiceInfo *ProxiedServiceInfo + DnsConfigFile string + DnsRetries int + DnsTimeoutMs int + ClusterID string + Ctx *Context + Event string +} + +type ProxyFilterConfig struct { + ConfigFile string `json:"configFile"` + DNSTimeoutMs int `json:"dnsTimeoutMs"` + DNSRetries int `json:"dnsRetries"` + GatewayAssetAlias string `json:"gatewayAssetAlias"` + Services []*ProxiedServiceInfo `json:"services"` +} + +type ProxiedServiceInfo struct { + Identity string `json:"assetAlias"` + ProxyAlias string `json:"-"` + Environments []*ProxiedServiceEnvironment `json:"environments"` +} + +type ProxiedServiceEnvironment struct { + Environment string `json:"environment"` + DnsName string `json:"dnsName"` + CNames []string `json:"cNames"` +} + +func (c *ProxyFilterConfig) String() string { + return fmt.Sprintf("{ConfigFile: %s, DNSTimeoutMs:%d, DNSRetries: %d, GatewayAssetAlias: %s, Services: %s}", c.ConfigFile, c.DNSTimeoutMs, c.DNSRetries, c.GatewayAssetAlias, c.Services) +} + +func (s *ProxiedServiceInfo) String() string { + return fmt.Sprintf("{Identity:%s, Environments: %v}", s.Identity, s.Environments) +} + +func (s *ProxiedServiceEnvironment) String() string { + return fmt.Sprintf("{Environment:%s, DnsName: %s, CNames: %s}", s.Environment, s.DnsName, s.CNames) +} diff --git a/admiral/pkg/controller/common/types_test.go b/admiral/pkg/controller/common/types_test.go index b56c9a4d..20bc60b6 100644 --- a/admiral/pkg/controller/common/types_test.go +++ b/admiral/pkg/controller/common/types_test.go @@ -45,6 +45,72 @@ func TestMapOfMaps(t *testing.T) { if map3 != nil { t.Fail() } + +} + +func TestDeleteMapOfMaps(t *testing.T) { + t.Parallel() + mapOfMaps := NewMapOfMaps() + mapOfMaps.Put("pkey1", "dev.a.global1", "127.0.10.1") + mapOfMaps.Put("pkey1", "dev.a.global2", "127.0.10.2") + mapOfMaps.DeleteMap("pkey1", "dev.a.global1") + + mapValue := mapOfMaps.Get("pkey1") + if len(mapValue.Get("dev.a.global1")) > 0 { + t.Errorf("expected=nil, got=%v", mapValue.Get("dev.a.global1")) + } + if mapValue.Get("dev.a.global2") != "127.0.10.2" { + t.Errorf("expected=%v, got=%v", "127.0.10.2", mapValue.Get("dev.a.global2")) + } +} + +func TestMapOfMapOfMaps(t *testing.T) { + t.Parallel() + mapOfMapOfMaps := NewMapOfMapOfMaps() + mapOfMapOfMaps.Put("pkey1", "dev.a.global1", "127.0.10.1", "ns1") + mapOfMapOfMaps.Put("pkey1", "dev.a.global2", "127.0.10.2", "ns2") + mapOfMapOfMaps.Put("pkey2", "qa.a.global", "127.0.10.1", "ns3") + mapOfMapOfMaps.Put("pkey2", "qa.a.global", "127.0.10.2", "ns4") + + 
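One behavioral change buried in types.go: SidecarEgress.CNAMEs is now a locked common.Map rather than a plain map, and SidecarEgressMap.Put copies the caller's map into it. A sketch of what that buys, under assumed illustrative names:

```go
sem := common.NewSidecarEgressMap()
cnames := map[string]string{"stage.greeting.global": "true"} // illustrative cname set
sem.Put("greeting", "greeting-ns", "stage.greeting.svc.cluster.local", cnames)

// Put copied the entries into a mutex-guarded common.Map, so this later
// write to the caller's map cannot race with readers of the egress map.
cnames["stage.other.global"] = "true"
```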
mapOfMaps1 := mapOfMapOfMaps.Get("pkey1") + if mapOfMaps1 == nil || mapOfMaps1.Get("dev.a.global1").Get("127.0.10.1") != "ns1" { + t.Fail() + } + if mapOfMapOfMaps.Len() != 2 { + t.Fail() + } + + mapOfMaps1.Delete("dev.a.global2") + + mapOfMaps2 := mapOfMapOfMaps.Get("pkey1") + if mapOfMaps2.Get("dev.a.global2") != nil { + t.Fail() + } + + keyList := mapOfMapOfMaps.Get("pkey2").Get("qa.a.global").GetKeys() + if len(keyList) != 2 { + t.Fail() + } + + mapOfMapOfMaps.Put("pkey3", "prod.a.global", "127.0.10.1", "ns5") + + mapOfMaps3 := mapOfMapOfMaps.Get("pkey3") + if mapOfMaps3 == nil || mapOfMaps3.Get("prod.a.global").Get("127.0.10.1") != "ns5" { + t.Fail() + } + + mapOfMaps4 := mapOfMapOfMaps.Get("pkey4") + if mapOfMaps4 != nil { + t.Fail() + } + + mapOfMaps5 := NewMapOfMaps() + mapOfMaps5.Put("dev.b.global", "ns6", "ns6") + mapOfMapOfMaps.PutMapofMaps("pkey5", mapOfMaps5) + if mapOfMapOfMaps.Get("pkey5") == nil || mapOfMapOfMaps.Get("pkey5").Get("dev.b.global").Get("ns6") != "ns6" { + t.Fail() + } + } func TestAdmiralParams(t *testing.T) { @@ -92,7 +158,7 @@ func TestMapOfMapsRange(t *testing.T) { mapOfMaps.Put("pkey2", "qa.a.global", "127.0.10.1") mapOfMaps.Put("pkey3", "stage.a.global", "127.0.10.1") - keys := make(map[string]string, len(mapOfMaps.Map())) + keys := make(map[string]string, len(mapOfMaps.cache)) for _, k := range keys { keys[k] = k } diff --git a/admiral/pkg/controller/istio/destinationrule.go b/admiral/pkg/controller/istio/destinationrule.go index f3a6c53c..35a301cd 100644 --- a/admiral/pkg/controller/istio/destinationrule.go +++ b/admiral/pkg/controller/istio/destinationrule.go @@ -3,8 +3,13 @@ package istio import ( "context" "fmt" + "sync" "time" + "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + log "github.com/sirupsen/logrus" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral" networking "istio.io/client-go/pkg/apis/networking/v1alpha3" versioned "istio.io/client-go/pkg/clientset/versioned" @@ -16,9 +21,9 @@ import ( // Handler interface contains the methods that are required type DestinationRuleHandler interface { - Added(ctx context.Context, obj *networking.DestinationRule) - Updated(ctx context.Context, obj *networking.DestinationRule) - Deleted(ctx context.Context, obj *networking.DestinationRule) + Added(ctx context.Context, obj *networking.DestinationRule) error + Updated(ctx context.Context, obj *networking.DestinationRule) error + Deleted(ctx context.Context, obj *networking.DestinationRule) error } type DestinationRuleEntry struct { @@ -30,16 +35,118 @@ type DestinationRuleController struct { IstioClient versioned.Interface DestinationRuleHandler DestinationRuleHandler informer cache.SharedIndexInformer + Cache *DestinationRuleCache + Cluster string +} + +type DestinationRuleItem struct { + DestinationRule *networking.DestinationRule + Status string +} + +type DestinationRuleCache struct { + cache map[string]*DestinationRuleItem + mutex *sync.RWMutex +} + +func NewDestinationRuleCache() *DestinationRuleCache { + return &DestinationRuleCache{ + cache: map[string]*DestinationRuleItem{}, + mutex: &sync.RWMutex{}, + } +} + +func (d *DestinationRuleCache) getKey(dr *networking.DestinationRule) string { + return makeKey(dr.Name, dr.Namespace) +} + +func makeKey(str1, str2 string) string { + return str1 + "/" + str2 +} + +func (d *DestinationRuleCache) Put(dr *networking.DestinationRule) { + defer d.mutex.Unlock() + d.mutex.Lock() 
+ + key := d.getKey(dr) + + d.cache[key] = &DestinationRuleItem{ + DestinationRule: dr, + Status: common.ProcessingInProgress, + } +} + +func (d *DestinationRuleCache) Get(identity string, namespace string) *networking.DestinationRule { + defer d.mutex.Unlock() + d.mutex.Lock() + + drItem, ok := d.cache[makeKey(identity, namespace)] + if ok { + return drItem.DestinationRule + } + + log.Infof("no destinationrule found in cache for identity=%s", identity) + return nil +} + +func (d *DestinationRuleCache) Delete(dr *networking.DestinationRule) { + defer d.mutex.Unlock() + d.mutex.Lock() + + key := d.getKey(dr) + + _, ok := d.cache[key] + if ok { + delete(d.cache, key) + } } -func NewDestinationRuleController(clusterID string, stopCh <-chan struct{}, handler DestinationRuleHandler, config *rest.Config, resyncPeriod time.Duration) (*DestinationRuleController, error) { +func (d *DestinationRuleCache) GetDRProcessStatus(dr *networking.DestinationRule) string { + defer d.mutex.Unlock() + d.mutex.Lock() + + key := d.getKey(dr) + + dc, ok := d.cache[key] + if ok { + return dc.Status + } + return common.NotProcessed +} + +func (d *DestinationRuleCache) UpdateDRProcessStatus(dr *networking.DestinationRule, status string) error { + defer d.mutex.Unlock() + d.mutex.Lock() + + key := d.getKey(dr) + + dc, ok := d.cache[key] + if ok { + + dc.Status = status + d.cache[key] = dc + return nil + } + + return fmt.Errorf("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "Update", "DestinationRule", + dr.Name, dr.Namespace, "", "nothing to update, destinationrule not found in cache") +} + +func NewDestinationRuleController(stopCh <-chan struct{}, handler DestinationRuleHandler, clusterID string, config *rest.Config, resyncPeriod time.Duration, clientLoader loader.ClientLoader) (*DestinationRuleController, error) { drController := DestinationRuleController{} drController.DestinationRuleHandler = handler + drCache := DestinationRuleCache{} + drCache.cache = make(map[string]*DestinationRuleItem) + drCache.mutex = &sync.RWMutex{} + drController.Cache = &drCache + + drController.Cluster = clusterID + var err error - ic, err := versioned.NewForConfig(config) + ic, err := clientLoader.LoadIstioClientFromConfig(config) if err != nil { return nil, fmt.Errorf("failed to create destination rule controller k8s client: %v", err) } @@ -48,24 +155,68 @@ func NewDestinationRuleController(clusterID string, stopCh <-chan struct{}, hand drController.informer = informers.NewDestinationRuleInformer(ic, k8sV1.NamespaceAll, resyncPeriod, cache.Indexers{}) - mcd := admiral.NewMonitoredDelegator(&drController, clusterID, "destinationrule") - admiral.NewController("destinationrule-ctrl-"+config.Host, stopCh, mcd, drController.informer) + admiral.NewController("destinationrule-ctrl", config.Host, stopCh, &drController, drController.informer) return &drController, nil } -func (sec *DestinationRuleController) Added(ctx context.Context, ojb interface{}) { - dr := ojb.(*networking.DestinationRule) - sec.DestinationRuleHandler.Added(ctx, dr) +func (drc *DestinationRuleController) Added(ctx context.Context, obj interface{}) error { + dr, ok := obj.(*networking.DestinationRule) + if !ok { + return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.DestinationRule", obj) + } + drc.Cache.Put(dr) + return drc.DestinationRuleHandler.Added(ctx, dr) +} + +func (drc *DestinationRuleController) Updated(ctx context.Context, obj interface{}, oldObj interface{}) error { + dr, ok := obj.(*networking.DestinationRule) + if !ok { + return 
fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.DestinationRule", obj) + } + drc.Cache.Put(dr) + return drc.DestinationRuleHandler.Updated(ctx, dr) +} + +func (drc *DestinationRuleController) Deleted(ctx context.Context, obj interface{}) error { + dr, ok := obj.(*networking.DestinationRule) + if !ok { + return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.DestinationRule", obj) + } + drc.Cache.Delete(dr) + return drc.DestinationRuleHandler.Deleted(ctx, dr) } -func (sec *DestinationRuleController) Updated(ctx context.Context, ojb interface{}, oldObj interface{}) { - dr := ojb.(*networking.DestinationRule) - sec.DestinationRuleHandler.Updated(ctx, dr) +func (drc *DestinationRuleController) GetProcessItemStatus(obj interface{}) (string, error) { + dr, ok := obj.(*networking.DestinationRule) + if !ok { + return common.NotProcessed, fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.DestinationRule", obj) + } + return drc.Cache.GetDRProcessStatus(dr), nil +} + +func (drc *DestinationRuleController) UpdateProcessItemStatus(obj interface{}, status string) error { + dr, ok := obj.(*networking.DestinationRule) + if !ok { + return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.DestinationRule", obj) + } + return drc.Cache.UpdateDRProcessStatus(dr, status) } -func (sec *DestinationRuleController) Deleted(ctx context.Context, ojb interface{}) { - dr := ojb.(*networking.DestinationRule) - sec.DestinationRuleHandler.Deleted(ctx, dr) +func (drc *DestinationRuleController) LogValueOfAdmiralIoIgnore(obj interface{}) { + dr, ok := obj.(*networking.DestinationRule) + if !ok { + return + } + if len(dr.Annotations) > 0 && dr.Annotations[common.AdmiralIgnoreAnnotation] == "true" { + log.Infof("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "admiralIoIgnoreAnnotationCheck", "DestinationRule", dr.Name, dr.Namespace, "", "Value=true") + } +} +func (drc *DestinationRuleController) Get(ctx context.Context, isRetry bool, obj interface{}) (interface{}, error) { + /*dr, ok := obj.(*networking.DestinationRule) + if ok && d.IstioClient != nil { + return d.IstioClient.NetworkingV1alpha3().DestinationRules(dr.Namespace).Get(ctx, dr.Name, meta_v1.GetOptions{}) + }*/ + return nil, fmt.Errorf("istio client is not initialized, txId=%s", ctx.Value("txId")) } diff --git a/admiral/pkg/controller/istio/destinationrule_test.go b/admiral/pkg/controller/istio/destinationrule_test.go index 5112e0c4..c4ce9106 100644 --- a/admiral/pkg/controller/istio/destinationrule_test.go +++ b/admiral/pkg/controller/istio/destinationrule_test.go @@ -2,18 +2,193 @@ package istio import ( "context" + "fmt" + "strings" + "sync" "testing" "time" + coreV1 "k8s.io/api/core/v1" + + "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + "github.com/google/go-cmp/cmp" "github.com/istio-ecosystem/admiral/admiral/pkg/test" + "github.com/stretchr/testify/assert" "google.golang.org/protobuf/testing/protocmp" v1alpha32 "istio.io/api/networking/v1alpha3" "istio.io/client-go/pkg/apis/networking/v1alpha3" + networking "istio.io/client-go/pkg/apis/networking/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/clientcmd" ) +func TestDestinationRuleAdded(t *testing.T) { + + mockDestinationRuleHandler := &test.MockDestinationRuleHandler{} + ctx := context.Background() + destinationRuleController := DestinationRuleController{ + DestinationRuleHandler: 
mockDestinationRuleHandler, + Cache: NewDestinationRuleCache(), + } + + testCases := []struct { + name string + destinationRule interface{} + expectedError error + }{ + { + name: "Given context and DestinationRule " + + "When DestinationRule param is nil " + + "Then func should return an error", + destinationRule: nil, + expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.DestinationRule"), + }, + { + name: "Given context and DestinationRule " + + "When DestinationRule param is not of type *v1alpha3.DestinationRule " + + "Then func should return an error", + destinationRule: struct{}{}, + expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.DestinationRule"), + }, + { + name: "Given context and DestinationRule " + + "When DestinationRule param is of type *v1alpha3.DestinationRule " + + "Then func should not return an error", + destinationRule: &networking.DestinationRule{}, + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + err := destinationRuleController.Added(ctx, tc.destinationRule) + if tc.expectedError != nil { + assert.NotNil(t, err) + assert.Equal(t, tc.expectedError.Error(), err.Error()) + } else { + if err != nil { + assert.Fail(t, "expected error to be nil but got %v", err) + } + } + + }) + } + +} + +func TestDestinationRuleUpdated(t *testing.T) { + + mockDestinationRuleHandler := &test.MockDestinationRuleHandler{} + ctx := context.Background() + destinationRuleController := DestinationRuleController{ + DestinationRuleHandler: mockDestinationRuleHandler, + Cache: NewDestinationRuleCache(), + } + + testCases := []struct { + name string + destinationRule interface{} + expectedError error + }{ + { + name: "Given context and DestinationRule " + + "When DestinationRule param is nil " + + "Then func should return an error", + destinationRule: nil, + expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.DestinationRule"), + }, + { + name: "Given context and DestinationRule " + + "When DestinationRule param is not of type *v1alpha3.DestinationRule " + + "Then func should return an error", + destinationRule: struct{}{}, + expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.DestinationRule"), + }, + { + name: "Given context and DestinationRule " + + "When DestinationRule param is of type *v1alpha3.DestinationRule " + + "Then func should not return an error", + destinationRule: &networking.DestinationRule{}, + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + err := destinationRuleController.Updated(ctx, tc.destinationRule, nil) + if tc.expectedError != nil { + assert.NotNil(t, err) + assert.Equal(t, tc.expectedError.Error(), err.Error()) + } else { + if err != nil { + assert.Fail(t, "expected error to be nil but got %v", err) + } + } + + }) + } + +} + +func TestDestinationRuleDeleted(t *testing.T) { + + mockDestinationRuleHandler := &test.MockDestinationRuleHandler{} + ctx := context.Background() + destinationRuleController := DestinationRuleController{ + DestinationRuleHandler: mockDestinationRuleHandler, + Cache: NewDestinationRuleCache(), + } + + testCases := []struct { + name string + destinationRule interface{} + expectedError error + }{ + { + name: "Given context and DestinationRule " + + "When DestinationRule param is nil " + + "Then func should return an error", + destinationRule: nil, + expectedError: fmt.Errorf("type assertion failed, is not of type 
*v1alpha3.DestinationRule"), + }, + { + name: "Given context and DestinationRule " + + "When DestinationRule param is not of type *v1alpha3.DestinationRule " + + "Then func should return an error", + destinationRule: struct{}{}, + expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.DestinationRule"), + }, + { + name: "Given context and DestinationRule " + + "When DestinationRule param is of type *v1alpha3.DestinationRule " + + "Then func should not return an error", + destinationRule: &networking.DestinationRule{}, + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + err := destinationRuleController.Deleted(ctx, tc.destinationRule) + if tc.expectedError != nil { + assert.NotNil(t, err) + assert.Equal(t, tc.expectedError.Error(), err.Error()) + } else { + if err != nil { + assert.Fail(t, "expected error to be nil but got %v", err) + } + } + + }) + } + +} + func TestNewDestinationRuleController(t *testing.T) { config, err := clientcmd.BuildConfigFromFlags("", "../../test/resources/admins@fake-cluster.k8s.local") if err != nil { @@ -22,7 +197,7 @@ func TestNewDestinationRuleController(t *testing.T) { stop := make(chan struct{}) handler := test.MockDestinationRuleHandler{} - destinationRuleController, err := NewDestinationRuleController("", stop, &handler, config, time.Duration(1000)) + destinationRuleController, err := NewDestinationRuleController(stop, &handler, "cluster-id1", config, time.Duration(1000), loader.GetFakeClientLoader()) if err != nil { t.Errorf("Unexpected err %v", err) @@ -54,3 +229,166 @@ func TestNewDestinationRuleController(t *testing.T) { t.Errorf("Handler should have no obj") } } + +// TODO: This is just a placeholder for when we add diff check for other types +func TestDestinationRuleGetProcessItemStatus(t *testing.T) { + destinationRuleController := DestinationRuleController{} + testCases := []struct { + name string + obj interface{} + expectedRes string + }{ + { + name: "TODO: Currently always returns false", + obj: nil, + expectedRes: common.NotProcessed, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res, _ := destinationRuleController.GetProcessItemStatus(tc.obj) + assert.Equal(t, tc.expectedRes, res) + }) + } +} + +func TestDestinationRuleUpdateProcessItemStatus(t *testing.T) { + var ( + serviceAccount = &coreV1.ServiceAccount{} + + dr1 = &networking.DestinationRule{ + ObjectMeta: v1.ObjectMeta{ + Name: "debug-incache", + Namespace: "namespace1", + Annotations: map[string]string{"other-annotation": "value"}}} + + dr2 = &networking.DestinationRule{ + ObjectMeta: v1.ObjectMeta{ + Name: "debug2-incache", + Namespace: "namespace1", + Annotations: map[string]string{"other-annotation": "value"}}} + + drNotInCache = &networking.DestinationRule{ + ObjectMeta: v1.ObjectMeta{ + Name: "debug", + Namespace: "namespace1", + Annotations: map[string]string{"other-annotation": "value"}}} + + diffNsDrNotInCache = &networking.DestinationRule{ + ObjectMeta: v1.ObjectMeta{ + Name: "debug", + Namespace: "namespace2", + Annotations: map[string]string{"other-annotation": "value"}}} + ) + + drCache := &DestinationRuleCache{ + cache: make(map[string]*DestinationRuleItem), + mutex: &sync.RWMutex{}, + } + + destinationRuleController := &DestinationRuleController{ + Cache: drCache, + } + + drCache.Put(dr1) + drCache.Put(dr2) + + cases := []struct { + name string + obj interface{} + statusToSet string + expectedErr error + expectedStatus string + }{ + { + name: "Given dr cache 
has a valid dr in its cache, " + + "Then, the status for the valid dr should be updated to processed", + obj: dr1, + statusToSet: common.Processed, + expectedErr: nil, + expectedStatus: common.Processed, + }, + { + name: "Given dr cache has a valid dr in its cache, " + + "Then, the status for the valid dr should be updated to not processed", + obj: dr2, + statusToSet: common.NotProcessed, + expectedErr: nil, + expectedStatus: common.NotProcessed, + }, + { + name: "Given dr cache does not have a valid dr in its cache, " + + "Then, the status for the valid dr should be not processed, " + + "And an error should be returned with the dr not found message", + obj: drNotInCache, + statusToSet: common.NotProcessed, + expectedErr: fmt.Errorf("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "Update", "DestinationRule", + drNotInCache.Name, drNotInCache.Namespace, "", "nothing to update, destinationrule not found in cache"), + expectedStatus: common.NotProcessed, + }, + { + name: "Given dr cache does not have a valid dr in its cache, " + + "And dr is in a different namespace, " + + "Then, the status for the valid dr should be not processed, " + + "And an error should be returned with the dr not found message", + obj: diffNsDrNotInCache, + statusToSet: common.NotProcessed, + expectedErr: fmt.Errorf("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "Update", "DestinationRule", + diffNsDrNotInCache.Name, diffNsDrNotInCache.Namespace, "", "nothing to update, destinationrule not found in cache"), + expectedStatus: common.NotProcessed, + }, + { + name: "Given ServiceAccount is passed to the function, " + + "Then, the function should not panic, " + + "And return an error", + obj: serviceAccount, + expectedErr: fmt.Errorf("type assertion failed"), + expectedStatus: common.NotProcessed, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + err := destinationRuleController.UpdateProcessItemStatus(c.obj, c.statusToSet) + if err != nil && c.expectedErr == nil { + t.Errorf("unexpected error: %v", err) + } + if err == nil && c.expectedErr != nil { + t.Errorf("expected error: %v", c.expectedErr) + } + if err != nil && c.expectedErr != nil && !strings.Contains(err.Error(), c.expectedErr.Error()) { + t.Errorf("expected: %v, got: %v", c.expectedErr, err) + } + status, _ := destinationRuleController.GetProcessItemStatus(c.obj) + assert.Equal(t, c.expectedStatus, status) + }) + } +} + +func TestLogValueOfAdmiralIoIgnore(t *testing.T) { + // Test case 1: obj is not a DestinationRule object + sec := &DestinationRuleController{} + sec.LogValueOfAdmiralIoIgnore("not a destination rule") + // No error should occur + + // Test case 2: DestinationRule has no annotations + sec = &DestinationRuleController{} + sec.LogValueOfAdmiralIoIgnore(&networking.DestinationRule{}) + // No error should occur + + // Test case 3: AdmiralIgnoreAnnotation is not set + sec = &DestinationRuleController{} + dr := &networking.DestinationRule{ + ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{"other-annotation": "value"}}} + sec.LogValueOfAdmiralIoIgnore(dr) + // No error should occur + + // Test case 4: AdmiralIgnoreAnnotation is set + sec = &DestinationRuleController{} + dr = &networking.DestinationRule{ObjectMeta: v1.ObjectMeta{ + Annotations: map[string]string{common.AdmiralIgnoreAnnotation: "true"}}} + sec.LogValueOfAdmiralIoIgnore(dr) + // No error should occur +} diff --git a/admiral/pkg/controller/istio/serviceentry.go b/admiral/pkg/controller/istio/serviceentry.go index 
2fd97375..7c2ab9ef 100644 --- a/admiral/pkg/controller/istio/serviceentry.go +++ b/admiral/pkg/controller/istio/serviceentry.go @@ -5,6 +5,12 @@ import ( "fmt" "time" + "sync" + + "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + log "github.com/sirupsen/logrus" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral" networking "istio.io/client-go/pkg/apis/networking/v1alpha3" versioned "istio.io/client-go/pkg/clientset/versioned" @@ -16,30 +22,137 @@ import ( // Handler interface contains the methods that are required type ServiceEntryHandler interface { - Added(obj *networking.ServiceEntry) - Updated(obj *networking.ServiceEntry) - Deleted(obj *networking.ServiceEntry) -} - -type ServiceEntryEntry struct { - Identity string - ServiceEntry *networking.ServiceEntry + Added(obj *networking.ServiceEntry) error + Updated(obj *networking.ServiceEntry) error + Deleted(obj *networking.ServiceEntry) error } type ServiceEntryController struct { IstioClient versioned.Interface ServiceEntryHandler ServiceEntryHandler informer cache.SharedIndexInformer + Cache *ServiceEntryCache + Cluster string +} + +type ServiceEntryItem struct { + ServiceEntry *networking.ServiceEntry + Status string +} + +type ServiceEntryCache struct { + cache map[string]map[string]*ServiceEntryItem + mutex *sync.RWMutex +} + +func NewServiceEntryCache() *ServiceEntryCache { + return &ServiceEntryCache{ + cache: map[string]map[string]*ServiceEntryItem{}, + mutex: &sync.RWMutex{}, + } +} + +func (d *ServiceEntryCache) getKey(se *networking.ServiceEntry) string { + return se.Name +} + +func (d *ServiceEntryCache) Put(se *networking.ServiceEntry, cluster string) { + defer d.mutex.Unlock() + d.mutex.Lock() + key := d.getKey(se) + + var ( + seInCluster map[string]*ServiceEntryItem + ) + + if value, ok := d.cache[cluster]; !ok { + seInCluster = make(map[string]*ServiceEntryItem) + } else { + seInCluster = value + } + + seInCluster[key] = &ServiceEntryItem{ + ServiceEntry: se, + Status: common.ProcessingInProgress, + } + + d.cache[cluster] = seInCluster +} + +func (d *ServiceEntryCache) Get(identity string, cluster string) *networking.ServiceEntry { + defer d.mutex.Unlock() + d.mutex.Lock() + + seInCluster, ok := d.cache[cluster] + if ok { + se, ok := seInCluster[identity] + if ok { + return se.ServiceEntry + } + } + log.Infof("no service entry found in cache for identity=%s cluster=%s", identity, cluster) + return nil +} + +func (d *ServiceEntryCache) Delete(se *networking.ServiceEntry, cluster string) { + defer d.mutex.Unlock() + d.mutex.Lock() + + seInCluster, ok := d.cache[cluster] + if ok { + delete(seInCluster, d.getKey(se)) + } +} + +func (d *ServiceEntryCache) GetSEProcessStatus(se *networking.ServiceEntry, cluster string) string { + defer d.mutex.Unlock() + d.mutex.Lock() + + seInCluster, ok := d.cache[cluster] + if ok { + key := d.getKey(se) + sec, ok := seInCluster[key] + if ok { + return sec.Status + } + } + + return common.NotProcessed } -func NewServiceEntryController(clusterID string, stopCh <-chan struct{}, handler ServiceEntryHandler, config *rest.Config, resyncPeriod time.Duration) (*ServiceEntryController, error) { +func (d *ServiceEntryCache) UpdateSEProcessStatus(se *networking.ServiceEntry, cluster string, status string) error { + defer d.mutex.Unlock() + d.mutex.Lock() + + seInCluster, ok := d.cache[cluster] + if ok { + key := d.getKey(se) + sec, ok := seInCluster[key] + if ok { + 
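// Cache hit: update the stored item's status in place and write it back to the per-cluster map.
+ 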
sec.Status = status + seInCluster[key] = sec + return nil + } + } + + return fmt.Errorf("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "Update", "ServiceEntry", + se.Name, se.Namespace, "", "nothing to update, serviceentry not found in cache") +} +func NewServiceEntryController(stopCh <-chan struct{}, handler ServiceEntryHandler, clusterID string, config *rest.Config, resyncPeriod time.Duration, clientLoader loader.ClientLoader) (*ServiceEntryController, error) { seController := ServiceEntryController{} seController.ServiceEntryHandler = handler + seCache := ServiceEntryCache{} + seCache.cache = make(map[string]map[string]*ServiceEntryItem) + seCache.mutex = &sync.RWMutex{} + seController.Cache = &seCache + + seController.Cluster = clusterID + var err error - ic, err := versioned.NewForConfig(config) + ic, err := clientLoader.LoadIstioClientFromConfig(config) if err != nil { return nil, fmt.Errorf("failed to create service entry k8s client: %v", err) } @@ -48,23 +161,69 @@ func NewServiceEntryController(clusterID string, stopCh <-chan struct{}, handler seController.informer = informers.NewServiceEntryInformer(ic, k8sV1.NamespaceAll, resyncPeriod, cache.Indexers{}) - mcd := admiral.NewMonitoredDelegator(&seController, clusterID, "serviceentry") - admiral.NewController("serviceentry-ctrl-"+config.Host, stopCh, mcd, seController.informer) + admiral.NewController("serviceentry-ctrl", config.Host, stopCh, &seController, seController.informer) return &seController, nil } -func (sec *ServiceEntryController) Added(ctx context.Context, ojb interface{}) { - se := ojb.(*networking.ServiceEntry) - sec.ServiceEntryHandler.Added(se) +func (sec *ServiceEntryController) Added(ctx context.Context, obj interface{}) error { + se, ok := obj.(*networking.ServiceEntry) + if !ok { + return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.ServiceEntry", obj) + } + sec.Cache.Put(se, sec.Cluster) + return sec.ServiceEntryHandler.Added(se) +} + +func (sec *ServiceEntryController) Updated(ctx context.Context, obj interface{}, oldObj interface{}) error { + se, ok := obj.(*networking.ServiceEntry) + if !ok { + return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.ServiceEntry", obj) + } + sec.Cache.Put(se, sec.Cluster) + return sec.ServiceEntryHandler.Updated(se) +} + +func (sec *ServiceEntryController) Deleted(ctx context.Context, obj interface{}) error { + se, ok := obj.(*networking.ServiceEntry) + if !ok { + return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.ServiceEntry", obj) + } + + sec.Cache.Delete(se, sec.Cluster) + return sec.ServiceEntryHandler.Deleted(se) +} + +func (sec *ServiceEntryController) GetProcessItemStatus(obj interface{}) (string, error) { + se, ok := obj.(*networking.ServiceEntry) + if !ok { + return common.NotProcessed, fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.ServiceEntry", obj) + } + return sec.Cache.GetSEProcessStatus(se, sec.Cluster), nil } -func (sec *ServiceEntryController) Updated(ctx context.Context, ojb interface{}, oldObj interface{}) { - se := ojb.(*networking.ServiceEntry) - sec.ServiceEntryHandler.Updated(se) +func (sec *ServiceEntryController) UpdateProcessItemStatus(obj interface{}, status string) error { + se, ok := obj.(*networking.ServiceEntry) + if !ok { + return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.ServiceEntry", obj) + } + return sec.Cache.UpdateSEProcessStatus(se, sec.Cluster, status) +} + +func (sec *ServiceEntryController) LogValueOfAdmiralIoIgnore(obj 
interface{}) { + se, ok := obj.(*networking.ServiceEntry) + if !ok { + return + } + if len(se.Annotations) > 0 && se.Annotations[common.AdmiralIgnoreAnnotation] == "true" { + log.Infof("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "admiralIoIgnoreAnnotationCheck", "ServiceEntry", se.Name, se.Namespace, "", "Value=true") + } } -func (sec *ServiceEntryController) Deleted(ctx context.Context, ojb interface{}) { - se := ojb.(*networking.ServiceEntry) - sec.ServiceEntryHandler.Deleted(se) +func (sec *ServiceEntryController) Get(ctx context.Context, isRetry bool, obj interface{}) (interface{}, error) { + /*se, ok := obj.(*networking.ServiceEntry) + if ok && sec.IstioClient != nil { + return sec.IstioClient.NetworkingV1alpha3().ServiceEntries(se.Namespace).Get(ctx, se.Name, meta_v1.GetOptions{}) + }*/ + return nil, fmt.Errorf("istio client is not initialized, txId=%s", ctx.Value("txId")) } diff --git a/admiral/pkg/controller/istio/serviceentry_test.go b/admiral/pkg/controller/istio/serviceentry_test.go index 31b0ba20..bc56e387 100644 --- a/admiral/pkg/controller/istio/serviceentry_test.go +++ b/admiral/pkg/controller/istio/serviceentry_test.go @@ -2,18 +2,204 @@ package istio import ( "context" + "fmt" + "strings" + "sync" "testing" "time" + coreV1 "k8s.io/api/core/v1" + + "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + "github.com/google/go-cmp/cmp" "github.com/istio-ecosystem/admiral/admiral/pkg/test" + "github.com/stretchr/testify/assert" "google.golang.org/protobuf/testing/protocmp" "istio.io/api/networking/v1alpha3" + networking "istio.io/client-go/pkg/apis/networking/v1alpha3" v1alpha32 "istio.io/client-go/pkg/apis/networking/v1alpha3" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/clientcmd" ) +func TestServiceEntryAdded(t *testing.T) { + + mockServiceEntryHandler := &test.MockServiceEntryHandler{} + ctx := context.Background() + serviceEntryController := ServiceEntryController{ + ServiceEntryHandler: mockServiceEntryHandler, + Cluster: "testCluster", + Cache: &ServiceEntryCache{ + cache: map[string]map[string]*ServiceEntryItem{}, + mutex: &sync.RWMutex{}, + }, + } + + testCases := []struct { + name string + serviceEntry interface{} + expectedError error + }{ + { + name: "Given context and ServiceEntry " + + "When ServiceEntry param is nil " + + "Then func should return an error", + serviceEntry: nil, + expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.ServiceEntry"), + }, + { + name: "Given context and ServiceEntry " + + "When ServiceEntry param is not of type *v1alpha3.ServiceEntry " + + "Then func should return an error", + serviceEntry: struct{}{}, + expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.ServiceEntry"), + }, + { + name: "Given context and ServiceEntry " + + "When ServiceEntry param is of type *v1alpha3.ServiceEntry " + + "Then func should not return an error", + serviceEntry: &networking.ServiceEntry{}, + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := serviceEntryController.Added(ctx, tc.serviceEntry) + if tc.expectedError != nil { + assert.NotNil(t, err) + assert.Equal(t, tc.expectedError.Error(), err.Error()) + } else { + if err != nil { + assert.Fail(t, "expected error to be nil but got %v", err) + } + } + + }) + } + +} + +func TestServiceEntryUpdated(t *testing.T) { + + mockServiceEntryHandler := 
&test.MockServiceEntryHandler{} + ctx := context.Background() + serviceEntryController := ServiceEntryController{ + ServiceEntryHandler: mockServiceEntryHandler, + Cluster: "testCluster", + Cache: &ServiceEntryCache{ + cache: map[string]map[string]*ServiceEntryItem{}, + mutex: &sync.RWMutex{}, + }, + } + + testCases := []struct { + name string + serviceEntry interface{} + expectedError error + }{ + { + name: "Given context and ServiceEntry " + + "When ServiceEntry param is nil " + + "Then func should return an error", + serviceEntry: nil, + expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.ServiceEntry"), + }, + { + name: "Given context and ServiceEntry " + + "When ServiceEntry param is not of type *v1alpha3.ServiceEntry " + + "Then func should return an error", + serviceEntry: struct{}{}, + expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.ServiceEntry"), + }, + { + name: "Given context and ServiceEntry " + + "When ServiceEntry param is of type *v1alpha3.ServiceEntry " + + "Then func should not return an error", + serviceEntry: &networking.ServiceEntry{}, + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + err := serviceEntryController.Updated(ctx, tc.serviceEntry, nil) + if tc.expectedError != nil { + assert.NotNil(t, err) + assert.Equal(t, tc.expectedError.Error(), err.Error()) + } else { + if err != nil { + assert.Fail(t, "expected error to be nil but got %v", err) + } + } + + }) + } + +} + +func TestServiceEntryDeleted(t *testing.T) { + + mockServiceEntryHandler := &test.MockServiceEntryHandler{} + ctx := context.Background() + serviceEntryController := ServiceEntryController{ + ServiceEntryHandler: mockServiceEntryHandler, + Cluster: "testCluster", + Cache: &ServiceEntryCache{ + cache: map[string]map[string]*ServiceEntryItem{}, + mutex: &sync.RWMutex{}, + }, + } + + testCases := []struct { + name string + serviceEntry interface{} + expectedError error + }{ + { + name: "Given context and ServiceEntry " + + "When ServiceEntry param is nil " + + "Then func should return an error", + serviceEntry: nil, + expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.ServiceEntry"), + }, + { + name: "Given context and ServiceEntry " + + "When ServiceEntry param is not of type *v1alpha3.ServiceEntry " + + "Then func should return an error", + serviceEntry: struct{}{}, + expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.ServiceEntry"), + }, + { + name: "Given context and ServiceEntry " + + "When ServiceEntry param is of type *v1alpha3.ServiceEntry " + + "Then func should not return an error", + serviceEntry: &networking.ServiceEntry{}, + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + err := serviceEntryController.Deleted(ctx, tc.serviceEntry) + if tc.expectedError != nil { + assert.NotNil(t, err) + assert.Equal(t, tc.expectedError.Error(), err.Error()) + } else { + if err != nil { + assert.Fail(t, "expected error to be nil but got %v", err) + } + } + + }) + } + +} + func TestNewServiceEntryController(t *testing.T) { config, err := clientcmd.BuildConfigFromFlags("", "../../test/resources/admins@fake-cluster.k8s.local") if err != nil { @@ -22,7 +208,7 @@ func TestNewServiceEntryController(t *testing.T) { stop := make(chan struct{}) handler := test.MockServiceEntryHandler{} - serviceEntryController, err := NewServiceEntryController("test", stop, &handler, config, time.Duration(1000)) + 
serviceEntryController, err := NewServiceEntryController(stop, &handler, "testCluster", config, time.Duration(1000), loader.GetFakeClientLoader()) if err != nil { t.Errorf("Unexpected err %v", err) @@ -55,3 +241,164 @@ func TestNewServiceEntryController(t *testing.T) { t.Errorf("Handler should have no obj") } } + +// TODO: This is just a placeholder for when we add diff check for other types +func TestServiceEntryGetProcessItemStatus(t *testing.T) { + serviceEntryController := ServiceEntryController{} + testCases := []struct { + name string + obj interface{} + expectedRes string + }{ + { + name: "TODO: Currently always returns false", + obj: nil, + expectedRes: common.NotProcessed, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res, _ := serviceEntryController.GetProcessItemStatus(tc.obj) + assert.Equal(t, tc.expectedRes, res) + }) + } +} + +func TestServiceEntryUpdateProcessItemStatus(t *testing.T) { + var ( + serviceAccount = &coreV1.ServiceAccount{} + + se1 = &networking.ServiceEntry{ + ObjectMeta: v1.ObjectMeta{ + Name: "debug-incache", + Namespace: "namespace1", + Annotations: map[string]string{"other-annotation": "value"}}} + + se2 = &networking.ServiceEntry{ + ObjectMeta: v1.ObjectMeta{ + Name: "debug2-incache", + Namespace: "namespace1", + Annotations: map[string]string{"other-annotation": "value"}}} + + seNotInCache = &networking.ServiceEntry{ + ObjectMeta: v1.ObjectMeta{ + Name: "debug", + Namespace: "namespace1", + Annotations: map[string]string{"other-annotation": "value"}}} + + diffNsSeNotInCache = &networking.ServiceEntry{ + ObjectMeta: v1.ObjectMeta{ + Name: "debug", + Namespace: "namespace2", + Annotations: map[string]string{"other-annotation": "value"}}} + ) + + seCache := &ServiceEntryCache{ + cache: make(map[string]map[string]*ServiceEntryItem), + mutex: &sync.RWMutex{}, + } + + serviceentryController := &ServiceEntryController{ + Cluster: "cluster1", + Cache: seCache, + } + + seCache.Put(se1, "cluster1") + seCache.Put(se2, "cluster1") + + cases := []struct { + name string + obj interface{} + statusToSet string + expectedErr error + expectedStatus string + }{ + { + name: "Given se cache has a valid se in its cache, " + + "Then, the status for the valid se should be updated to processed", + obj: se1, + statusToSet: common.Processed, + expectedErr: nil, + expectedStatus: common.Processed, + }, + { + name: "Given se cache has a valid se in its cache, " + + "Then, the status for the valid se should be updated to not processed", + obj: se2, + statusToSet: common.NotProcessed, + expectedErr: nil, + expectedStatus: common.NotProcessed, + }, + { + name: "Given se cache does not have a valid se in its cache, " + + "Then, the status for the valid se should be not processed, " + + "And an error should be returned with the se not found message", + obj: seNotInCache, + statusToSet: common.NotProcessed, + expectedErr: fmt.Errorf("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "Update", "ServiceEntry", + seNotInCache.Name, seNotInCache.Namespace, "", "nothing to update, serviceentry not found in cache"), + expectedStatus: common.NotProcessed, + }, + { + name: "Given se cache does not have a valid se in its cache, " + + "And se is in a different namespace, " + + "Then, the status for the valid se should be not processed, " + + "And an error should be returned with the se not found message", + obj: diffNsSeNotInCache, + statusToSet: common.NotProcessed, + expectedErr: fmt.Errorf("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", 
"Update", "ServiceEntry", + diffNsSeNotInCache.Name, diffNsSeNotInCache.Namespace, "", "nothing to update, serviceentry not found in cache"), + expectedStatus: common.NotProcessed, + }, + { + name: "Given ServiceAccount is passed to the function, " + + "Then, the function should not panic, " + + "And return an error", + obj: serviceAccount, + expectedErr: fmt.Errorf("type assertion failed"), + expectedStatus: common.NotProcessed, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + err := serviceentryController.UpdateProcessItemStatus(c.obj, c.statusToSet) + if err != nil && c.expectedErr == nil { + t.Errorf("unexpected error: %v", err) + } + if err == nil && c.expectedErr != nil { + t.Errorf("expected error: %v", c.expectedErr) + } + if err != nil && c.expectedErr != nil && !strings.Contains(err.Error(), c.expectedErr.Error()) { + t.Errorf("expected: %v, got: %v", c.expectedErr, err) + } + status, _ := serviceentryController.GetProcessItemStatus(c.obj) + assert.Equal(t, c.expectedStatus, status) + }) + } +} + +func TestServiceEntryLogValueOfAdmiralIoIgnore(t *testing.T) { + // Test case 1: obj is not a ServiceEntry object + sec := &ServiceEntryController{} + sec.LogValueOfAdmiralIoIgnore("not a service entry") + // No error should occur + + // Test case 2: ServiceEntry has no annotations + sec = &ServiceEntryController{} + sec.LogValueOfAdmiralIoIgnore(&networking.ServiceEntry{}) + // No error should occur + + // Test case 3: AdmiralIgnoreAnnotation is not set + sec = &ServiceEntryController{} + se := &networking.ServiceEntry{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{"other-annotation": "value"}}} + sec.LogValueOfAdmiralIoIgnore(se) + // No error should occur + + // Test case 4: AdmiralIgnoreAnnotation is set in annotations + sec = &ServiceEntryController{} + se = &networking.ServiceEntry{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.AdmiralIgnoreAnnotation: "true"}}} + sec.LogValueOfAdmiralIoIgnore(se) + // No error should occur +} diff --git a/admiral/pkg/controller/istio/sidecar.go b/admiral/pkg/controller/istio/sidecar.go index 9a59a112..12b8a1eb 100644 --- a/admiral/pkg/controller/istio/sidecar.go +++ b/admiral/pkg/controller/istio/sidecar.go @@ -5,6 +5,9 @@ import ( "fmt" "time" + "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral" networking "istio.io/client-go/pkg/apis/networking/v1alpha3" versioned "istio.io/client-go/pkg/clientset/versioned" @@ -16,9 +19,9 @@ import ( // SidecarHandler interface contains the methods that are required type SidecarHandler interface { - Added(ctx context.Context, obj *networking.Sidecar) - Updated(ctx context.Context, obj *networking.Sidecar) - Deleted(ctx context.Context, obj *networking.Sidecar) + Added(ctx context.Context, obj *networking.Sidecar) error + Updated(ctx context.Context, obj *networking.Sidecar) error + Deleted(ctx context.Context, obj *networking.Sidecar) error } type SidecarEntry struct { @@ -32,14 +35,14 @@ type SidecarController struct { informer cache.SharedIndexInformer } -func NewSidecarController(clusterID string, stopCh <-chan struct{}, handler SidecarHandler, config *rest.Config, resyncPeriod time.Duration) (*SidecarController, error) { +func NewSidecarController(stopCh <-chan struct{}, handler SidecarHandler, config *rest.Config, resyncPeriod time.Duration, clientLoader loader.ClientLoader) 
(*SidecarController, error) { sidecarController := SidecarController{} sidecarController.SidecarHandler = handler var err error - ic, err := versioned.NewForConfig(config) + ic, err := clientLoader.LoadIstioClientFromConfig(config) if err != nil { return nil, fmt.Errorf("failed to create sidecar controller k8s client: %v", err) } @@ -48,24 +51,50 @@ func NewSidecarController(clusterID string, stopCh <-chan struct{}, handler Side sidecarController.informer = informers.NewSidecarInformer(ic, k8sV1.NamespaceAll, resyncPeriod, cache.Indexers{}) - mcd := admiral.NewMonitoredDelegator(&sidecarController, clusterID, "sidecar") - admiral.NewController("sidecar-ctrl-"+config.Host, stopCh, mcd, sidecarController.informer) + admiral.NewController("sidecar-ctrl", config.Host, stopCh, &sidecarController, sidecarController.informer) return &sidecarController, nil } -func (sec *SidecarController) Added(ctx context.Context, ojb interface{}) { - sidecar := ojb.(*networking.Sidecar) - sec.SidecarHandler.Added(ctx, sidecar) +func (sec *SidecarController) Added(ctx context.Context, obj interface{}) error { + sidecar, ok := obj.(*networking.Sidecar) + if !ok { + return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.Sidecar", obj) + } + return sec.SidecarHandler.Added(ctx, sidecar) +} + +func (sec *SidecarController) Updated(ctx context.Context, obj interface{}, oldObj interface{}) error { + sidecar, ok := obj.(*networking.Sidecar) + if !ok { + return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.Sidecar", obj) + } + return sec.SidecarHandler.Updated(ctx, sidecar) } -func (sec *SidecarController) Updated(ctx context.Context, ojb interface{}, oldObj interface{}) { - sidecar := ojb.(*networking.Sidecar) - sec.SidecarHandler.Updated(ctx, sidecar) +func (sec *SidecarController) Deleted(ctx context.Context, obj interface{}) error { + sidecar, ok := obj.(*networking.Sidecar) + if !ok { + return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.Sidecar", obj) + } + return sec.SidecarHandler.Deleted(ctx, sidecar) } -func (sec *SidecarController) Deleted(ctx context.Context, ojb interface{}) { - sidecar := ojb.(*networking.Sidecar) - sec.SidecarHandler.Deleted(ctx, sidecar) +func (sec *SidecarController) GetProcessItemStatus(obj interface{}) (string, error) { + return common.NotProcessed, nil +} + +func (sec *SidecarController) UpdateProcessItemStatus(obj interface{}, status string) error { + return nil +} + +func (sec *SidecarController) LogValueOfAdmiralIoIgnore(obj interface{}) { +} +func (sec *SidecarController) Get(ctx context.Context, isRetry bool, obj interface{}) (interface{}, error) { + /*sidecar, ok := obj.(*networking.Sidecar) + if ok && sec.IstioClient != nil { + return sec.IstioClient.NetworkingV1alpha3().Sidecars(sidecar.Namespace).Get(ctx, sidecar.Name, meta_v1.GetOptions{}) + }*/ + return nil, fmt.Errorf("istio client is not initialized, txId=%s", ctx.Value("txId")) } diff --git a/admiral/pkg/controller/istio/sidecar_test.go b/admiral/pkg/controller/istio/sidecar_test.go index 9b2a95a3..45ec3f95 100644 --- a/admiral/pkg/controller/istio/sidecar_test.go +++ b/admiral/pkg/controller/istio/sidecar_test.go @@ -2,11 +2,16 @@ package istio import ( "context" + "fmt" "testing" "time" + "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + "github.com/google/go-cmp/cmp" "github.com/istio-ecosystem/admiral/admiral/pkg/test" + 
"github.com/stretchr/testify/assert" "google.golang.org/protobuf/testing/protocmp" v1alpha32 "istio.io/api/networking/v1alpha3" "istio.io/client-go/pkg/apis/networking/v1alpha3" @@ -14,6 +19,168 @@ import ( "k8s.io/client-go/tools/clientcmd" ) +func TestSidecarAdded(t *testing.T) { + + mockSidecarHandler := &test.MockSidecarHandler{} + ctx := context.Background() + sidecarController := SidecarController{ + SidecarHandler: mockSidecarHandler, + } + + testCases := []struct { + name string + sidecar interface{} + expectedError error + }{ + { + name: "Given context and sidecar " + + "When sidecar param is nil " + + "Then func should return an error", + sidecar: nil, + expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.Sidecar"), + }, + { + name: "Given context and sidecar " + + "When sidecar param is not of type *v1alpha3.Sidecar " + + "Then func should return an error", + sidecar: struct{}{}, + expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.Sidecar"), + }, + { + name: "Given context and Sidecar " + + "When Sidecar param is of type *v1alpha3.Sidecar " + + "Then func should not return an error", + sidecar: &v1alpha3.Sidecar{}, + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + err := sidecarController.Added(ctx, tc.sidecar) + if tc.expectedError != nil { + assert.NotNil(t, err) + assert.Equal(t, tc.expectedError.Error(), err.Error()) + } else { + if err != nil { + assert.Fail(t, "expected error to be nil but got %v", err) + } + } + + }) + } + +} + +func TestSidecarUpdated(t *testing.T) { + + mockSidecarHandler := &test.MockSidecarHandler{} + ctx := context.Background() + sidecarController := SidecarController{ + SidecarHandler: mockSidecarHandler, + } + + testCases := []struct { + name string + sidecar interface{} + expectedError error + }{ + { + name: "Given context and sidecar " + + "When sidecar param is nil " + + "Then func should return an error", + sidecar: nil, + expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.Sidecar"), + }, + { + name: "Given context and sidecar " + + "When sidecar param is not of type *v1alpha3.Sidecar " + + "Then func should return an error", + sidecar: struct{}{}, + expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.Sidecar"), + }, + { + name: "Given context and Sidecar " + + "When Sidecar param is of type *v1alpha3.Sidecar " + + "Then func should not return an error", + sidecar: &v1alpha3.Sidecar{}, + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + err := sidecarController.Updated(ctx, tc.sidecar, nil) + if tc.expectedError != nil { + assert.NotNil(t, err) + assert.Equal(t, tc.expectedError.Error(), err.Error()) + } else { + if err != nil { + assert.Fail(t, "expected error to be nil but got %v", err) + } + } + + }) + } + +} + +func TestSidecarDeleted(t *testing.T) { + + mockSidecarHandler := &test.MockSidecarHandler{} + ctx := context.Background() + sidecarController := SidecarController{ + SidecarHandler: mockSidecarHandler, + } + + testCases := []struct { + name string + sidecar interface{} + expectedError error + }{ + { + name: "Given context and sidecar " + + "When sidecar param is nil " + + "Then func should return an error", + sidecar: nil, + expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.Sidecar"), + }, + { + name: "Given context and sidecar " + + "When sidecar param is not of type *v1alpha3.Sidecar " + 
+ "Then func should return an error", + sidecar: struct{}{}, + expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.Sidecar"), + }, + { + name: "Given context and Sidecar " + + "When Sidecar param is of type *v1alpha3.Sidecar " + + "Then func should not return an error", + sidecar: &v1alpha3.Sidecar{}, + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + err := sidecarController.Deleted(ctx, tc.sidecar) + if tc.expectedError != nil { + assert.NotNil(t, err) + assert.Equal(t, tc.expectedError.Error(), err.Error()) + } else { + if err != nil { + assert.Fail(t, "expected error to be nil but got %v", err) + } + } + + }) + } + +} + func TestNewSidecarController(t *testing.T) { config, err := clientcmd.BuildConfigFromFlags("", "../../test/resources/admins@fake-cluster.k8s.local") if err != nil { @@ -22,7 +189,7 @@ func TestNewSidecarController(t *testing.T) { stop := make(chan struct{}) handler := test.MockSidecarHandler{} - sidecarController, err := NewSidecarController("", stop, &handler, config, time.Duration(1000)) + sidecarController, err := NewSidecarController(stop, &handler, config, time.Duration(1000), loader.GetFakeClientLoader()) if err != nil { t.Errorf("Unexpected err %v", err) @@ -55,3 +222,49 @@ func TestNewSidecarController(t *testing.T) { t.Errorf("Handler should have no obj") } } + +// TODO: This is just a placeholder for when we add diff check for other types +func TestSideCarGetProcessItemStatus(t *testing.T) { + sidecarController := SidecarController{} + testCases := []struct { + name string + obj interface{} + expectedRes string + }{ + { + name: "TODO: Currently always returns false", + obj: nil, + expectedRes: common.NotProcessed, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res, _ := sidecarController.GetProcessItemStatus(tc.obj) + assert.Equal(t, tc.expectedRes, res) + }) + } +} + +// TODO: This is just a placeholder for when we add diff check for other types +func TestSideCarUpdateProcessItemStatus(t *testing.T) { + sidecarController := SidecarController{} + testCases := []struct { + name string + obj interface{} + expectedErr error + }{ + { + name: "TODO: Currently always returns nil", + obj: nil, + expectedErr: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := sidecarController.UpdateProcessItemStatus(tc.obj, common.NotProcessed) + assert.Equal(t, tc.expectedErr, err) + }) + } +} diff --git a/admiral/pkg/controller/istio/virtualservice.go b/admiral/pkg/controller/istio/virtualservice.go index 6914f049..0f98880c 100644 --- a/admiral/pkg/controller/istio/virtualservice.go +++ b/admiral/pkg/controller/istio/virtualservice.go @@ -5,6 +5,10 @@ import ( "fmt" "time" + "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + log "github.com/sirupsen/logrus" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral" networking "istio.io/client-go/pkg/apis/networking/v1alpha3" "istio.io/client-go/pkg/clientset/versioned" @@ -16,9 +20,9 @@ import ( // VirtualServiceHandler interface contains the methods that are required type VirtualServiceHandler interface { - Added(ctx context.Context, obj *networking.VirtualService) - Updated(ctx context.Context, obj *networking.VirtualService) - Deleted(ctx context.Context, obj *networking.VirtualService) + Added(ctx context.Context, obj 
*networking.VirtualService) error + Updated(ctx context.Context, obj *networking.VirtualService) error + Deleted(ctx context.Context, obj *networking.VirtualService) error } type VirtualServiceController struct { @@ -27,40 +31,72 @@ type VirtualServiceController struct { informer cache.SharedIndexInformer } -func NewVirtualServiceController(clusterID string, stopCh <-chan struct{}, handler VirtualServiceHandler, config *rest.Config, resyncPeriod time.Duration) (*VirtualServiceController, error) { +func NewVirtualServiceController(stopCh <-chan struct{}, handler VirtualServiceHandler, config *rest.Config, resyncPeriod time.Duration, clientLoader loader.ClientLoader) (*VirtualServiceController, error) { - drController := VirtualServiceController{} - drController.VirtualServiceHandler = handler + vsController := VirtualServiceController{} + vsController.VirtualServiceHandler = handler var err error - ic, err := versioned.NewForConfig(config) + ic, err := clientLoader.LoadIstioClientFromConfig(config) if err != nil { return nil, fmt.Errorf("failed to create virtual service controller k8s client: %v", err) } - drController.IstioClient = ic + vsController.IstioClient = ic + vsController.informer = informers.NewVirtualServiceInformer(ic, k8sV1.NamespaceAll, resyncPeriod, cache.Indexers{}) + + admiral.NewController("virtualservice-ctrl", config.Host, stopCh, &vsController, vsController.informer) + + return &vsController, nil +} - drController.informer = informers.NewVirtualServiceInformer(ic, k8sV1.NamespaceAll, resyncPeriod, cache.Indexers{}) +func (sec *VirtualServiceController) Added(ctx context.Context, obj interface{}) error { + dr, ok := obj.(*networking.VirtualService) + if !ok { + return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.VirtualService", obj) + } + return sec.VirtualServiceHandler.Added(ctx, dr) +} - mcd := admiral.NewMonitoredDelegator(&drController, clusterID, "virtualservice") - admiral.NewController("virtualservice-ctrl-"+config.Host, stopCh, mcd, drController.informer) +func (sec *VirtualServiceController) Updated(ctx context.Context, obj interface{}, oldObj interface{}) error { + dr, ok := obj.(*networking.VirtualService) + if !ok { + return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.VirtualService", obj) + } + return sec.VirtualServiceHandler.Updated(ctx, dr) +} - return &drController, nil +func (sec *VirtualServiceController) Deleted(ctx context.Context, obj interface{}) error { + dr, ok := obj.(*networking.VirtualService) + if !ok { + return fmt.Errorf("type assertion failed, %v is not of type *v1alpha3.VirtualService", obj) + } + return sec.VirtualServiceHandler.Deleted(ctx, dr) } -func (sec *VirtualServiceController) Added(ctx context.Context, ojb interface{}) { - dr := ojb.(*networking.VirtualService) - sec.VirtualServiceHandler.Added(ctx, dr) +func (sec *VirtualServiceController) GetProcessItemStatus(obj interface{}) (string, error) { + return common.NotProcessed, nil } -func (sec *VirtualServiceController) Updated(ctx context.Context, ojb interface{}, oldObj interface{}) { - dr := ojb.(*networking.VirtualService) - sec.VirtualServiceHandler.Updated(ctx, dr) +func (sec *VirtualServiceController) UpdateProcessItemStatus(obj interface{}, status string) error { + return nil } -func (sec *VirtualServiceController) Deleted(ctx context.Context, ojb interface{}) { - dr := ojb.(*networking.VirtualService) - sec.VirtualServiceHandler.Deleted(ctx, dr) +func (sec *VirtualServiceController) LogValueOfAdmiralIoIgnore(obj interface{}) { + vs, 
ok := obj.(*networking.VirtualService) + if !ok { + return + } + if len(vs.Annotations) > 0 && vs.Annotations[common.AdmiralIgnoreAnnotation] == "true" { + log.Infof("op=%s type=%v name=%v namespace=%s cluster=%s message=%s", "admiralIoIgnoreAnnotationCheck", "VirtualService", vs.Name, vs.Namespace, "", "Value=true") + } +} +func (sec *VirtualServiceController) Get(ctx context.Context, isRetry bool, obj interface{}) (interface{}, error) { + /*vs, ok := obj.(*networking.VirtualService) + if ok && sec.IstioClient != nil { + return sec.IstioClient.NetworkingV1alpha3().VirtualServices(vs.Namespace).Get(ctx, vs.Name, meta_v1.GetOptions{}) + }*/ + return nil, fmt.Errorf("istio client is not initialized, txId=%s", ctx.Value("txId")) } diff --git a/admiral/pkg/controller/istio/virtualservice_test.go b/admiral/pkg/controller/istio/virtualservice_test.go index b905cafa..5a30616e 100644 --- a/admiral/pkg/controller/istio/virtualservice_test.go +++ b/admiral/pkg/controller/istio/virtualservice_test.go @@ -2,11 +2,16 @@ package istio import ( "context" + "fmt" "testing" "time" + "github.com/istio-ecosystem/admiral/admiral/pkg/client/loader" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + "github.com/google/go-cmp/cmp" "github.com/istio-ecosystem/admiral/admiral/pkg/test" + "github.com/stretchr/testify/assert" "google.golang.org/protobuf/testing/protocmp" v1alpha32 "istio.io/api/networking/v1alpha3" "istio.io/client-go/pkg/apis/networking/v1alpha3" @@ -14,6 +19,168 @@ import ( "k8s.io/client-go/tools/clientcmd" ) +func TestAdded(t *testing.T) { + + mockVirtualServiceHandler := &test.MockVirtualServiceHandler{} + ctx := context.Background() + virtualServiceController := VirtualServiceController{ + VirtualServiceHandler: mockVirtualServiceHandler, + } + + testCases := []struct { + name string + virtualService interface{} + expectedError error + }{ + { + name: "Given context and virtualService " + + "When virtualservice param is nil " + + "Then func should return an error", + virtualService: nil, + expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.VirtualService"), + }, + { + name: "Given context and virtualService " + + "When virtualservice param is not of type *v1alpha3.VirtualService " + + "Then func should return an error", + virtualService: struct{}{}, + expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.VirtualService"), + }, + { + name: "Given context and virtualService " + + "When virtualservice param is of type *v1alpha3.VirtualService " + + "Then func should not return an error", + virtualService: &v1alpha3.VirtualService{}, + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + err := virtualServiceController.Added(ctx, tc.virtualService) + if tc.expectedError != nil { + assert.NotNil(t, err) + assert.Equal(t, tc.expectedError.Error(), err.Error()) + } else { + if err != nil { + assert.Fail(t, "expected error to be nil but got %v", err) + } + } + + }) + } + +} + +func TestUpdated(t *testing.T) { + + mockVirtualServiceHandler := &test.MockVirtualServiceHandler{} + ctx := context.Background() + virtualServiceController := VirtualServiceController{ + VirtualServiceHandler: mockVirtualServiceHandler, + } + + testCases := []struct { + name string + virtualService interface{} + expectedError error + }{ + { + name: "Given context and virtualService " + + "When virtualservice param is nil " + + "Then func should return an error", + 
virtualService: nil, + expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.VirtualService"), + }, + { + name: "Given context and virtualService " + + "When virtualservice param is not of type *v1alpha3.VirtualService " + + "Then func should return an error", + virtualService: struct{}{}, + expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.VirtualService"), + }, + { + name: "Given context and virtualService " + + "When virtualservice param is of type *v1alpha3.VirtualService " + + "Then func should not return an error", + virtualService: &v1alpha3.VirtualService{}, + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + err := virtualServiceController.Updated(ctx, tc.virtualService, nil) + if tc.expectedError != nil { + assert.NotNil(t, err) + assert.Equal(t, tc.expectedError.Error(), err.Error()) + } else { + if err != nil { + assert.Fail(t, "expected error to be nil but got %v", err) + } + } + + }) + } + +} + +func TestDeleted(t *testing.T) { + + mockVirtualServiceHandler := &test.MockVirtualServiceHandler{} + ctx := context.Background() + virtualServiceController := VirtualServiceController{ + VirtualServiceHandler: mockVirtualServiceHandler, + } + + testCases := []struct { + name string + virtualService interface{} + expectedError error + }{ + { + name: "Given context and virtualService " + + "When virtualservice param is nil " + + "Then func should return an error", + virtualService: nil, + expectedError: fmt.Errorf("type assertion failed, is not of type *v1alpha3.VirtualService"), + }, + { + name: "Given context and virtualService " + + "When virtualservice param is not of type *v1alpha3.VirtualService " + + "Then func should return an error", + virtualService: struct{}{}, + expectedError: fmt.Errorf("type assertion failed, {} is not of type *v1alpha3.VirtualService"), + }, + { + name: "Given context and virtualService " + + "When virtualservice param is of type *v1alpha3.VirtualService " + + "Then func should not return an error", + virtualService: &v1alpha3.VirtualService{}, + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + err := virtualServiceController.Deleted(ctx, tc.virtualService) + if tc.expectedError != nil { + assert.NotNil(t, err) + assert.Equal(t, tc.expectedError.Error(), err.Error()) + } else { + if err != nil { + assert.Fail(t, "expected error to be nil but got %v", err) + } + } + + }) + } + +} + func TestNewVirtualServiceController(t *testing.T) { config, err := clientcmd.BuildConfigFromFlags("", "../../test/resources/admins@fake-cluster.k8s.local") if err != nil { @@ -22,7 +189,7 @@ func TestNewVirtualServiceController(t *testing.T) { stop := make(chan struct{}) handler := test.MockVirtualServiceHandler{} - virtualServiceController, err := NewVirtualServiceController("", stop, &handler, config, time.Duration(1000)) + virtualServiceController, err := NewVirtualServiceController(stop, &handler, config, time.Duration(1000), loader.GetFakeClientLoader()) if err != nil { t.Errorf("Unexpected err %v", err) @@ -56,3 +223,73 @@ func TestNewVirtualServiceController(t *testing.T) { t.Errorf("Handler should have no obj") } } + +// TODO: This is just a placeholder for when we add diff check for other types +func TestVirtualServiceGetProcessItemStatus(t *testing.T) { + virtualServiceController := VirtualServiceController{} + testCases := []struct { + name string + obj interface{} + expectedRes string + }{ + { + name: 
"TODO: Currently always returns false", + obj: nil, + expectedRes: common.NotProcessed, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + res, _ := virtualServiceController.GetProcessItemStatus(tc.obj) + assert.Equal(t, tc.expectedRes, res) + }) + } +} + +// TODO: This is just a placeholder for when we add diff check for other types +func TestVirtualServiceUpdateProcessItemStatus(t *testing.T) { + virtualServiceController := VirtualServiceController{} + testCases := []struct { + name string + obj interface{} + expectedErr error + }{ + { + name: "TODO: Currently always returns nil", + obj: nil, + expectedErr: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + err := virtualServiceController.UpdateProcessItemStatus(tc.obj, common.NotProcessed) + assert.Equal(t, tc.expectedErr, err) + }) + } +} + +func TestVirtualServiceLogValueOfAdmiralIoIgnore(t *testing.T) { + // Test case 1: obj is not a VirtualService object + sec := &VirtualServiceController{} + sec.LogValueOfAdmiralIoIgnore("not a virtual service") + // No error should occur + + // Test case 2: VirtualService has no annotations + sec = &VirtualServiceController{} + sec.LogValueOfAdmiralIoIgnore(&v1alpha3.VirtualService{}) + // No error should occur + + // Test case 3: AdmiralIgnoreAnnotation is not set + sec = &VirtualServiceController{} + vs := &v1alpha3.VirtualService{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{"other-annotation": "value"}}} + sec.LogValueOfAdmiralIoIgnore(vs) + // No error should occur + + // Test case 4: AdmiralIgnoreAnnotation is set in annotations + sec = &VirtualServiceController{} + vs = &v1alpha3.VirtualService{ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.AdmiralIgnoreAnnotation: "true"}}} + sec.LogValueOfAdmiralIoIgnore(vs) + // No error should occur +} diff --git a/admiral/pkg/controller/secret/resolver/defaultresolver_test.go b/admiral/pkg/controller/secret/resolver/defaultresolver_test.go new file mode 100644 index 00000000..2a0fdaf7 --- /dev/null +++ b/admiral/pkg/controller/secret/resolver/defaultresolver_test.go @@ -0,0 +1,41 @@ +package resolver + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNewDefaultResolver(t *testing.T) { + resolver, err := NewDefaultResolver() + assert.NotNil(t, resolver, "DefaultResolver should not be nil") + assert.Nil(t, err, "Error while new instance creation should be nil") +} + +func TestDefaultResolver_FetchKubeConfig(t *testing.T) { + expectedKubeConfig := ` +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: ca_data + server: https://example.com + name: example-cluster +contexts: +- context: + cluster: example-cluster + user: example-user + name: example-context +current-context: example-context +kind: Config +preferences: {} +users: +- name: example-user + user: + client-certificate-data: cert_data + client-key-data: key_data +` + resolver, _ := NewDefaultResolver() + kconfig, err := resolver.FetchKubeConfig("", []byte(expectedKubeConfig)) + assert.Equal(t, []byte(expectedKubeConfig), kconfig) + assert.Nil(t, err, "Expected error to be nil") +} diff --git a/admiral/pkg/controller/secret/secretcontroller.go b/admiral/pkg/controller/secret/secretcontroller.go index 5e8d1674..0440609e 100644 --- a/admiral/pkg/controller/secret/secretcontroller.go +++ b/admiral/pkg/controller/secret/secretcontroller.go @@ -20,52 +20,59 @@ import ( "fmt" "time" + "github.com/istio-ecosystem/admiral/admiral/pkg/client" + 
"github.com/istio-ecosystem/admiral/admiral/pkg/registry" + "github.com/istio-ecosystem/admiral/admiral/pkg/util" + idps_sdk "github.intuit.com/idps/idps-go-sdk/v3/idps-sdk" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" - "github.com/istio-ecosystem/admiral/admiral/pkg/controller/secret/resolver" - log "github.com/sirupsen/logrus" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/workqueue" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/secret/resolver" + log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/util/workqueue" ) const ( - filterLabel = "admiral/sync" - maxRetries = 5 + maxRetries = 5 ) // LoadKubeConfig is a unit test override variable for loading the k8s config. // DO NOT USE - TEST ONLY. var LoadKubeConfig = clientcmd.Load +var remoteClustersMetric common.Gauge + // addSecretCallback prototype for the add secret callback function. -type addSecretCallback func(config *rest.Config, dataKey string, resyncPeriod time.Duration) error +type addSecretCallback func(config *rest.Config, dataKey string, resyncPeriod util.ResyncIntervals) error // updateSecretCallback prototype for the update secret callback function. -type updateSecretCallback func(config *rest.Config, dataKey string, resyncPeriod time.Duration) error +type updateSecretCallback func(config *rest.Config, dataKey string, resyncPeriod util.ResyncIntervals) error // removeSecretCallback prototype for the remove secret callback function. 
type removeSecretCallback func(dataKey string) error // Controller is the controller implementation for Secret resources type Controller struct { - kubeclientset kubernetes.Interface - namespace string - Cs *ClusterStore - queue workqueue.RateLimitingInterface - informer cache.SharedIndexInformer - addCallback addSecretCallback - updateCallback updateSecretCallback - removeCallback removeSecretCallback - secretResolver resolver.SecretResolver + kubeclientset kubernetes.Interface + namespace string + Cs *ClusterStore + queue workqueue.RateLimitingInterface + informer cache.SharedIndexInformer + addCallback addSecretCallback + updateCallback updateSecretCallback + removeCallback removeSecretCallback + secretResolver resolver.SecretResolver + clusterShardStoreHandler registry.ClusterShardStore } // RemoteCluster defines cluster struct @@ -86,6 +93,12 @@ func newClustersStore() *ClusterStore { } } +type IdpsSdkWrapper struct{} + +func (c *IdpsSdkWrapper) IdpsClientInstanceFromMap(props map[string]string) (client.IdpsClientInterface, error) { + return idps_sdk.IdpsClientInstanceFromMap(props) +} + // NewController returns a new secret controller func NewController( kubeclientset kubernetes.Interface, @@ -94,17 +107,18 @@ func NewController( addCallback addSecretCallback, updateCallback updateSecretCallback, removeCallback removeSecretCallback, - secretResolverType string) *Controller { + admiralProfile string, + secretResolverConfig string) *Controller { ctx := context.Background() secretsInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(opts meta_v1.ListOptions) (runtime.Object, error) { - opts.LabelSelector = filterLabel + "=true" + opts.LabelSelector = common.GetSecretFilterTags() + "=true" return kubeclientset.CoreV1().Secrets(namespace).List(ctx, opts) }, WatchFunc: func(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.LabelSelector = filterLabel + "=true" + opts.LabelSelector = common.GetSecretFilterTags() + "=true" return kubeclientset.CoreV1().Secrets(namespace).Watch(ctx, opts) }, }, @@ -115,11 +129,16 @@ func NewController( var secretResolver resolver.SecretResolver var err error - if len(secretResolverType) == 0 { + + if admiralProfile == common.AdmiralProfileIntuit { + log.Info("Initializing Intuit secret resolver") + idpsClientProviderWrapper := &IdpsSdkWrapper{} + secretResolver, err = resolver.NewIDPSResolver(secretResolverConfig, idpsClientProviderWrapper) + } else if admiralProfile == common.AdmiralProfileDefault || admiralProfile == common.AdmiralProfilePerf { log.Info("Initializing default secret resolver") secretResolver, err = resolver.NewDefaultResolver() } else { - err = fmt.Errorf("unrecognized secret resolver type %v specified", secretResolverType) + err = fmt.Errorf("unrecognized secret resolver type %v specified", admiralProfile) } if err != nil { @@ -128,15 +147,16 @@ func NewController( } controller := &Controller{ - kubeclientset: kubeclientset, - namespace: namespace, - Cs: cs, - informer: secretsInformer, - queue: queue, - addCallback: addCallback, - updateCallback: updateCallback, - removeCallback: removeCallback, - secretResolver: secretResolver, + kubeclientset: kubeclientset, + namespace: namespace, + Cs: cs, + informer: secretsInformer, + queue: queue, + addCallback: addCallback, + updateCallback: updateCallback, + removeCallback: removeCallback, + secretResolver: secretResolver, + clusterShardStoreHandler: registry.NewClusterShardStoreHandler(), } log.Info("Setting up event handlers") @@ -163,12 +183,17 @@ func
NewController( } }, }) + + remoteClustersMetric = common.NewGaugeFrom(common.ClustersMonitoredMetricName, "Gauge for the clusters monitored by Admiral") return controller } // Run starts the controller until it receives a message over stopCh func (c *Controller) Run(stopCh <-chan struct{}) { defer utilruntime.HandleCrash() + if c == nil { + return + } defer c.queue.ShutDown() log.Info("Starting Secrets controller") @@ -188,16 +213,12 @@ func (c *Controller) Run(stopCh <-chan struct{}) { // StartSecretController creates the secret controller. func StartSecretController( - ctx context.Context, - k8s kubernetes.Interface, - addCallback addSecretCallback, - updateCallback updateSecretCallback, - removeCallback removeSecretCallback, - namespace string, - secretResolverType string) (*Controller, error) { + ctx context.Context, k8s kubernetes.Interface, addCallback addSecretCallback, + updateCallback updateSecretCallback, removeCallback removeSecretCallback, + namespace, admiralProfile, secretResolverConfig string) (*Controller, error) { clusterStore := newClustersStore() - controller := NewController(k8s, namespace, clusterStore, addCallback, updateCallback, removeCallback, secretResolverType) + controller := NewController(k8s, namespace, clusterStore, addCallback, updateCallback, removeCallback, admiralProfile, secretResolverConfig) go controller.Run(ctx.Done()) @@ -289,6 +310,10 @@ func (c *Controller) createRemoteCluster(kubeConfig []byte, secretName string, c } func (c *Controller) addMemberCluster(secretName string, s *corev1.Secret) { + shard, err := getShardNameFromClusterSecret(s) + if err != nil { + log.Errorf("unable to find shard information from secret") + } for clusterID, kubeConfig := range s.Data { // clusterID must be unique even across multiple secrets if prev, ok := c.Cs.RemoteClusters[clusterID]; !ok { @@ -304,11 +329,15 @@ func (c *Controller) addMemberCluster(secretName string, s *corev1.Secret) { c.Cs.RemoteClusters[clusterID] = remoteCluster - if err := c.addCallback(restConfig, clusterID, common.GetAdmiralParams().CacheRefreshDuration); err != nil { + if err := c.addCallback(restConfig, clusterID, common.GetResyncIntervals()); err != nil { log.Errorf("error during secret loading for clusterID: %s %v", clusterID, err) continue } - + err = c.addClusterToShard(clusterID, shard) + if err != nil { + log.Errorf("error adding cluster=%s to shard=%s", clusterID, shard) + continue + } log.Infof("Secret loaded for cluster %s in the secret %s in namespace %s.", clusterID, c.Cs.RemoteClusters[clusterID].secretName, s.ObjectMeta.Namespace) } else { @@ -328,14 +357,19 @@ func (c *Controller) addMemberCluster(secretName string, s *corev1.Secret) { } c.Cs.RemoteClusters[clusterID] = remoteCluster - if err := c.updateCallback(restConfig, clusterID, common.GetAdmiralParams().CacheRefreshDuration); err != nil { + if err := c.updateCallback(restConfig, clusterID, common.GetResyncIntervals()); err != nil { log.Errorf("Error updating cluster_id from secret=%v: %s %v", clusterID, secretName, err) } + err = c.addClusterToShard(clusterID, shard) + if err != nil { + log.Errorf("error adding cluster=%s to shard=%s", clusterID, shard) + continue + } } - } - common.RemoteClustersMetric.Set(float64(len(c.Cs.RemoteClusters))) + + remoteClustersMetric.Set(float64(len(c.Cs.RemoteClusters))) log.Infof("Number of remote clusters: %d", len(c.Cs.RemoteClusters)) } @@ -350,6 +384,38 @@ func (c *Controller) deleteMemberCluster(secretName string) { delete(c.Cs.RemoteClusters, clusterID) } } - 
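For reference, when Admiral runs in state-syncer mode, addMemberCluster above expects the shard name as an annotation on the cluster secret, alongside the label the informer selects on. A hedged sketch of that secret shape, mirroring the test fixtures later in this diff (the name, namespace, and kubeconfigBytes placeholder are illustrative):

secret := &corev1.Secret{
	ObjectMeta: meta_v1.ObjectMeta{
		Name:      "remote-cluster-secret", // illustrative
		Namespace: "admiral-sync",          // illustrative
		Labels: map[string]string{
			common.GetSecretFilterTags(): "true", // matched by the informer's label selector
		},
		Annotations: map[string]string{
			util.SecretShardKey: "shard1", // read by getShardNameFromClusterSecret below
		},
	},
	Data: map[string][]byte{"cluster1": kubeconfigBytes}, // clusterID -> kubeconfig
}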
common.RemoteClustersMetric.Set(float64(len(c.Cs.RemoteClusters))) + remoteClustersMetric.Set(float64(len(c.Cs.RemoteClusters))) log.Infof("Number of remote clusters: %d", len(c.Cs.RemoteClusters)) } + +func getShardNameFromClusterSecret(secret *corev1.Secret) (string, error) { + if !common.IsAdmiralStateSyncerMode() { + return "", nil + } + if secret == nil { + return "", fmt.Errorf("nil secret passed") + } + annotation := secret.GetAnnotations() + if len(annotation) == 0 { + return "", fmt.Errorf("no annotations found on secret=%s", secret.GetName()) + } + shard, ok := annotation[util.SecretShardKey] + if ok { + return shard, nil + } + return "", fmt.Errorf("shard not found") +} +func (c *Controller) addClusterToShard(cluster, shard string) error { + if !common.IsAdmiralStateSyncerMode() { + return nil + } + return c.clusterShardStoreHandler.AddClusterToShard(cluster, shard) +} + +// TODO: invoke function in delete workflow +func (c *Controller) removeClusterFromShard(cluster, shard string) error { + if !common.IsAdmiralStateSyncerMode() { + return nil + } + return c.clusterShardStoreHandler.RemoveClusterFromShard(cluster, shard) +} diff --git a/admiral/pkg/controller/secret/secretcontroller_test.go b/admiral/pkg/controller/secret/secretcontroller_test.go index 6e02bddf..d7e131b0 100644 --- a/admiral/pkg/controller/secret/secretcontroller_test.go +++ b/admiral/pkg/controller/secret/secretcontroller_test.go @@ -17,37 +17,61 @@ package secret import ( "context" "fmt" + "reflect" "sync" "testing" "time" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + "github.com/istio-ecosystem/admiral/admiral/pkg/util" "github.com/prometheus/client_golang/prometheus" io_prometheus_client "github.com/prometheus/client_model/go" + coreV1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/client-go/rest" . 
"github.com/onsi/gomega" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - pkgtest "github.com/istio-ecosystem/admiral/admiral/pkg/test" + registryMocks "github.com/istio-ecosystem/admiral/admiral/pkg/registry/mocks" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" ) -const secretName string = "testSecretName" -const secretNameSpace string = "istio-system" +const ( + secretName string = "testSecretName" + secretNameSpace string = "istio-system" +) -var testCreateControllerCalled bool -var testDeleteControllerCalled bool +var ( + testCreateControllerCalled bool + testDeleteControllerCalled bool +) -func makeSecret(secret, clusterID string, kubeconfig []byte) *v1.Secret { - return &v1.Secret{ +func makeSecret(secret, clusterID string, kubeconfig []byte) *coreV1.Secret { + return &coreV1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secret, Namespace: secretNameSpace, Labels: map[string]string{ - filterLabel: "true", + common.GetSecretFilterTags(): "true", + }, + }, + Data: map[string][]byte{ + clusterID: kubeconfig, + }, + } +} + +func makeSecretWithCustomFilterTag(secret, clusterID string, kubeconfig []byte, secretFilterTag string) *coreV1.Secret { + return &coreV1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secret, + Namespace: secretNameSpace, + Labels: map[string]string{ + secretFilterTag: "true", }, }, Data: map[string][]byte{ @@ -63,14 +87,14 @@ var ( deleted string ) -func addCallback(config *rest.Config, id string, resyncPeriod time.Duration) error { +func addCallback(config *rest.Config, id string, resyncPeriod util.ResyncIntervals) error { mu.Lock() defer mu.Unlock() added = id return nil } -func updateCallback(config *rest.Config, id string, resyncPeriod time.Duration) error { +func updateCallback(config *rest.Config, id string, resyncPeriod util.ResyncIntervals) error { mu.Lock() defer mu.Unlock() updated = id @@ -102,7 +126,7 @@ func testDeleteController(clusterID string) error { func createMultiClusterSecret(k8s *fake.Clientset) error { data := map[string][]byte{} - secret := v1.Secret{ + secret := coreV1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, Namespace: secretNameSpace, @@ -140,61 +164,47 @@ func mockLoadKubeConfig(kubeconfig []byte) (*clientcmdapi.Config, error) { return config, nil } -func verifyControllerDeleted(t *testing.T, timeoutName string) { - pkgtest.NewEventualOpts(10*time.Millisecond, 5*time.Second).Eventually(t, timeoutName, func() bool { - return testDeleteControllerCalled == true - }) -} - -func verifyControllerCreated(t *testing.T, timeoutName string) { - pkgtest.NewEventualOpts(10*time.Millisecond, 5*time.Second).Eventually(t, timeoutName, func() bool { - return testCreateControllerCalled == true - }) -} +func Test_SecretFilterTags(t *testing.T) { + g := NewWithT(t) -/* -func Test_SecretController(t *testing.T) { LoadKubeConfig = mockLoadKubeConfig - clientset := fake.NewSimpleClientset() + secretFilterTag := "admiral/test-filter-tag" - // Start the secret controller and sleep to allow secret process to start. 
- err := StartSecretController( - clientset, testCreateController, testDeleteController, secretNameSpace, context.TODO(), "") - if err != nil { - t.Fatalf("Could not start secret controller: %v", err) + p := common.AdmiralParams{ + MetricsEnabled: true, + SecretFilterTags: secretFilterTag, } - time.Sleep(100 * time.Millisecond) - // Create the multicluster secret. - err = createMultiClusterSecret(clientset) - if err != nil { - t.Fatalf("Unexpected error on secret create: %v", err) - } + common.InitializeConfig(p) - verifyControllerCreated(t, "Create remote secret controller") + secret := makeSecretWithCustomFilterTag("s0", "c0", []byte("kubeconfig0-0"), secretFilterTag) - if testDeleteControllerCalled != false { - t.Fatalf("Test failed on create secret, delete callback function called") - } + g.Expect(common.GetSecretFilterTags()).Should(Equal(secretFilterTag)) // Check if the secret filter tag is set correctly on the config + g.Expect(secret.Labels[common.GetSecretFilterTags()]).Should(Equal("true")) // Check if the secret filter tag matches the one set on the config to watch. + + - // Reset test variables and delete the multicluster secret. - testCreateControllerCalled = false - testDeleteControllerCalled = false +func Test_SecretFilterTagsMismatch(t *testing.T) { + g := NewWithT(t) - err = deleteMultiClusterSecret(clientset) - if err != nil { - t.Fatalf("Unexpected error on secret delete: %v", err) - } + LoadKubeConfig = mockLoadKubeConfig - // Test - Verify that the remote controller has been removed. - verifyControllerDeleted(t, "delete remote secret controller") + secretFilterTag := "admiral/test-filter-tag" - // Test - if testCreateControllerCalled != false { - t.Fatalf("Test failed on delete secret, create callback function called") + p := common.AdmiralParams{ + MetricsEnabled: true, + SecretFilterTags: secretFilterTag, } -}*/ + + common.InitializeConfig(p) + + secret := makeSecretWithCustomFilterTag("s0", "c0", []byte("kubeconfig0-0"), "admiral/other-filter-tag") + + g.Expect(common.GetSecretFilterTags()).Should(Equal(secretFilterTag)) // Check if the secret filter tag is set correctly on the config + g.Expect(secret.Labels[common.GetSecretFilterTags()]).Should(Equal("")) // Check if the secret filter tag doesn't match the one set on the config to watch, hence it should be empty. + +} func Test_SecretController(t *testing.T) { g := NewWithT(t) @@ -203,20 +213,23 @@ func Test_SecretController(t *testing.T) { clientset := fake.NewSimpleClientset() + p := common.AdmiralParams{ + MetricsEnabled: true, + SecretFilterTags: "admiral/sync", + } + common.InitializeConfig(p) + var ( secret0 = makeSecret("s0", "c0", []byte("kubeconfig0-0")) secret0UpdateKubeconfigChanged = makeSecret("s0", "c0", []byte("kubeconfig0-1")) secret1 = makeSecret("s1", "c1", []byte("kubeconfig1-0")) ) - p := common.AdmiralParams{MetricsEnabled: true} - common.InitializeConfig(p) - steps := []struct { // only set one of these per step. The others should be nil. - add *v1.Secret - update *v1.Secret - delete *v1.Secret + add *coreV1.Secret + update *coreV1.Secret + delete *coreV1.Secret // only set one of these per step. The others should be empty. wantAdded string @@ -237,7 +250,7 @@ func Test_SecretController(t *testing.T) { // The assertion ShouldNot(BeNil()) make sure that start secret controller return a not nil controller and nil error registry := prometheus.DefaultGatherer g.Expect( - StartSecretController(context.TODO(), clientset, addCallback, updateCallback, deleteCallback, secretNameSpace, "")).
+ StartSecretController(context.TODO(), clientset, addCallback, updateCallback, deleteCallback, secretNameSpace, common.AdmiralProfileDefault, "")). ShouldNot(BeNil()) ctx := context.Background() @@ -299,3 +312,239 @@ func Test_SecretController(t *testing.T) { }) } } + +func TestGetShardNameFromClusterSecret(t *testing.T) { + cases := []struct { + name string + secret *corev1.Secret + stateSyncerMode bool + want string + wantErr error + }{ + { + name: "Given secret is empty, " + + "When function is invoked, " + + "It should return an error", + stateSyncerMode: true, + secret: nil, + want: "", + wantErr: fmt.Errorf("nil secret passed"), + }, + { + name: "Given secret is empty, " + + "And, state syncer mode is false, " + + "When function is invoked, " + + "It should return an error", + secret: nil, + want: "", + wantErr: nil, + }, + { + name: "Given secret is valid, but does not have annotations, " + + "When function is invoked, " + + "It should return an error", + stateSyncerMode: true, + secret: &coreV1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: secretNameSpace, + Labels: map[string]string{ + common.GetSecretFilterTags(): "true", + }, + }, + }, + want: "", + wantErr: fmt.Errorf("no annotations found on secret=%s", secretName), + }, + { + name: "Given secret is valid, and has valid annotations, " + + "When function is invoked, " + + "It should return a valid value, without any error", + stateSyncerMode: true, + secret: &coreV1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: secretNameSpace, + Annotations: map[string]string{ + util.SecretShardKey: "shard1", + }, + }, + }, + want: "shard1", + wantErr: nil, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + common.ResetSync() + common.InitializeConfig(common.AdmiralParams{ + AdmiralStateSyncerMode: c.stateSyncerMode, + }) + got, err := getShardNameFromClusterSecret(c.secret) + if got != c.want { + t.Errorf("want=%s, got=%s", c.want, got) + } + if !reflect.DeepEqual(err, c.wantErr) { + t.Errorf("want=%v, got=%v", c.wantErr, err) + } + }) + } +} + +func TestAddClusterToShard(t *testing.T) { + var ( + cluster1 = "cluster1" + shard1 = "shard1" + err1 = fmt.Errorf("error1") + simpleShardMock = &registryMocks.ClusterShardStore{} + ) + shardMockWithoutErr := &registryMocks.ClusterShardStore{} + shardMockWithoutErr.On( + "AddClusterToShard", + mock.AnythingOfType("string"), + mock.AnythingOfType("string")).Return(nil) + shardMockWithErr := &registryMocks.ClusterShardStore{} + shardMockWithErr.On( + "AddClusterToShard", + mock.AnythingOfType("string"), + mock.AnythingOfType("string")).Return(err1) + cases := []struct { + name string + stateSyncerMode bool + cluster string + shard string + clusterShardStoreHandler *registryMocks.ClusterShardStore + clusterShardStoreHandlerCalls int + wantErr error + }{ + { + name: "Given state syncer mode is set to false, " + + "When function is invoked, " + + "It should not invoke cluster shard store handler, and should return nil", + cluster: cluster1, + shard: shard1, + clusterShardStoreHandler: simpleShardMock, + clusterShardStoreHandlerCalls: 0, + wantErr: nil, + }, + { + name: "Given state syncer mode is set to true, " + + "When function is invoked, " + + "And AddClusterToShard returns an error, " + + "It should return an error", + stateSyncerMode: true, + cluster: cluster1, + shard: shard1, + clusterShardStoreHandler: shardMockWithErr, + clusterShardStoreHandlerCalls: 1, + wantErr: err1, + }, + { + name: "Given state syncer mode is set to true, "
+ + "When function is invoked, " + + "And AddClusterToShard does not return any error, " + + "It should not return any error", + stateSyncerMode: true, + cluster: cluster1, + shard: shard1, + clusterShardStoreHandler: shardMockWithoutErr, + clusterShardStoreHandlerCalls: 1, + wantErr: nil, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + common.ResetSync() + common.InitializeConfig(common.AdmiralParams{ + AdmiralStateSyncerMode: c.stateSyncerMode, + }) + controller := &Controller{ + clusterShardStoreHandler: c.clusterShardStoreHandler, + } + err := controller.addClusterToShard(c.cluster, c.shard) + if !reflect.DeepEqual(err, c.wantErr) { + t.Errorf("want=%v, got=%v", c.wantErr, err) + } + assert.Equal(t, len(c.clusterShardStoreHandler.ExpectedCalls), c.clusterShardStoreHandlerCalls) + }) + } +} + +func TestRemoveClusterFromShard(t *testing.T) { + var ( + cluster1 = "cluster1" + shard1 = "shard1" + err1 = fmt.Errorf("error1") + simpleShardMock = &registryMocks.ClusterShardStore{} + ) + shardMockWithoutErr := &registryMocks.ClusterShardStore{} + shardMockWithoutErr.On( + "RemoveClusterFromShard", + mock.AnythingOfType("string"), + mock.AnythingOfType("string")).Return(nil) + shardMockWithErr := &registryMocks.ClusterShardStore{} + shardMockWithErr.On( + "RemoveClusterFromShard", + mock.AnythingOfType("string"), + mock.AnythingOfType("string")).Return(err1) + cases := []struct { + name string + stateSyncerMode bool + cluster string + shard string + clusterShardStoreHandler *registryMocks.ClusterShardStore + clusterShardStoreHandlerCalls int + wantErr error + }{ + { + name: "Given state syncer mode is set to false, " + + "When function is invoked, " + + "It should not invoke cluster shard store handler, and should return nil", + cluster: cluster1, + shard: shard1, + clusterShardStoreHandler: simpleShardMock, + clusterShardStoreHandlerCalls: 0, + wantErr: nil, + }, + { + name: "Given state syncer mode is set to true, " + + "When function is invoked, " + + "And RemoveClusterFromShard returns an error, " + + "It should return an error", + stateSyncerMode: true, + cluster: cluster1, + shard: shard1, + clusterShardStoreHandler: shardMockWithErr, + clusterShardStoreHandlerCalls: 1, + wantErr: err1, + }, + { + name: "Given state syncer mode is set to true, " + + "When function is invoked, " + + "And RemoveClusterFromShard does not return any error, " + + "It should not return any error", + stateSyncerMode: true, + cluster: cluster1, + shard: shard1, + clusterShardStoreHandler: shardMockWithoutErr, + clusterShardStoreHandlerCalls: 1, + wantErr: nil, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + common.ResetSync() + common.InitializeConfig(common.AdmiralParams{ + AdmiralStateSyncerMode: c.stateSyncerMode, + }) + controller := &Controller{ + clusterShardStoreHandler: c.clusterShardStoreHandler, + } + err := controller.removeClusterFromShard(c.cluster, c.shard) + if !reflect.DeepEqual(err, c.wantErr) { + t.Errorf("want=%v, got=%v", c.wantErr, err) + } + assert.Equal(t, len(c.clusterShardStoreHandler.ExpectedCalls), c.clusterShardStoreHandlerCalls) + }) + } +} diff --git a/admiral/pkg/controller/util/migration.go b/admiral/pkg/controller/util/migration.go new file mode 100644 index 00000000..0357afeb --- /dev/null +++ b/admiral/pkg/controller/util/migration.go @@ -0,0 +1,59 @@ +package util + +import ( + "fmt" + + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + networking "istio.io/api/networking/v1alpha3" + k8sV1
"k8s.io/api/core/v1" +) + +// UpdateEndpointsForDeployToRolloutMigration creates an SE with the endpoints for both the rollout and deployment +// This is for Deployment <-> Rollout migration +func UpdateEndpointsForDeployToRolloutMigration(serviceInstance map[string]*k8sV1.Service, + serviceEntry *networking.ServiceEntry, meshPorts map[string]map[string]uint32, clusterIngress string, + clusterAppDeleteMap map[string]string, clusterName string, + clusterDeployRolloutPresent map[string]map[string]bool) error { + if serviceInstance[common.Deployment] == nil || serviceInstance[common.Rollout] == nil { + return fmt.Errorf("serviceInstance for Deployment/Rollout is nil as the service cache has not updated yet") + } + + deployLocalFqdn := serviceInstance[common.Deployment].Name + common.Sep + serviceInstance[common.Deployment].Namespace + common.GetLocalDomainSuffix() + rolloutFqdn := serviceInstance[common.Rollout].Name + common.Sep + serviceInstance[common.Rollout].Namespace + common.GetLocalDomainSuffix() + + var uniqueEndpointsList []*networking.WorkloadEntry + for _, ep := range serviceEntry.Endpoints { + // only if the ep.Address is equal to clusterIngress do we append the deployment + // and rollout endpoint for add and update events. + // For delete events we check for which cluster did we get the event for and then + // decide which cluster to remove the deployment or rollout endpoint for. + if ep.Address == clusterIngress { + if clusterAppDeleteMap[clusterName] != common.Deployment && clusterDeployRolloutPresent[clusterName][common.Deployment] { + deployEp := &networking.WorkloadEntry{ + Address: deployLocalFqdn, + Locality: ep.Locality, + Ports: meshPorts[common.Deployment], + Labels: map[string]string{"type": common.Deployment}, + } + uniqueEndpointsList = append(uniqueEndpointsList, deployEp) + } + + if clusterAppDeleteMap[clusterName] != common.Rollout && clusterDeployRolloutPresent[clusterName][common.Rollout] { + rolloutEp := &networking.WorkloadEntry{ + Address: rolloutFqdn, + Locality: ep.Locality, + Ports: meshPorts[common.Rollout], + Labels: map[string]string{"type": common.Rollout}, + } + uniqueEndpointsList = append(uniqueEndpointsList, rolloutEp) + } + } else { + ep.Labels = nil + uniqueEndpointsList = append(uniqueEndpointsList, ep) + } + } + + serviceEntry.Endpoints = uniqueEndpointsList + + return nil +} diff --git a/admiral/pkg/controller/util/migration_test.go b/admiral/pkg/controller/util/migration_test.go new file mode 100644 index 00000000..aee81971 --- /dev/null +++ b/admiral/pkg/controller/util/migration_test.go @@ -0,0 +1,272 @@ +package util + +import ( + "fmt" + "reflect" + "testing" + + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + "github.com/stretchr/testify/assert" + networking "istio.io/api/networking/v1alpha3" + coreV1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestUpdateEndpointsForDeployToRolloutMigration(t *testing.T) { + var ( + foobarMetadataName = "foobar" + foobarMetadataNamespace = "foobar-ns" + identity = "identity" + meshPorts = make(map[string]map[string]uint32) + serviceInstanceDeployNil = make(map[string]*coreV1.Service) + serviceInstanceRolloutNil = make(map[string]*coreV1.Service) + serviceInstance = make(map[string]*coreV1.Service) + clusterName = "test-k8s" + ) + + localAddress := common.LocalAddressPrefix + ".10.1" + + seDeployment := &networking.ServiceEntry{ + Hosts: []string{"e2e.my-first-service.mesh"}, + Addresses: []string{localAddress}, + Ports: 
[]*networking.ServicePort{{Number: uint32(common.DefaultServiceEntryPort), + Name: "http", Protocol: "http"}}, + Location: networking.ServiceEntry_MESH_INTERNAL, + Resolution: networking.ServiceEntry_DNS, + SubjectAltNames: []string{"spiffe://prefix/my-first-service"}, + Endpoints: []*networking.WorkloadEntry{ + {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2", Labels: map[string]string{"type": common.Deployment}}, + }, + } + + seRollout := &networking.ServiceEntry{ + Hosts: []string{"e2e.my-first-service.mesh"}, + Addresses: []string{localAddress}, + Ports: []*networking.ServicePort{{Number: uint32(common.DefaultServiceEntryPort), + Name: "http", Protocol: "http"}}, + Location: networking.ServiceEntry_MESH_INTERNAL, + Resolution: networking.ServiceEntry_DNS, + SubjectAltNames: []string{"spiffe://prefix/my-first-service"}, + Endpoints: []*networking.WorkloadEntry{ + {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2", Labels: map[string]string{"type": common.Rollout}}, + }, + } + + seDeployAndRolloutSingleCluster := &networking.ServiceEntry{ + Hosts: []string{"e2e.my-first-service.mesh"}, + Addresses: []string{localAddress}, + Ports: []*networking.ServicePort{{Number: uint32(common.DefaultServiceEntryPort), + Name: "http", Protocol: "http"}}, + Location: networking.ServiceEntry_MESH_INTERNAL, + Resolution: networking.ServiceEntry_DNS, + SubjectAltNames: []string{"spiffe://prefix/my-first-service"}, + Endpoints: []*networking.WorkloadEntry{ + {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2", Labels: map[string]string{"type": common.Rollout}}, + }, + } + + seDeployAndRolloutMulticluster := &networking.ServiceEntry{ + Hosts: []string{"e2e.my-first-service.mesh"}, + Addresses: []string{localAddress}, + Ports: []*networking.ServicePort{{Number: uint32(common.DefaultServiceEntryPort), + Name: "http", Protocol: "http"}}, + Location: networking.ServiceEntry_MESH_INTERNAL, + Resolution: networking.ServiceEntry_DNS, + SubjectAltNames: []string{"spiffe://prefix/my-first-service"}, + Endpoints: []*networking.WorkloadEntry{ + {Address: "east.elb.aws.com", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2", Labels: map[string]string{"type": common.Deployment}}, + {Address: "west.elb.aws.com", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2", Labels: map[string]string{"type": common.Rollout}}, + }, + } + + seDeployAndRolloutMulticluster1 := seDeployAndRolloutMulticluster.DeepCopy() + seDeployAndRolloutMulticluster2 := seDeployAndRolloutMulticluster.DeepCopy() + + service := &coreV1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: foobarMetadataName, + Namespace: foobarMetadataNamespace, + }, + Spec: coreV1.ServiceSpec{ + Selector: map[string]string{"app": identity}, + Ports: []coreV1.ServicePort{ + { + Name: "http", + Port: 8090, + }, + }, + }, + } + + meshPorts[common.Deployment] = map[string]uint32{"http": uint32(8090)} + meshPorts[common.Rollout] = map[string]uint32{"http": uint32(8090)} + + serviceInstanceDeployNil[common.Deployment] = nil + serviceInstanceRolloutNil[common.Rollout] = nil + serviceInstance[common.Deployment] = service + serviceInstance[common.Rollout] = service + + clusterDeployRolloutPresent := make(map[string]map[string]bool) + clusterDeployRolloutPresent[clusterName] = make(map[string]bool) + clusterDeployRolloutPresent[clusterName][common.Deployment] = true + clusterDeployRolloutPresent[clusterName][common.Rollout] = true + + 
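The table below drives the helper through these fixtures; as a compact orientation, a direct call on the single-cluster fixture looks like this (arguments reuse the variables defined above, and the ingress address matches the fixture endpoint):

err := UpdateEndpointsForDeployToRolloutMigration(
	serviceInstance,                 // deployment and rollout services
	seDeployAndRolloutSingleCluster, // SE whose endpoints are rewritten in place
	meshPorts,
	"dummy.admiral.global", // clusterIngress equal to the fixture endpoint address
	nil,                    // no app deletes in flight
	clusterName,
	clusterDeployRolloutPresent,
)
// on success, the SE endpoints are replaced with one local-FQDN entry per workload type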
testCases := []struct { + name string + serviceInstance map[string]*coreV1.Service + serviceEntry *networking.ServiceEntry + clusterAppDeleteMap map[string]string + clusterIngress string + clusterDeployRolloutPresent map[string]map[string]bool + expectedSeEndpoints []*networking.WorkloadEntry + expectedErr error + }{ + { + name: "Given service resource for the deployment type is nil, " + + "Then there is an error returned", + serviceInstance: serviceInstanceDeployNil, + serviceEntry: seDeployment, + clusterAppDeleteMap: nil, + clusterIngress: "dummy.admiral.global", + clusterDeployRolloutPresent: nil, + expectedSeEndpoints: nil, + expectedErr: fmt.Errorf("serviceInstance for Deployment/Rollout is nil as the service cache has not updated yet"), + }, + { + name: "Given service resource for the rollout type is nil, " + + "Then there is an error returned", + serviceInstance: serviceInstanceRolloutNil, + serviceEntry: seRollout, + clusterAppDeleteMap: nil, + clusterIngress: "dummy.admiral.global", + clusterDeployRolloutPresent: nil, + expectedSeEndpoints: nil, + expectedErr: fmt.Errorf("serviceInstance for Deployment/Rollout is nil as the service cache has not updated yet"), + }, + { + name: "Given all valid parameters, " + + "And there is a deployment and rollout in a single cluster, " + + "Then there is no error returned and 2 endpoints for deployment and rollout", + serviceInstance: serviceInstance, + serviceEntry: seDeployAndRolloutSingleCluster, + clusterIngress: "dummy.admiral.global", + clusterAppDeleteMap: nil, + clusterDeployRolloutPresent: clusterDeployRolloutPresent, + expectedSeEndpoints: []*networking.WorkloadEntry{ + { + Address: "foobar.foobar-ns.svc.cluster.local", + Locality: "us-west-2", + Ports: meshPorts[common.Deployment], + Labels: map[string]string{"type": common.Deployment}, + }, + { + Address: "foobar.foobar-ns.svc.cluster.local", + Locality: "us-west-2", + Ports: meshPorts[common.Rollout], + Labels: map[string]string{"type": common.Rollout}, + }, + }, + expectedErr: nil, + }, + { + name: "Given all valid parameters, " + + "And there is a deployment and rollout in a multi-cluster setup, " + + "Then there is no error returned and 3 endpoints for deployment, rollout and LB", + serviceInstance: serviceInstance, + serviceEntry: seDeployAndRolloutMulticluster, + clusterIngress: "east.elb.aws.com", + clusterAppDeleteMap: nil, + clusterDeployRolloutPresent: clusterDeployRolloutPresent, + expectedSeEndpoints: []*networking.WorkloadEntry{ + { + Address: "foobar.foobar-ns.svc.cluster.local", + Locality: "us-east-2", + Ports: meshPorts[common.Deployment], + Labels: map[string]string{"type": common.Deployment}, + }, + { + Address: "foobar.foobar-ns.svc.cluster.local", + Locality: "us-east-2", + Ports: meshPorts[common.Rollout], + Labels: map[string]string{"type": common.Rollout}, + }, + { + Address: "west.elb.aws.com", + Locality: "us-west-2", + Ports: map[string]uint32{"http": 0}, + }, + }, + expectedErr: nil, + }, + { + name: "Given all valid parameters, " + + "And there is a deployment and rollout in a multi-cluster setup, " + + "And there is a delete for a deployment in one of the clusters, " + + "When we are computing the SE for the source cluster, " + + "Then there is no error returned and 2 endpoints for rollout and LB in that cluster", + serviceInstance: serviceInstance, + serviceEntry: seDeployAndRolloutMulticluster2, + clusterIngress: "east.elb.aws.com", + clusterAppDeleteMap: map[string]string{"test-k8s": common.Deployment}, + clusterDeployRolloutPresent: clusterDeployRolloutPresent, +
expectedSeEndpoints: []*networking.WorkloadEntry{ + { + Address: "foobar.foobar-ns.svc.cluster.local", + Locality: "us-east-2", + Ports: meshPorts[common.Rollout], + Labels: map[string]string{"type": common.Rollout}, + }, + { + Address: "west.elb.aws.com", + Locality: "us-west-2", + Ports: map[string]uint32{"http": 0}, + }, + }, + expectedErr: nil, + }, + { + name: "Given all valid parameters, " + + "And there is a deployment and rollout in a multi-cluster setup, " + + "And there is a delete for a deployment in one of the clusters, " + + "When we are computing the SE for the other cluster, " + + "Then there is no error returned and still 3 endpoints for deployment, rollout and LB", + serviceInstance: serviceInstance, + serviceEntry: seDeployAndRolloutMulticluster1, + clusterIngress: "east.elb.aws.com", + clusterAppDeleteMap: nil, + clusterDeployRolloutPresent: clusterDeployRolloutPresent, + expectedSeEndpoints: []*networking.WorkloadEntry{ + { + Address: "foobar.foobar-ns.svc.cluster.local", + Locality: "us-east-2", + Ports: meshPorts[common.Deployment], + Labels: map[string]string{"type": common.Deployment}, + }, + { + Address: "foobar.foobar-ns.svc.cluster.local", + Locality: "us-east-2", + Ports: meshPorts[common.Rollout], + Labels: map[string]string{"type": common.Rollout}, + }, + { + Address: "west.elb.aws.com", + Locality: "us-west-2", + Ports: map[string]uint32{"http": 0}, + }, + }, + expectedErr: nil, + }, + } + + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + err := UpdateEndpointsForDeployToRolloutMigration(c.serviceInstance, c.serviceEntry, meshPorts, c.clusterIngress, c.clusterAppDeleteMap, clusterName, c.clusterDeployRolloutPresent) + assert.Equal(t, c.expectedErr, err) + if err == nil { + if !reflect.DeepEqual(c.expectedSeEndpoints, c.serviceEntry.Endpoints) { + t.Errorf("Expected endpoints: %v, got: %v", c.expectedSeEndpoints, c.serviceEntry.Endpoints) + } + } + }) + } +} diff --git a/admiral/pkg/controller/util/util.go b/admiral/pkg/controller/util/util.go index ff603927..e5ad56ce 100644 --- a/admiral/pkg/controller/util/util.go +++ b/admiral/pkg/controller/util/util.go @@ -1,9 +1,11 @@ package util import ( - log "github.com/sirupsen/logrus" "reflect" "time" + + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + log "github.com/sirupsen/logrus" ) func MapCopy(dst, src interface{}) { @@ -46,6 +48,27 @@ func LogElapsedTime(op, identity, env, clusterId string) func() { } } +func LogElapsedTimeController(logger *log.Entry, logMessage string) func() { + start := time.Now() + return func() { + logger.Infof("%s txTime=%v", + logMessage, + time.Since(start).Milliseconds()) + } +} + +func LogElapsedTimeForModifySE(logger *log.Entry, op, name, namespace, cluster, message string) func() { + start := time.Now() + return func() { + LogElapsedTimeSinceForModifySE(logger, op, name, namespace, cluster, message, start) + } +} + +func LogElapsedTimeSinceForModifySE(logger *log.Entry, op, name, namespace, cluster, message string, start time.Time) { + // op=%v name=%v namespace=%s cluster=%s message=%v txId=%v + logger.Infof(common.CtxLogFormatWithTime, op, name, namespace, cluster, message, time.Since(start).Milliseconds()) +} + func LogElapsedTimeSince(op, identity, env, clusterId string, start time.Time) { - log.Infof("op=%s identity=%s env=%s cluster=%s time=%v\n", op, identity, env, clusterId, time.Since(start).Milliseconds()) + log.Infof("op=%s identity=%s env=%s cluster=%s txTime=%v", op, identity, env, clusterId,
time.Since(start).Milliseconds()) } diff --git a/admiral/pkg/controller/util/util_test.go b/admiral/pkg/controller/util/util_test.go index 5e07eb8f..7e9afcd9 100644 --- a/admiral/pkg/controller/util/util_test.go +++ b/admiral/pkg/controller/util/util_test.go @@ -1,8 +1,12 @@ package util import ( + "bytes" "reflect" "testing" + + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" ) func TestCopyMap(t *testing.T) { @@ -81,6 +85,12 @@ func TestSubset(t *testing.T) { m2: m2, result: false, }, + { + name: "non-empty m1 is not a subset of non-empty m2 due to value mis-match", + m1: map[string]string{"env": "e2e", "version": "v1"}, + m2: map[string]string{"env": "stage", "version": "v1"}, + result: false, + }, } for _, c := range testCases { @@ -128,3 +138,14 @@ func TestContains(t *testing.T) { }) } } + +func TestLogElapsedTime(t *testing.T) { + logFunc := LogElapsedTime("test_op", "test_identity", "test_env", "test_clusterId") + oldOut := log.StandardLogger().Out + buf := bytes.Buffer{} + log.SetOutput(&buf) + logFunc() + + assert.Contains(t, buf.String(), "op=test_op identity=test_identity env=test_env cluster=test_clusterId txTime=") + log.SetOutput(oldOut) +} diff --git a/admiral/pkg/registry/clusterIdentity.go b/admiral/pkg/registry/clusterIdentity.go new file mode 100644 index 00000000..49dea026 --- /dev/null +++ b/admiral/pkg/registry/clusterIdentity.go @@ -0,0 +1,106 @@ +package registry + +import ( + "fmt" + "sync" +) + +// ClusterIdentityStore stores mapping of identity and +// the cluster in which resources for them need to be +// created +type ClusterIdentityStore interface { + AddUpdateIdentityToCluster(identity ClusterIdentity, clusterName string) error + RemoveIdentityToCluster(identity ClusterIdentity, clusterName string) error + GetAllIdentitiesForCluster(clusterName string) (IdentityStore, error) + AddIdentityConfiguration() error +} + +type clusterIdentityStoreHandler struct { + store clusterStore +} +type ClusterIdentity struct { + IdentityName string + SourceIdentity bool +} + +func NewClusterIdentity(name string, sourceIdentity bool) ClusterIdentity { + return ClusterIdentity{ + IdentityName: name, + SourceIdentity: sourceIdentity, + } +} + +type IdentityStore struct { + Store map[string]ClusterIdentity +} + +type clusterStore struct { + cache map[string]IdentityStore + mutex *sync.RWMutex +} + +func newClusterStore() clusterStore { + return clusterStore{ + cache: make(map[string]IdentityStore), + mutex: &sync.RWMutex{}, + } +} + +func NewClusterIdentityStoreHandler() *clusterIdentityStoreHandler { + return &clusterIdentityStoreHandler{ + store: newClusterStore(), + } +} + +func (s *clusterIdentityStoreHandler) AddUpdateIdentityToCluster(identity ClusterIdentity, clusterName string) error { + err := s.addUpdateCache(identity, clusterName) + return err +} + +func (s *clusterIdentityStoreHandler) RemoveIdentityToCluster(identity ClusterIdentity, clusterName string) error { + err := s.deleteCache(identity, clusterName) + return err +} + +func (s *clusterIdentityStoreHandler) GetAllIdentitiesForCluster(clusterName string) (IdentityStore, error) { + if clusterName == "" { + return IdentityStore{}, fmt.Errorf("empty cluster name=''") + } + cache, ok := s.store.cache[clusterName] + if !ok { + return IdentityStore{}, fmt.Errorf("no record for cluster=%s", clusterName) + } + return cache, nil +} + +func (s *clusterIdentityStoreHandler) AddIdentityConfiguration() error { + return nil +} + +func (s *clusterIdentityStoreHandler) 
addUpdateCache(identity ClusterIdentity, clusterName string) error { + defer s.store.mutex.Unlock() + s.store.mutex.Lock() + cache, ok := s.store.cache[clusterName] + if !ok { + s.store.cache[clusterName] = IdentityStore{ + Store: map[string]ClusterIdentity{ + identity.IdentityName: identity, + }, + } + return nil + } + cache.Store[identity.IdentityName] = identity + return nil +} + +func (s *clusterIdentityStoreHandler) deleteCache(identity ClusterIdentity, clusterName string) error { + defer s.store.mutex.Unlock() + s.store.mutex.Lock() + cache, ok := s.store.cache[clusterName] + if !ok { + return nil + } + delete(cache.Store, identity.IdentityName) + s.store.cache[clusterName] = cache + return nil +} diff --git a/admiral/pkg/registry/clusterShard.go b/admiral/pkg/registry/clusterShard.go new file mode 100644 index 00000000..042728ca --- /dev/null +++ b/admiral/pkg/registry/clusterShard.go @@ -0,0 +1,28 @@ +package registry + +// ClusterShardStore stores mapping of clusters +// and the shard they belong to +type ClusterShardStore interface { + AddClusterToShard(cluster, shard string) error + RemoveClusterFromShard(cluster, shard string) error + AddAllClustersToShard(clusters []string, shard string) error +} + +type clusterShardStoreHandler struct { +} + +func NewClusterShardStoreHandler() *clusterShardStoreHandler { + return &clusterShardStoreHandler{} +} + +func (c *clusterShardStoreHandler) AddClusterToShard(cluster, shard string) error { + return nil +} + +func (c *clusterShardStoreHandler) RemoveClusterFromShard(cluster, shard string) error { + return nil +} + +func (c *clusterShardStoreHandler) AddAllClustersToShard(clusters []string, shard string) error { + return nil +} diff --git a/admiral/pkg/registry/clusterShard_test.go b/admiral/pkg/registry/clusterShard_test.go new file mode 100644 index 00000000..b2a276fb --- /dev/null +++ b/admiral/pkg/registry/clusterShard_test.go @@ -0,0 +1 @@ +package registry diff --git a/admiral/pkg/registry/clusterdentity_test.go b/admiral/pkg/registry/clusterdentity_test.go new file mode 100644 index 00000000..b2a276fb --- /dev/null +++ b/admiral/pkg/registry/clusterdentity_test.go @@ -0,0 +1 @@ +package registry diff --git a/admiral/pkg/registry/configSyncer.go b/admiral/pkg/registry/configSyncer.go new file mode 100644 index 00000000..df0e03c7 --- /dev/null +++ b/admiral/pkg/registry/configSyncer.go @@ -0,0 +1,40 @@ +package registry + +type ConfigSyncer interface { + SyncDeployment() error + SyncService() error + + // argo custom resources + SyncArgoRollout() error + + // admiral custom resources + SyncGlobalTrafficPolicy() error + SyncClientConnectionConfigurations() error + SyncOutlierDetectionConfigurations() error +} + +type configSync struct{} + +func NewConfigSync() *configSync { + return &configSync{} +} + +func (c *configSync) SyncDeployment() error { + return nil +} + +func (c *configSync) SyncService() error { + return nil +} +func (c *configSync) SyncArgoRollout() error { + return nil +} +func (c *configSync) SyncGlobalTrafficPolicy() error { + return nil +} +func (c *configSync) SyncClientConnectionConfigurations() error { + return nil +} +func (c *configSync) SyncOutlierDetectionConfigurations() error { + return nil +} diff --git a/admiral/pkg/registry/mocks/ClusterShardStore.go b/admiral/pkg/registry/mocks/ClusterShardStore.go new file mode 100644 index 00000000..4f0d5966 --- /dev/null +++ b/admiral/pkg/registry/mocks/ClusterShardStore.go @@ -0,0 +1,66 @@ +// Code generated by mockery v2.37.1. DO NOT EDIT. 
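The mockery-generated ClusterShardStore mock below is what the secret-controller tests earlier in this diff stub out. A minimal wiring sketch using only APIs shown in this diff (the registryMocks alias and the values are illustrative):

m := registryMocks.NewClusterShardStore(t) // registers AssertExpectations via t.Cleanup
m.On("AddClusterToShard", mock.AnythingOfType("string"), mock.AnythingOfType("string")).Return(nil)
_ = m.AddClusterToShard("cluster1", "shard1") // satisfies the expectation above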
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// ClusterShardStore is an autogenerated mock type for the ClusterShardStore type +type ClusterShardStore struct { + mock.Mock +} + +// AddAllClustersToShard provides a mock function with given fields: clusters, shard +func (_m *ClusterShardStore) AddAllClustersToShard(clusters []string, shard string) error { + ret := _m.Called(clusters, shard) + + var r0 error + if rf, ok := ret.Get(0).(func([]string, string) error); ok { + r0 = rf(clusters, shard) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// AddClusterToShard provides a mock function with given fields: cluster, shard +func (_m *ClusterShardStore) AddClusterToShard(cluster string, shard string) error { + ret := _m.Called(cluster, shard) + + var r0 error + if rf, ok := ret.Get(0).(func(string, string) error); ok { + r0 = rf(cluster, shard) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RemoveClusterFromShard provides a mock function with given fields: cluster, shard +func (_m *ClusterShardStore) RemoveClusterFromShard(cluster string, shard string) error { + ret := _m.Called(cluster, shard) + + var r0 error + if rf, ok := ret.Get(0).(func(string, string) error); ok { + r0 = rf(cluster, shard) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewClusterShardStore creates a new instance of ClusterShardStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClusterShardStore(t interface { + mock.TestingT + Cleanup(func()) +}) *ClusterShardStore { + mock := &ClusterShardStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} \ No newline at end of file diff --git a/admiral/pkg/registry/registry.go b/admiral/pkg/registry/registry.go index 9d11bfb3..67c34583 100644 --- a/admiral/pkg/registry/registry.go +++ b/admiral/pkg/registry/registry.go @@ -1,10 +1,110 @@ package registry +import ( + "context" + "encoding/json" + "os" + + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + networkingV1Alpha3 "istio.io/api/networking/v1alpha3" + coreV1 "k8s.io/api/core/v1" +) + // IdentityConfiguration is an interface to fetch configuration from a registry // backend. The backend can provide an API to give configurations per identity, // or if given a cluster name, it will provide the configurations for all // the identities present in that cluster. 
type IdentityConfiguration interface { - GetByIdentityByName(identityAlias string) error - GetByClusterName(clusterName string) error + GetByIdentityName(identityAlias string, ctx context.Context) (IdentityConfig, error) + GetByClusterName(clusterName string, ctx context.Context) ([]IdentityConfig, error) +} + +type registryClient struct { + registryEndpoint string + operatorCluster string +} + +func NewRegistryClient(options ...func(client *registryClient)) *registryClient { + registryClient := ®istryClient{} + for _, o := range options { + o(registryClient) + } + return registryClient +} + +func WithRegistryEndpoint(registryEndpoint string) func(*registryClient) { + return func(c *registryClient) { + c.registryEndpoint = registryEndpoint + } +} + +func WithOperatorCluster(operatorCluster string) func(*registryClient) { + return func(c *registryClient) { + c.operatorCluster = operatorCluster + } +} + +type IdentityConfig struct { + Assetname string `json:"assetname"` + Clusters []IdentityConfigCluster `json:"clusters"` +} + +type IdentityConfigCluster struct { + Name string `json:"name"` + Locality string `json:"locality"` + IngressEndpoint string `json:"ingressEndpoint"` + IngressPort string `json:"ingressPort"` + IngressPortName string `json:"ingressPortName"` + Environment []IdentityConfigEnvironment `json:"environment"` + ClientAssets []map[string]string `json:"clientAssets"` + // Why is clientAssets under cluster? shouldn't it be regardless of cluster??/??? +} + +type IdentityConfigEnvironment struct { + Name string `json:"name"` + Namespace string `json:"namespace"` + ServiceName string `json:"serviceName"` + Type string `json:"type"` + Selectors map[string]string `json:"selectors"` + Ports []coreV1.ServicePort `json:"ports"` + TrafficPolicy networkingV1Alpha3.TrafficPolicy `json:"trafficPolicy"` +} + +// GetByIdentityName calls the registry API to fetch the IdentityConfig for +// the given identityAlias +func (c *registryClient) GetByIdentityName(identityAlias string, ctx context.Context) (IdentityConfig, error) { + //jsonResult = os.request(/asset/identityAlias/configurations) + ctxLogger := common.GetCtxLogger(ctx, identityAlias, "") + ctxLogger.Infof(common.CtxLogFormat, "GetByIdentityName", identityAlias, "", c.operatorCluster, "") + byteValue, err := os.ReadFile("testdata/" + identityAlias + "IdentityConfiguration.json") + if err != nil { + ctxLogger.Infof(common.CtxLogFormat, "GetByIdentityName", identityAlias, "", c.operatorCluster, err) + } + var identityConfigUnmarshalResult IdentityConfig + err = json.Unmarshal(byteValue, &identityConfigUnmarshalResult) + if err != nil { + ctxLogger.Infof(common.CtxLogFormat, "GetByIdentityName", identityAlias, "", c.operatorCluster, err) + } + return identityConfigUnmarshalResult, err +} + +// GetByClusterName calls the registry API to fetch the IdentityConfigs for +// every identity on the cluster. 
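A short usage sketch of the client above, combining the functional options with the identity lookup (the endpoint and cluster values are illustrative; with the current file-backed implementation the call reads testdata/sampleIdentityConfiguration.json):

ctx := context.Background()
rc := NewRegistryClient(
	WithRegistryEndpoint("https://registry.example.com"), // illustrative endpoint
	WithOperatorCluster("operator-cluster"),              // illustrative cluster
)
identityConfig, err := rc.GetByIdentityName("sample", ctx)
if err != nil {
	// the client also logs the failure; handle or propagate here
}
_ = identityConfig.Assetname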
+func (c *registryClient) GetByClusterName(clusterName string, ctx context.Context) ([]IdentityConfig, error) { + //jsonResult = os.request(/cluster/{cluster_id}/configurations + ctxLogger := common.GetCtxLogger(ctx, "", "") + ctxLogger.Infof(common.CtxLogFormat, "GetByClusterName", "", "", clusterName, "") + //identities := getIdentitiesForCluster(clusterName) - either queries shard CRD or shard CRD controller calls this func with those as parameters + identities := []string{clusterName} + identityConfigs := []IdentityConfig{} + var err error + for _, identity := range identities { + identityConfig, identityErr := c.GetByIdentityName(identity, ctx) + if identityErr != nil { + err = identityErr + ctxLogger.Infof(common.CtxLogFormat, "GetByClusterName", "", "", clusterName, identityErr) + } + identityConfigs = append(identityConfigs, identityConfig) + } + return identityConfigs, err } diff --git a/admiral/pkg/registry/registry_test.go b/admiral/pkg/registry/registry_test.go new file mode 100644 index 00000000..7f598c6a --- /dev/null +++ b/admiral/pkg/registry/registry_test.go @@ -0,0 +1,196 @@ +package registry + +import ( + "context" + json "encoding/json" + "errors" + "reflect" + "testing" + + "github.com/golang/protobuf/ptypes/duration" + "github.com/golang/protobuf/ptypes/wrappers" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + networkingV1Alpha3 "istio.io/api/networking/v1alpha3" + coreV1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func getSampleIdentityConfigEnvironment(env string, namespace string) IdentityConfigEnvironment { + identityConfigEnvironment := IdentityConfigEnvironment{ + Name: env, + Namespace: namespace, + ServiceName: "partner-data-to-tax-spk-root-service", + Type: "rollout", + Selectors: map[string]string{"app": "partner-data-to-tax"}, + Ports: []coreV1.ServicePort{{Name: "http-service-mesh", Port: int32(8090), Protocol: coreV1.ProtocolTCP, TargetPort: intstr.FromInt(8090)}}, + TrafficPolicy: networkingV1Alpha3.TrafficPolicy{ + LoadBalancer: &networkingV1Alpha3.LoadBalancerSettings{ + LbPolicy: &networkingV1Alpha3.LoadBalancerSettings_Simple{Simple: networkingV1Alpha3.LoadBalancerSettings_LEAST_REQUEST}, + LocalityLbSetting: &networkingV1Alpha3.LocalityLoadBalancerSetting{ + Distribute: []*networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute{{ + From: "*", + To: map[string]uint32{"us-west-2": 100}, + }}, + }, + WarmupDurationSecs: &duration.Duration{Seconds: 45}, + }, + ConnectionPool: &networkingV1Alpha3.ConnectionPoolSettings{ + Http: &networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{ + Http2MaxRequests: 1000, + MaxRequestsPerConnection: 5, + }, + }, + OutlierDetection: &networkingV1Alpha3.OutlierDetection{ + ConsecutiveGatewayErrors: &wrappers.UInt32Value{Value: 0}, + Consecutive_5XxErrors: &wrappers.UInt32Value{Value: 0}, + }, + }, + } + return identityConfigEnvironment +} + +func getSampleIdentityConfig() IdentityConfig { + prfEnv := getSampleIdentityConfigEnvironment("prf", "ctg-taxprep-partnerdatatotax-usw2-prf") + e2eEnv := getSampleIdentityConfigEnvironment("e2e", "ctg-taxprep-partnerdatatotax-usw2-e2e") + qalEnv := getSampleIdentityConfigEnvironment("qal", "ctg-taxprep-partnerdatatotax-usw2-qal") + environments := []IdentityConfigEnvironment{prfEnv, e2eEnv, qalEnv} + clientAssets := []map[string]string{{"name": "intuit.cto.dev_portal"}, {"name": "intuit.ctg.tto.browserclient"}, {"name": "intuit.ctg.taxprep.partnerdatatotaxtestclient"}, {"name": 
"intuit.productmarketing.ipu.pmec"}, {"name": "intuit.tax.taxdev.txo"}, {"name": "intuit.CTO.oauth2"}, {"name": "intuit.platform.servicesgateway.servicesgateway"}, {"name": "intuit.ctg.taxprep.partnerdatatotax"}, {"name": "sample"}} + cluster := IdentityConfigCluster{ + Name: "cg-tax-ppd-usw2-k8s", + Locality: "us-west-2", + IngressEndpoint: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", + IngressPort: "15443", + IngressPortName: "http", + Environment: environments, + ClientAssets: clientAssets, + } + identityConfig := IdentityConfig{ + Assetname: "Intuit.ctg.taxprep.partnerdatatotax", + Clusters: []IdentityConfigCluster{cluster}, + } + return identityConfig +} + +func TestParseIdentityConfigJSON(t *testing.T) { + identityConfig := getSampleIdentityConfig() + testCases := []struct { + name string + identityConfig IdentityConfig + }{ + { + name: "Given a JSON identity configuration file, " + + "When the file is parsed, " + + "Then the file should be read into the IdentityConfig struct", + identityConfig: identityConfig, + }, + } + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + jsonResult, err := json.MarshalIndent(c.identityConfig, "", " ") + if err != nil { + t.Errorf("While marshaling IdentityConfig struct into JSON, got error: %s", err) + } + var identityConfigUnmarshalResult IdentityConfig + err = json.Unmarshal(jsonResult, &identityConfigUnmarshalResult) + if err != nil { + t.Errorf("While unmarshaling JSON into IdentityConfig struct, got error: %s", err) + } + if !reflect.DeepEqual(identityConfigUnmarshalResult, c.identityConfig) { + t.Errorf("Mismatch between original IdentityConfig and unmarshaled IdentityConfig") + } + }) + } +} + +func TestGetByIdentityName(t *testing.T) { + sampleIdentityConfig := getSampleIdentityConfig() + registryClient := NewRegistryClient(WithRegistryEndpoint("endpoint"), WithOperatorCluster("test-k8s")) + var jsonErr *json.SyntaxError + testCases := []struct { + name string + expectedIdentityConfig IdentityConfig + expectedError any + identityAlias string + }{ + { + name: "Given an identity, " + + "When the identity config JSON is parsed, " + + "Then the resulting struct should match the expected config", + expectedIdentityConfig: sampleIdentityConfig, + expectedError: nil, + identityAlias: "sample", + }, + { + name: "Given an identity, " + + "When the identity config JSON doesn't exist for it, " + + "Then there should be a non-nil error", + expectedIdentityConfig: IdentityConfig{}, + expectedError: jsonErr, + identityAlias: "failed", + }, + } + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + ctx := context.Background() + identityConfig, err := registryClient.GetByIdentityName(c.identityAlias, ctx) + if err != nil && c.expectedError == nil { + t.Errorf("error while getting identityConfig by name with error: %v", err) + } else if err != nil && c.expectedError != nil && !errors.As(err, &c.expectedError) { + t.Errorf("failed to get correct error: %v, instead got error: %v", c.expectedError, err) + } else { + opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.TrafficPolicy{}, networkingV1Alpha3.LoadBalancerSettings{}, networkingV1Alpha3.LocalityLoadBalancerSetting{}, networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute{}, duration.Duration{}, networkingV1Alpha3.ConnectionPoolSettings{}, networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{}, networkingV1Alpha3.OutlierDetection{}, wrappers.UInt32Value{}) + if !cmp.Equal(identityConfig, c.expectedIdentityConfig, opts) { + 
t.Errorf("mismatch between parsed JSON file and expected identity config for alias: %s", c.identityAlias) + t.Errorf(cmp.Diff(identityConfig, c.expectedIdentityConfig, opts)) + } + } + }) + } +} + +func TestGetByClusterName(t *testing.T) { + sampleIdentityConfig := getSampleIdentityConfig() + registryClient := NewRegistryClient(WithRegistryEndpoint("endpoint"), WithOperatorCluster("test-k8s")) + var jsonErr *json.SyntaxError + testCases := []struct { + name string + expectedIdentityConfig IdentityConfig + expectedError any + clusterName string + }{ + { + name: "Given a cluster name, " + + "When all the identity configs for the identities in that cluster are processed, " + + "Then the structs returned should match the expected configs", + expectedIdentityConfig: sampleIdentityConfig, + expectedError: nil, + clusterName: "sample", + }, + { + name: "Given a cluster name, " + + "When there exists no identity config for that cluster, " + + "Then there should be a non-nil error", + expectedIdentityConfig: IdentityConfig{}, + expectedError: jsonErr, + clusterName: "failed", + }, + } + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + ctx := context.Background() + identityConfigs, err := registryClient.GetByClusterName(c.clusterName, ctx) + if err != nil && c.expectedError == nil { + t.Errorf("error while getting identityConfigs by cluster name with error: %v", err) + } else if err != nil && c.expectedError != nil && !errors.As(err, &c.expectedError) { + t.Errorf("failed to get correct error: %v, instead got error: %v", c.expectedError, err) + } else { + opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.TrafficPolicy{}, networkingV1Alpha3.LoadBalancerSettings{}, networkingV1Alpha3.LocalityLoadBalancerSetting{}, networkingV1Alpha3.LocalityLoadBalancerSetting_Distribute{}, duration.Duration{}, networkingV1Alpha3.ConnectionPoolSettings{}, networkingV1Alpha3.ConnectionPoolSettings_HTTPSettings{}, networkingV1Alpha3.OutlierDetection{}, wrappers.UInt32Value{}) + if !cmp.Equal(identityConfigs[0], c.expectedIdentityConfig, opts) { + t.Errorf("mismatch between parsed JSON file and expected identity config for file: %s", c.clusterName) + t.Errorf(cmp.Diff(identityConfigs[0], c.expectedIdentityConfig, opts)) + } + } + }) + } +} diff --git a/admiral/pkg/registry/serviceentry.go b/admiral/pkg/registry/serviceentry.go new file mode 100644 index 00000000..d6dc9e79 --- /dev/null +++ b/admiral/pkg/registry/serviceentry.go @@ -0,0 +1,204 @@ +package registry + +import ( + "context" + "errors" + "sort" + "strconv" + "strings" + + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral" + "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" + "github.com/istio-ecosystem/admiral/admiral/pkg/util" + "github.com/sirupsen/logrus" + networkingV1Alpha3 "istio.io/api/networking/v1alpha3" +) + +// IstioSEBuilder is an interface to construct Service Entry objects +// from IdentityConfig objects. It can construct multiple Service Entries +// from an IdentityConfig or construct just one given a IdentityConfigEnvironment. 
diff --git a/admiral/pkg/registry/serviceentry.go b/admiral/pkg/registry/serviceentry.go
new file mode 100644
index 00000000..d6dc9e79
--- /dev/null
+++ b/admiral/pkg/registry/serviceentry.go
@@ -0,0 +1,204 @@
+package registry
+
+import (
+	"context"
+	"errors"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+	"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+	"github.com/istio-ecosystem/admiral/admiral/pkg/util"
+	"github.com/sirupsen/logrus"
+	networkingV1Alpha3 "istio.io/api/networking/v1alpha3"
+)
+
+// IstioSEBuilder is an interface to construct Service Entry objects
+// from IdentityConfig objects. It can construct multiple Service Entries
+// from an IdentityConfig or construct just one given an IdentityConfigEnvironment.
+type IstioSEBuilder interface {
+	BuildServiceEntriesFromIdentityConfig(ctxLogger *logrus.Entry, ctx context.Context, event admiral.EventType, identityConfig IdentityConfig) ([]*networkingV1Alpha3.ServiceEntry, error)
+}
+
+type ServiceEntryBuilder struct {
+	OperatorCluster string
+}
+
+// BuildServiceEntriesFromIdentityConfig builds service entries to write to the operator cluster
+// by looping through the IdentityConfig clusters and environments to get spec information. It
+// builds one SE per environment per cluster the identity is deployed in.
+func (b *ServiceEntryBuilder) BuildServiceEntriesFromIdentityConfig(ctxLogger *logrus.Entry, ctx context.Context, event admiral.EventType, identityConfig IdentityConfig) ([]*networkingV1Alpha3.ServiceEntry, error) {
+	identity := identityConfig.Assetname
+	serviceEntries := []*networkingV1Alpha3.ServiceEntry{}
+	var err error
+	if event == admiral.Add || event == admiral.Update {
+		ctxLogger.Infof(common.CtxLogFormat, "buildServiceEntry", identity, common.GetSyncNamespace(), b.OperatorCluster, "Beginning to build the SE spec")
+		// getIngressEndpoints returns one endpoint per cluster, in the same order
+		// as identityConfig.Clusters, so indexing by i below is safe.
+		ingressEndpoints, ingressErr := getIngressEndpoints(identityConfig.Clusters)
+		if ingressErr != nil {
+			return serviceEntries, ingressErr
+		}
+		for i, identityConfigCluster := range identityConfig.Clusters {
+			sourceCluster := identityConfigCluster.Name
+			for _, identityConfigEnvironment := range identityConfigCluster.Environment {
+				se, buildErr := buildServiceEntryForClusterByEnv(ctxLogger, ctx, b.OperatorCluster, sourceCluster, identity, identityConfigCluster.ClientAssets, ingressEndpoints, ingressEndpoints[i].Address, identityConfigEnvironment)
+				if buildErr != nil {
+					err = buildErr
+					// Skip appending a nil service entry for environments that failed to build.
+					continue
+				}
+				serviceEntries = append(serviceEntries, se)
+			}
+		}
+		return serviceEntries, err
+	}
+	return serviceEntries, err
+}
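As a usage sketch, not part of this change-set: a caller holding an IdentityConfig would drive the builder roughly like this, with the operator cluster name borrowed from the tests later in this diff:

    package registry

    import (
        "context"

        "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
        "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
    )

    // buildForOperator is illustrative only.
    func buildForOperator(ctx context.Context, identityConfig IdentityConfig) error {
        builder := ServiceEntryBuilder{OperatorCluster: "cg-tax-ppd-usw2-k8s"}
        ctxLogger := common.GetCtxLogger(ctx, identityConfig.Assetname, "")
        serviceEntries, err := builder.BuildServiceEntriesFromIdentityConfig(ctxLogger, ctx, admiral.Add, identityConfig)
        if err != nil {
            return err
        }
        // A real caller would then write each SE to the operator cluster.
        _ = serviceEntries
        return nil
    }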
+
+// buildServiceEntryForClusterByEnv builds a service entry based on cluster and IdentityConfigEnvironment information
+// to be written to the operator cluster.
+func buildServiceEntryForClusterByEnv(ctxLogger *logrus.Entry, ctx context.Context, operatorCluster string, sourceCluster string, identity string, clientAssets []map[string]string, ingressEndpoints []*networkingV1Alpha3.WorkloadEntry, remoteEndpointAddress string, identityConfigEnvironment IdentityConfigEnvironment) (*networkingV1Alpha3.ServiceEntry, error) {
+	ctxLogger.Infof(common.CtxLogFormat, "buildServiceEntry", identity, common.GetSyncNamespace(), operatorCluster, "build the SE spec from IdentityConfigEnvironment")
+	env := identityConfigEnvironment.Name
+	fqdn := common.GetCnameVal([]string{env, strings.ToLower(identity), common.GetHostnameSuffix()})
+	san := common.SpiffePrefix + common.GetSANPrefix() + common.Slash + identity
+	ports, err := getServiceEntryPorts(identityConfigEnvironment)
+	if err != nil {
+		return nil, err
+	}
+	endpoints, err := getServiceEntryEndpoints(ctxLogger, operatorCluster, sourceCluster, ingressEndpoints, remoteEndpointAddress, identityConfigEnvironment)
+	if err != nil {
+		return nil, err
+	}
+	dependentNamespaces, err := getSortedDependentNamespaces(ctxLogger, ctx, operatorCluster, sourceCluster, fqdn, env, clientAssets)
+	if err != nil {
+		return nil, err
+	}
+	return &networkingV1Alpha3.ServiceEntry{
+		Hosts:           []string{fqdn},
+		Ports:           ports,
+		Location:        networkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
+		Resolution:      networkingV1Alpha3.ServiceEntry_DNS,
+		SubjectAltNames: []string{san},
+		Endpoints:       endpoints,
+		ExportTo:        dependentNamespaces,
+	}, nil
+}
+
+// getIngressEndpoints constructs the ingress gateway/remote endpoint for an identity in each
+// cluster by reading the information directly from the IdentityConfigCluster. It returns one
+// endpoint per cluster, preserving the order of the input slice.
+func getIngressEndpoints(clusters []IdentityConfigCluster) ([]*networkingV1Alpha3.WorkloadEntry, error) {
+	ingressEndpoints := []*networkingV1Alpha3.WorkloadEntry{}
+	for _, cluster := range clusters {
+		portNumber, err := strconv.ParseInt(cluster.IngressPort, 10, 64)
+		if err != nil {
+			// Fail fast rather than skipping the cluster, so the result stays
+			// aligned 1:1 with the clusters slice that callers index into.
+			return nil, err
+		}
+		ingressEndpoint := &networkingV1Alpha3.WorkloadEntry{
+			Address:  cluster.IngressEndpoint,
+			Locality: cluster.Locality,
+			Ports:    map[string]uint32{cluster.IngressPortName: uint32(portNumber)},
+			Labels:   map[string]string{"security.istio.io/tlsMode": "istio"},
+		}
+		ingressEndpoints = append(ingressEndpoints, ingressEndpoint)
+	}
+	return ingressEndpoints, nil
+}
+
+// getServiceEntryPorts constructs the ServicePorts of the service entry that should be built
+// for the given identityConfigEnvironment.
+func getServiceEntryPorts(identityConfigEnvironment IdentityConfigEnvironment) ([]*networkingV1Alpha3.ServicePort, error) {
+	// TODO: Verify this is how ports should be set.
+	// Find the port whose targetPort matches the inbound common.SidecarEnabledPorts,
+	// and set the port name and protocol based on that.
+	port := &networkingV1Alpha3.ServicePort{Number: uint32(common.DefaultServiceEntryPort), Name: util.Http, Protocol: util.Http}
+	var err error
+	if len(identityConfigEnvironment.Ports) == 0 {
+		err = errors.New("identityConfigEnvironment had no ports for: " + identityConfigEnvironment.Name)
+	}
+	for _, servicePort := range identityConfigEnvironment.Ports {
+		// TODO: 8090 is supposed to be set as the common.SidecarEnabledPorts (includeInboundPorts),
+		// and we check that in the rollout, but we don't have that information here.
+		if servicePort.TargetPort.IntValue() == 8090 {
+			protocol := util.GetPortProtocol(servicePort.Name)
+			port.Name = protocol
+			port.Protocol = protocol
+		}
+	}
+	ports := []*networkingV1Alpha3.ServicePort{port}
+	return ports, err
+}
+
+// getServiceEntryEndpoints constructs the remote or local endpoint of the service entry that
+// should be built for the given identityConfigEnvironment.
+func getServiceEntryEndpoints(ctxLogger *logrus.Entry, operatorCluster string, sourceCluster string, ingressEndpoints []*networkingV1Alpha3.WorkloadEntry, remoteEndpointAddress string, identityConfigEnvironment IdentityConfigEnvironment) ([]*networkingV1Alpha3.WorkloadEntry, error) {
+	// TODO: Verify local and remote endpoints are constructed correctly.
+	endpoints := []*networkingV1Alpha3.WorkloadEntry{}
+	var err error
+	for _, endpoint := range ingressEndpoints {
+		tmpEp := endpoint.DeepCopy()
+		tmpEp.Labels["type"] = identityConfigEnvironment.Type
+		if operatorCluster == sourceCluster && tmpEp.Address == remoteEndpointAddress {
+			// Use a local endpoint address when the identity is deployed on the same cluster
+			// as its client and this endpoint is the remote endpoint for that cluster.
+			tmpEp.Address = identityConfigEnvironment.ServiceName + common.Sep + identityConfigEnvironment.Namespace + common.GetLocalDomainSuffix()
+			for _, servicePort := range identityConfigEnvironment.Ports {
+				// There should only be one mesh port here (http-service-mesh), but we are
+				// preserving the ability to have multiple ports.
+				protocol := util.GetPortProtocol(servicePort.Name)
+				if _, ok := tmpEp.Ports[protocol]; ok {
+					tmpEp.Ports[protocol] = uint32(servicePort.Port)
+					ctxLogger.Infof(common.CtxLogFormat, "LocalMeshPort", servicePort.Port, "", sourceCluster, "Protocol: "+protocol)
+				} else {
+					err = errors.New("failed to get Port for protocol: " + protocol)
+				}
+			}
+		}
+		endpoints = append(endpoints, tmpEp)
+	}
+	return endpoints, err
+}
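The namespace list assembled in the next function is sorted and de-duplicated inline; for clarity, here is the same idiom as a standalone helper (hypothetical, not part of the change):

    package registry

    import "sort"

    // sortAndDedup sorts the input and drops adjacent duplicates, mirroring
    // the inline logic in getSortedDependentNamespaces below.
    func sortAndDedup(in []string) []string {
        sort.Strings(in)
        var out []string
        for i, s := range in {
            if i == 0 || s != in[i-1] {
                out = append(out, s)
            }
        }
        return out
    }

For example, sortAndDedup([]string{"ns-b", "ns-a", "ns-b"}) yields ["ns-a", "ns-b"].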
+
+// getSortedDependentNamespaces constructs a sorted list of unique namespaces for a given cluster, client assets,
+// and cname, where each namespace is where a client asset of the cname is deployed on the cluster. If the cname
+// is also deployed on the cluster then the istio-system namespace is also in the list.
+func getSortedDependentNamespaces(ctxLogger *logrus.Entry, ctx context.Context, operatorCluster string, sourceCluster string, cname string, env string, clientAssets []map[string]string) ([]string, error) {
+	clientNamespaces := []string{}
+	var err error
+	var clientIdentityConfig IdentityConfig
+	for _, clientAsset := range clientAssets {
+		// TODO: Need to do registry client initialization better, maybe pass it in.
+		registryClient := NewRegistryClient(WithRegistryEndpoint("endpoint"), WithOperatorCluster(operatorCluster))
+		// For each client asset of cname, we fetch its identityConfig.
+		clientIdentityConfig, err = registryClient.GetByIdentityName(clientAsset["name"], ctx)
+		if err != nil {
+			ctxLogger.Errorf(common.CtxLogFormat, "buildServiceEntry", cname, common.GetSyncNamespace(), clientAsset["name"], "Failed to fetch IdentityConfig: "+err.Error())
+			continue
+		}
+		for _, clientIdentityConfigCluster := range clientIdentityConfig.Clusters {
+			// For each cluster the client asset is deployed on, check whether that cluster
+			// is the operator cluster we are writing to.
+			if operatorCluster == clientIdentityConfigCluster.Name {
+				for _, clientIdentityConfigEnvironment := range clientIdentityConfigCluster.Environment {
+					// For each environment of the client asset on the operator cluster, add the namespace to the list.
+					if clientIdentityConfigEnvironment.Name == env {
+						// TODO: Do we need to check if ENV matches here for exportTo?
+						clientNamespaces = append(clientNamespaces, clientIdentityConfigEnvironment.Namespace)
+					}
+				}
+			}
+		}
+	}
+	if operatorCluster == sourceCluster {
+		clientNamespaces = append(clientNamespaces, common.NamespaceIstioSystem)
+	}
+	if len(clientNamespaces) > common.GetExportToMaxNamespaces() {
+		clientNamespaces = []string{"*"}
+		ctxLogger.Infof("exceeded max namespaces for cname=%s in cluster=%s", cname, operatorCluster)
+	}
+	sort.Strings(clientNamespaces)
+	var dedupClientNamespaces []string
+	for i := 0; i < len(clientNamespaces); i++ {
+		if i == 0 || clientNamespaces[i] != clientNamespaces[i-1] {
+			dedupClientNamespaces = append(dedupClientNamespaces, clientNamespaces[i])
+		}
+	}
+	// Return the sorted, de-duplicated namespace list.
+	return dedupClientNamespaces, err
+}
diff --git a/admiral/pkg/registry/serviceentry_test.go b/admiral/pkg/registry/serviceentry_test.go
new file mode 100644
index 00000000..92b04969
--- /dev/null
+++ b/admiral/pkg/registry/serviceentry_test.go
@@ -0,0 +1,358 @@
+package registry
+
+import (
+	"context"
+	"reflect"
+	"strings"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	"github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
+	"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+	"github.com/istio-ecosystem/admiral/admiral/pkg/util"
+	networkingV1Alpha3 "istio.io/api/networking/v1alpha3"
+)
+
+func admiralParamsForServiceEntryTests() common.AdmiralParams {
+	return common.AdmiralParams{
+		KubeconfigPath: "testdata/fake.config",
+		LabelSet: &common.LabelSet{
+			GatewayApp:              "gatewayapp",
+			WorkloadIdentityKey:     "identity",
+			PriorityKey:             "priority",
+			EnvKey:                  "env",
+			AdmiralCRDIdentityLabel: "identity",
+		},
+		EnableSAN:                         true,
+		SANPrefix:                         "prefix",
+		HostnameSuffix:                    "mesh",
+		SyncNamespace:                     "ns",
+		CacheReconcileDuration:            0,
+		ClusterRegistriesNamespace:        "default",
+		DependenciesNamespace:             "default",
+		WorkloadSidecarName:               "default",
+		Profile:                           common.AdmiralProfileDefault,
+		DependentClusterWorkerConcurrency: 5,
+		EnableSWAwareNSCaches:             true,
+		ExportToIdentityList:              []string{"*"},
+
ExportToMaxNamespaces: 35, + EnableAbsoluteFQDN: true, + EnableAbsoluteFQDNForLocalEndpoints: true, + } +} + +func createMockServiceEntry(env string, identity string, endpointAddress string, endpointPort int, exportTo []string) networkingV1Alpha3.ServiceEntry { + serviceEntry := networkingV1Alpha3.ServiceEntry{ + Hosts: []string{env + "." + strings.ToLower(identity) + ".mesh"}, + Addresses: nil, + Ports: []*networkingV1Alpha3.ServicePort{{Number: uint32(common.DefaultServiceEntryPort), Name: util.Http, Protocol: util.Http}}, + Location: 1, + Resolution: 2, + Endpoints: []*networkingV1Alpha3.WorkloadEntry{{Address: endpointAddress, + Locality: "us-west-2", + Ports: map[string]uint32{"http": uint32(endpointPort)}, + Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "rollout"}}}, + WorkloadSelector: nil, + ExportTo: exportTo, + SubjectAltNames: []string{"spiffe://prefix/" + identity}, + } + return serviceEntry +} + +func TestGetIngressEndpoints(t *testing.T) { + identityConfig := getSampleIdentityConfig() + expectedIngressEndpoints := []*networkingV1Alpha3.WorkloadEntry{{ + Address: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", + Locality: "us-west-2", + Ports: map[string]uint32{"http": uint32(15443)}, + Labels: map[string]string{"security.istio.io/tlsMode": "istio"}, + }} + testCases := []struct { + name string + identityConfigClusters []IdentityConfigCluster + expectedIngressEndpoints []*networkingV1Alpha3.WorkloadEntry + }{ + { + name: "Given an IdentityConfigCluster, " + + "Then the constructed endpoint should be the ingress endpoint", + identityConfigClusters: identityConfig.Clusters, + expectedIngressEndpoints: expectedIngressEndpoints, + }, + } + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + ingressEndpoints, err := getIngressEndpoints(c.identityConfigClusters) + if err != nil { + t.Errorf("While constructing ingressEndpoint, got error: %v", err) + } + if !reflect.DeepEqual(ingressEndpoints, c.expectedIngressEndpoints) { + t.Errorf("Mismatch between constructed ingressEndpoint and expected ingressEndpoint") + } + }) + } +} + +func TestGetServiceEntryPorts(t *testing.T) { + e2eEnv := getSampleIdentityConfigEnvironment("e2e", "ctg-taxprep-partnerdatatotax-usw2-e2e") + expectedSEPorts := []*networkingV1Alpha3.ServicePort{{Number: uint32(common.DefaultServiceEntryPort), Name: util.Http, Protocol: util.Http}} + testCases := []struct { + name string + identityConfigEnvironment IdentityConfigEnvironment + expectedSEPorts []*networkingV1Alpha3.ServicePort + }{ + { + name: "Given an IdentityConfigEnvironment, " + + "Then the constructed ServiceEntryPorts should be as expected", + identityConfigEnvironment: e2eEnv, + expectedSEPorts: expectedSEPorts, + }, + } + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + sePorts, err := getServiceEntryPorts(e2eEnv) + if err != nil { + t.Errorf("While constructing serviceEntryPorts, got error: %v", err) + } + if !reflect.DeepEqual(sePorts, c.expectedSEPorts) { + t.Errorf("Mismatch between constructed ingressEndpoint and expected ingressEndpoint") + } + }) + } +} + +func TestGetServiceEntryEndpoints(t *testing.T) { + admiralParams := admiralParamsForServiceEntryTests() + common.ResetSync() + common.InitializeConfig(admiralParams) + e2eEnv := getSampleIdentityConfigEnvironment("e2e", "ctg-taxprep-partnerdatatotax-usw2-e2e") + ingressEndpoints := []*networkingV1Alpha3.WorkloadEntry{{ + Address: 
"internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", + Locality: "us-west-2", + Ports: map[string]uint32{"http": uint32(15443)}, + Labels: map[string]string{"security.istio.io/tlsMode": "istio"}, + }} + remoteEndpoint := []*networkingV1Alpha3.WorkloadEntry{{ + Address: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", + Locality: "us-west-2", + Ports: map[string]uint32{"http": uint32(15443)}, + Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "rollout"}, + }} + localEndpoint := []*networkingV1Alpha3.WorkloadEntry{{ + Address: "partner-data-to-tax-spk-root-service.ctg-taxprep-partnerdatatotax-usw2-e2e.svc.cluster.local.", + Locality: "us-west-2", + Ports: map[string]uint32{"http": uint32(8090)}, + Labels: map[string]string{"security.istio.io/tlsMode": "istio", "type": "rollout"}, + }} + ctx := context.Background() + ctxLogger := common.GetCtxLogger(ctx, "ctg-taxprep-partnerdatatotax", "") + testCases := []struct { + name string + identityConfigEnvironment IdentityConfigEnvironment + ingressEndpoints []*networkingV1Alpha3.WorkloadEntry + operatorCluster string + sourceCluster string + remoteEndpointAddress string + expectedSEEndpoints []*networkingV1Alpha3.WorkloadEntry + }{ + { + name: "Given an IdentityConfigEnvironment and ingressEndpoint, " + + "When the operator cluster is not the same as the source cluster" + + "Then the constructed endpoint should be a remote endpoint", + identityConfigEnvironment: e2eEnv, + ingressEndpoints: ingressEndpoints, + operatorCluster: "cg-tax-ppd-usw2-k8s", + sourceCluster: "apigw-cx-ppd-usw2-k8s", + remoteEndpointAddress: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", + expectedSEEndpoints: remoteEndpoint, + }, + { + name: "Given an IdentityConfigEnvironment and ingressEndpoint, " + + "When the operator cluster is the same as the source cluster" + + "Then the constructed endpoint should be a local endpoint", + identityConfigEnvironment: e2eEnv, + ingressEndpoints: ingressEndpoints, + operatorCluster: "cg-tax-ppd-usw2-k8s", + sourceCluster: "cg-tax-ppd-usw2-k8s", + remoteEndpointAddress: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", + expectedSEEndpoints: localEndpoint, + }, + } + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + seEndpoint, err := getServiceEntryEndpoints(ctxLogger, c.operatorCluster, c.sourceCluster, c.ingressEndpoints, c.remoteEndpointAddress, c.identityConfigEnvironment) + if err != nil { + t.Errorf("While constructing serviceEntryPortEndpoint, got error: %v", err) + } + opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.WorkloadEntry{}) + if !cmp.Equal(seEndpoint, c.expectedSEEndpoints, opts) { + t.Errorf("Mismatch between constructed ingressEndpoint and expected ingressEndpoint") + t.Errorf(cmp.Diff(seEndpoint, c.expectedSEEndpoints, opts)) + } + }) + } +} + +func TestGetSortedDependentNamespaces(t *testing.T) { + admiralParams := admiralParamsForServiceEntryTests() + common.ResetSync() + common.InitializeConfig(admiralParams) + ctx := context.Background() + ctxLogger := common.GetCtxLogger(ctx, "ctg-taxprep-partnerdatatotax", "") + testCases := []struct { + name string + operatorCluster string + sourceCluster string + cname string + env string + clientAssets []map[string]string + expectedNamespaces []string + }{ + { + name: "Given asset info, cluster info, and client info, " + + "When the operator cluster is the same as the source cluster" + + "Then 
the constructed dependent namespaces should include istio-system",
+			operatorCluster:    "cg-tax-ppd-usw2-k8s",
+			sourceCluster:      "cg-tax-ppd-usw2-k8s",
+			cname:              "e2e.intuit.ctg.taxprep.partnerdatatotax.mesh",
+			env:                "e2e",
+			clientAssets:       []map[string]string{{"name": "sample"}},
+			expectedNamespaces: []string{"ctg-taxprep-partnerdatatotax-usw2-e2e", "istio-system"},
+		},
+		{
+			name: "Given asset info, cluster info, and client info, " +
+				"When the operator cluster is not the same as the source cluster" +
+				"Then the constructed dependent namespaces should not include istio-system",
+			operatorCluster:    "cg-tax-ppd-usw2-k8s",
+			sourceCluster:      "cg-tax-ppd-use2-k8s",
+			cname:              "e2e.intuit.ctg.taxprep.partnerdatatotax.mesh",
+			env:                "e2e",
+			clientAssets:       []map[string]string{{"name": "sample"}},
+			expectedNamespaces: []string{"ctg-taxprep-partnerdatatotax-usw2-e2e"},
+		},
+	}
+	for _, c := range testCases {
+		t.Run(c.name, func(t *testing.T) {
+			namespaces, err := getSortedDependentNamespaces(ctxLogger, ctx, c.operatorCluster, c.sourceCluster, c.cname, c.env, c.clientAssets)
+			if err != nil {
+				t.Errorf("While constructing sorted dependent namespaces, got error: %v", err)
+			}
+			if !cmp.Equal(namespaces, c.expectedNamespaces) {
+				t.Errorf("Mismatch between constructed sortedDependentNamespaces and expected sortedDependentNamespaces")
+				t.Errorf(cmp.Diff(namespaces, c.expectedNamespaces))
+			}
+		})
+	}
+}
+
+func TestBuildServiceEntryForClusterByEnv(t *testing.T) {
+	admiralParams := admiralParamsForServiceEntryTests()
+	common.ResetSync()
+	common.InitializeConfig(admiralParams)
+	ctx := context.Background()
+	ctxLogger := common.GetCtxLogger(ctx, "ctg-taxprep-partnerdatatotax", "")
+	expectedLocalServiceEntry := createMockServiceEntry("e2e", "Intuit.ctg.taxprep.partnerdatatotax", "partner-data-to-tax-spk-root-service.ctg-taxprep-partnerdatatotax-usw2-e2e.svc.cluster.local.", 8090, []string{"ctg-taxprep-partnerdatatotax-usw2-e2e", "istio-system"})
+	expectedRemoteServiceEntry := createMockServiceEntry("e2e", "Intuit.ctg.taxprep.partnerdatatotax", "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", 15443, []string{"ctg-taxprep-partnerdatatotax-usw2-e2e"})
+	e2eEnv := getSampleIdentityConfigEnvironment("e2e", "ctg-taxprep-partnerdatatotax-usw2-e2e")
+	ingressEndpoints := []*networkingV1Alpha3.WorkloadEntry{{
+		Address:  "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.",
+		Locality: "us-west-2",
+		Ports:    map[string]uint32{"http": uint32(15443)},
+		Labels:   map[string]string{"security.istio.io/tlsMode": "istio"},
+	}}
+	testCases := []struct {
+		name                      string
+		operatorCluster           string
+		sourceCluster             string
+		identity                  string
+		clientAssets              []map[string]string
+		ingressEndpoints          []*networkingV1Alpha3.WorkloadEntry
+		remoteEndpointAddress     string
+		identityConfigEnvironment IdentityConfigEnvironment
+		expectedServiceEntry      *networkingV1Alpha3.ServiceEntry
+	}{
+		{
+			name: "Given information to build an se, " +
+				"When the operator cluster is not the same as the source cluster" +
+				"Then the constructed se should have remote endpoint and no istio-system in exportTo",
+			operatorCluster:           "cg-tax-ppd-usw2-k8s",
+			sourceCluster:             "apigw-cx-ppd-usw2-k8s",
+			identity:                  "Intuit.ctg.taxprep.partnerdatatotax",
+			clientAssets:              []map[string]string{{"name": "sample"}},
+			ingressEndpoints:          ingressEndpoints,
+			remoteEndpointAddress:     "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.",
+			identityConfigEnvironment: e2eEnv,
+
expectedServiceEntry: &expectedRemoteServiceEntry, + }, + { + name: "Given information to build an se, " + + "When the operator cluster is the same as the source cluster" + + "Then the constructed se should have local endpoint and istio-system in exportTo", + operatorCluster: "cg-tax-ppd-usw2-k8s", + sourceCluster: "cg-tax-ppd-usw2-k8s", + identity: "Intuit.ctg.taxprep.partnerdatatotax", + clientAssets: []map[string]string{{"name": "sample"}}, + ingressEndpoints: ingressEndpoints, + remoteEndpointAddress: "internal-a96ffe9cdbb4c4d81b796cc6a37d3e1d-2123389388.us-west-2.elb.amazonaws.com.", + identityConfigEnvironment: e2eEnv, + expectedServiceEntry: &expectedLocalServiceEntry, + }, + } + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + se, err := buildServiceEntryForClusterByEnv(ctxLogger, ctx, c.operatorCluster, c.sourceCluster, c.identity, c.clientAssets, c.ingressEndpoints, c.remoteEndpointAddress, c.identityConfigEnvironment) + if err != nil { + t.Errorf("While constructing serviceEntry, got error: %v", err) + } + opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.ServiceEntry{}, networkingV1Alpha3.ServicePort{}, networkingV1Alpha3.WorkloadEntry{}) + if !cmp.Equal(se, c.expectedServiceEntry, opts) { + t.Errorf("Mismatch between constructed serviceEntry and expected sortedEntry") + t.Errorf(cmp.Diff(se, c.expectedServiceEntry, opts)) + } + }) + } +} + +func TestBuildServiceEntriesFromIdentityConfig(t *testing.T) { + admiralParams := admiralParamsForServiceEntryTests() + common.ResetSync() + common.InitializeConfig(admiralParams) + ctx := context.Background() + ctxLogger := common.GetCtxLogger(ctx, "ctg-taxprep-partnerdatatotax", "") + identityConfig := getSampleIdentityConfig() + expectedLocalServiceEntryprf := createMockServiceEntry("prf", "Intuit.ctg.taxprep.partnerdatatotax", "partner-data-to-tax-spk-root-service.ctg-taxprep-partnerdatatotax-usw2-prf.svc.cluster.local.", 8090, []string{"ctg-taxprep-partnerdatatotax-usw2-prf", "istio-system"}) + expectedLocalServiceEntrye2e := createMockServiceEntry("e2e", "Intuit.ctg.taxprep.partnerdatatotax", "partner-data-to-tax-spk-root-service.ctg-taxprep-partnerdatatotax-usw2-e2e.svc.cluster.local.", 8090, []string{"ctg-taxprep-partnerdatatotax-usw2-e2e", "istio-system"}) + expectedLocalServiceEntryqal := createMockServiceEntry("qal", "Intuit.ctg.taxprep.partnerdatatotax", "partner-data-to-tax-spk-root-service.ctg-taxprep-partnerdatatotax-usw2-qal.svc.cluster.local.", 8090, []string{"ctg-taxprep-partnerdatatotax-usw2-qal", "istio-system"}) + expectedLocalServiceEntries := []*networkingV1Alpha3.ServiceEntry{&expectedLocalServiceEntryprf, &expectedLocalServiceEntrye2e, &expectedLocalServiceEntryqal} + testCases := []struct { + name string + operatorCluster string + event admiral.EventType + identityConfig IdentityConfig + expectedServiceEntries []*networkingV1Alpha3.ServiceEntry + }{ + { + name: "Given information to build an se, " + + "When the operator cluster is the same as the source cluster" + + "Then the constructed se should have local endpoint and istio-system in exportTo", + operatorCluster: "cg-tax-ppd-usw2-k8s", + event: admiral.Add, + identityConfig: identityConfig, + expectedServiceEntries: expectedLocalServiceEntries, + }, + } + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + serviceEntryBuilder := ServiceEntryBuilder{OperatorCluster: c.operatorCluster} + serviceEntries, err := serviceEntryBuilder.BuildServiceEntriesFromIdentityConfig(ctxLogger, ctx, c.event, c.identityConfig) + if err != nil 
{ + t.Errorf("While constructing service entries, got error: %v", err) + } + opts := cmpopts.IgnoreUnexported(networkingV1Alpha3.ServiceEntry{}, networkingV1Alpha3.ServicePort{}, networkingV1Alpha3.WorkloadEntry{}) + if !cmp.Equal(serviceEntries, c.expectedServiceEntries, opts) { + t.Errorf("Mismatch between constructed sorted entries and expected service entries") + t.Errorf(cmp.Diff(serviceEntries, c.expectedServiceEntries, opts)) + } + }) + } +} diff --git a/admiral/pkg/test/mock.go b/admiral/pkg/test/mock.go index 0e5a380a..2c72a5a0 100644 --- a/admiral/pkg/test/mock.go +++ b/admiral/pkg/test/mock.go @@ -2,14 +2,26 @@ package test import ( "context" + "errors" + + argoprojv1alpha1 "github.com/argoproj/argo-rollouts/pkg/client/clientset/versioned/typed/rollouts/v1alpha1" + metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1" + v1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + admiralV1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1alpha1" v1alpha32 "istio.io/client-go/pkg/apis/networking/v1alpha3" k8sAppsV1 "k8s.io/api/apps/v1" k8sCoreV1 "k8s.io/api/core/v1" ) +var ( + RolloutNamespace = "test-ns" +) + type MockIstioConfigStore struct { TestHook func(interface{}) } @@ -30,42 +42,72 @@ func (m *MockIstioConfigStore) Delete(typ, name, namespace string) error { type MockDeploymentHandler struct { } -func (m *MockDeploymentHandler) Added(ctx context.Context, obj *k8sAppsV1.Deployment) { - +func (m *MockDeploymentHandler) Added(ctx context.Context, obj *k8sAppsV1.Deployment) error { + return nil } -func (m *MockDeploymentHandler) Deleted(ctx context.Context, obj *k8sAppsV1.Deployment) { +func (m *MockDeploymentHandler) Deleted(ctx context.Context, obj *k8sAppsV1.Deployment) error { + return nil +} +type MockDeploymentHandlerError struct { } -type MockRolloutHandler struct { +func (m *MockDeploymentHandlerError) Added(ctx context.Context, obj *k8sAppsV1.Deployment) error { + return nil } -func (m *MockRolloutHandler) Added(ctx context.Context, obj *argo.Rollout) { +func (m *MockDeploymentHandlerError) Deleted(ctx context.Context, obj *k8sAppsV1.Deployment) error { + return errors.New("error while deleting deployment") +} +type MockRolloutHandler struct { + Obj *argo.Rollout } -func (m *MockRolloutHandler) Deleted(ctx context.Context, obj *argo.Rollout) { +func (m *MockRolloutHandler) Added(ctx context.Context, obj *argo.Rollout) error { + m.Obj = obj + return nil +} +func (m *MockRolloutHandler) Deleted(ctx context.Context, obj *argo.Rollout) error { + return nil } -func (m *MockRolloutHandler) Updated(ctx context.Context, obj *argo.Rollout) { +func (m *MockRolloutHandler) Updated(ctx context.Context, obj *argo.Rollout) error { + return nil +} +type MockRolloutHandlerError struct { + Obj *argo.Rollout } -type MockServiceHandler struct { +func (m *MockRolloutHandlerError) Added(ctx context.Context, obj *argo.Rollout) error { + m.Obj = obj + return nil } -func (m *MockServiceHandler) Added(ctx context.Context, obj *k8sCoreV1.Service) { +func (m *MockRolloutHandlerError) Deleted(ctx context.Context, obj *argo.Rollout) error { + return errors.New("error while deleting rollout") +} +func (m *MockRolloutHandlerError) Updated(ctx context.Context, obj *argo.Rollout) error { + return nil } -func (m 
*MockServiceHandler) Updated(ctx context.Context, obj *k8sCoreV1.Service) { +type MockServiceHandler struct { +} +func (m *MockServiceHandler) Added(ctx context.Context, obj *k8sCoreV1.Service) error { + return nil } -func (m *MockServiceHandler) Deleted(ctx context.Context, obj *k8sCoreV1.Service) { +func (m *MockServiceHandler) Updated(ctx context.Context, obj *k8sCoreV1.Service) error { + return nil +} +func (m *MockServiceHandler) Deleted(ctx context.Context, obj *k8sCoreV1.Service) error { + return nil } type MockPodHandler struct { @@ -94,110 +136,292 @@ func (m *MockNodeHandler) Deleted(obj *k8sCoreV1.Node) { type MockDependencyHandler struct { } -func (m *MockDependencyHandler) Added(ctx context.Context, obj *v1.Dependency) { - +func (m *MockDependencyHandler) Added(ctx context.Context, obj *admiralV1.Dependency) error { + return nil } -func (m *MockDependencyHandler) Updated(ctx context.Context, obj *v1.Dependency) { - +func (m *MockDependencyHandler) Updated(ctx context.Context, obj *admiralV1.Dependency) error { + return nil } -func (m *MockDependencyHandler) Deleted(ctx context.Context, obj *v1.Dependency) { - +func (m *MockDependencyHandler) Deleted(ctx context.Context, obj *admiralV1.Dependency) error { + return nil } type MockGlobalTrafficHandler struct { - Obj *v1.GlobalTrafficPolicy + Obj *admiralV1.GlobalTrafficPolicy } -func (m *MockGlobalTrafficHandler) Added(ctx context.Context, obj *v1.GlobalTrafficPolicy) { +func (m *MockGlobalTrafficHandler) Added(ctx context.Context, obj *admiralV1.GlobalTrafficPolicy) error { m.Obj = obj + return nil } -func (m *MockGlobalTrafficHandler) Updated(ctx context.Context, obj *v1.GlobalTrafficPolicy) { +func (m *MockGlobalTrafficHandler) Updated(ctx context.Context, obj *admiralV1.GlobalTrafficPolicy) error { m.Obj = obj + return nil } -func (m *MockGlobalTrafficHandler) Deleted(ctx context.Context, obj *v1.GlobalTrafficPolicy) { +func (m *MockGlobalTrafficHandler) Deleted(ctx context.Context, obj *admiralV1.GlobalTrafficPolicy) error { m.Obj = nil + return nil } type MockServiceEntryHandler struct { Obj *v1alpha32.ServiceEntry } -func (m *MockServiceEntryHandler) Added(obj *v1alpha32.ServiceEntry) { +func (m *MockServiceEntryHandler) Added(obj *v1alpha32.ServiceEntry) error { m.Obj = obj + return nil } -func (m *MockServiceEntryHandler) Updated(obj *v1alpha32.ServiceEntry) { +func (m *MockServiceEntryHandler) Updated(obj *v1alpha32.ServiceEntry) error { m.Obj = obj + return nil } -func (m *MockServiceEntryHandler) Deleted(obj *v1alpha32.ServiceEntry) { +func (m *MockServiceEntryHandler) Deleted(obj *v1alpha32.ServiceEntry) error { m.Obj = nil + return nil } type MockVirtualServiceHandler struct { Obj *v1alpha32.VirtualService } -func (m *MockVirtualServiceHandler) Added(ctx context.Context, obj *v1alpha32.VirtualService) { +func (m *MockVirtualServiceHandler) Added(ctx context.Context, obj *v1alpha32.VirtualService) error { m.Obj = obj + return nil } -func (m *MockVirtualServiceHandler) Updated(ctx context.Context, obj *v1alpha32.VirtualService) { +func (m *MockVirtualServiceHandler) Updated(ctx context.Context, obj *v1alpha32.VirtualService) error { m.Obj = obj + return nil } -func (m *MockVirtualServiceHandler) Deleted(ctx context.Context, obj *v1alpha32.VirtualService) { +func (m *MockVirtualServiceHandler) Deleted(ctx context.Context, obj *v1alpha32.VirtualService) error { m.Obj = nil + return nil } type MockDestinationRuleHandler struct { Obj *v1alpha32.DestinationRule } -func (m *MockDestinationRuleHandler) Added(ctx 
context.Context, obj *v1alpha32.DestinationRule) { +func (m *MockDestinationRuleHandler) Added(ctx context.Context, obj *v1alpha32.DestinationRule) error { m.Obj = obj + return nil } -func (m *MockDestinationRuleHandler) Updated(ctx context.Context, obj *v1alpha32.DestinationRule) { +func (m *MockDestinationRuleHandler) Updated(ctx context.Context, obj *v1alpha32.DestinationRule) error { m.Obj = obj + return nil } -func (m *MockDestinationRuleHandler) Deleted(ctx context.Context, obj *v1alpha32.DestinationRule) { +func (m *MockDestinationRuleHandler) Deleted(ctx context.Context, obj *v1alpha32.DestinationRule) error { m.Obj = nil + return nil } type MockSidecarHandler struct { Obj *v1alpha32.Sidecar } -func (m *MockSidecarHandler) Added(ctx context.Context, obj *v1alpha32.Sidecar) { +func (m *MockSidecarHandler) Added(ctx context.Context, obj *v1alpha32.Sidecar) error { m.Obj = obj + return nil } -func (m *MockSidecarHandler) Updated(ctx context.Context, obj *v1alpha32.Sidecar) { +func (m *MockSidecarHandler) Updated(ctx context.Context, obj *v1alpha32.Sidecar) error { m.Obj = obj + return nil } -func (m *MockSidecarHandler) Deleted(ctx context.Context, obj *v1alpha32.Sidecar) { +func (m *MockSidecarHandler) Deleted(ctx context.Context, obj *v1alpha32.Sidecar) error { m.Obj = nil + return nil } type MockRoutingPolicyHandler struct { - Obj *v1.RoutingPolicy + Obj *admiralV1.RoutingPolicy +} + +func (m *MockRoutingPolicyHandler) Added(ctx context.Context, obj *admiralV1.RoutingPolicy) error { + m.Obj = obj + return nil +} + +func (m *MockRoutingPolicyHandler) Deleted(ctx context.Context, obj *admiralV1.RoutingPolicy) error { + m.Obj = nil + return nil +} + +func (m *MockRoutingPolicyHandler) Updated(ctx context.Context, obj *admiralV1.RoutingPolicy) error { + m.Obj = obj + return nil +} + +type MockTrafficConfigHandler struct { + Obj *admiralV1.TrafficConfig } -func (m *MockRoutingPolicyHandler) Added(ctx context.Context, obj *v1.RoutingPolicy) { +func (m *MockTrafficConfigHandler) Added(ctx context.Context, obj *admiralV1.TrafficConfig) { m.Obj = obj } -func (m *MockRoutingPolicyHandler) Deleted(ctx context.Context, obj *v1.RoutingPolicy) { +func (m *MockTrafficConfigHandler) Deleted(ctx context.Context, obj *admiralV1.TrafficConfig) { m.Obj = nil } -func (m *MockRoutingPolicyHandler) Updated(ctx context.Context, obj *v1.RoutingPolicy) { +func (m *MockTrafficConfigHandler) Updated(ctx context.Context, obj *admiralV1.TrafficConfig) { m.Obj = obj } + +type MockEnvoyFilterHandler struct { +} + +func (m *MockEnvoyFilterHandler) Added(context.Context, *v1alpha32.EnvoyFilter) { +} + +func (m *MockEnvoyFilterHandler) Deleted(context.Context, *v1alpha32.EnvoyFilter) { +} + +func (m *MockEnvoyFilterHandler) Updated(context.Context, *v1alpha32.EnvoyFilter) { +} + +type MockDependencyProxyHandler struct { +} + +func (m *MockDependencyProxyHandler) Added(context.Context, *admiralV1.DependencyProxy) error { + return nil +} + +func (m *MockDependencyProxyHandler) Deleted(context.Context, *admiralV1.DependencyProxy) error { + return nil +} + +func (m *MockDependencyProxyHandler) Updated(context.Context, *admiralV1.DependencyProxy) error { + return nil +} + +type MockRolloutsGetter struct{} +type FakeRolloutsImpl struct{} + +func (f FakeRolloutsImpl) Create(ctx context.Context, rollout *v1alpha1.Rollout, opts metaV1.CreateOptions) (*v1alpha1.Rollout, error) { + return nil, nil +} + +func (f FakeRolloutsImpl) Update(ctx context.Context, rollout *v1alpha1.Rollout, opts metaV1.UpdateOptions) 
(*v1alpha1.Rollout, error) { + return nil, nil +} + +func (f FakeRolloutsImpl) UpdateStatus(ctx context.Context, rollout *v1alpha1.Rollout, opts metaV1.UpdateOptions) (*v1alpha1.Rollout, error) { + return nil, nil +} + +func (f FakeRolloutsImpl) Delete(ctx context.Context, name string, opts metaV1.DeleteOptions) error { + return nil +} + +func (f FakeRolloutsImpl) DeleteCollection(ctx context.Context, opts metaV1.DeleteOptions, listOpts metaV1.ListOptions) error { + return nil +} + +func (f FakeRolloutsImpl) Get(ctx context.Context, name string, opts metaV1.GetOptions) (*v1alpha1.Rollout, error) { + return nil, nil +} + +func (f FakeRolloutsImpl) List(ctx context.Context, opts metaV1.ListOptions) (*v1alpha1.RolloutList, error) { + rollout1 := v1alpha1.Rollout{ + ObjectMeta: metaV1.ObjectMeta{ + Name: "rollout-name", + Namespace: RolloutNamespace, + }, + Spec: v1alpha1.RolloutSpec{ + Strategy: v1alpha1.RolloutStrategy{ + Canary: &v1alpha1.CanaryStrategy{ + TrafficRouting: &v1alpha1.RolloutTrafficRouting{ + Istio: &v1alpha1.IstioTrafficRouting{ + VirtualService: &v1alpha1.IstioVirtualService{ + Name: "virtual-service-1", + }, + }, + }, + }, + }, + }, + } + rollout2 := v1alpha1.Rollout{ + ObjectMeta: metaV1.ObjectMeta{ + Name: "rollout-name2", + Namespace: RolloutNamespace, + }, + Spec: v1alpha1.RolloutSpec{ + Strategy: v1alpha1.RolloutStrategy{ + Canary: &v1alpha1.CanaryStrategy{ + TrafficRouting: &v1alpha1.RolloutTrafficRouting{ + Istio: &v1alpha1.IstioTrafficRouting{ + VirtualService: &v1alpha1.IstioVirtualService{ + Name: "virtual-service-1", + }, + }, + }, + }, + }, + }, + } + list := &v1alpha1.RolloutList{Items: []v1alpha1.Rollout{rollout1, rollout2}} + return list, nil +} + +func (f FakeRolloutsImpl) Watch(ctx context.Context, opts metaV1.ListOptions) (watch.Interface, error) { + return nil, nil +} + +func (f FakeRolloutsImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metaV1.PatchOptions, subresources ...string) (result *v1alpha1.Rollout, err error) { + return nil, nil +} + +func (m MockRolloutsGetter) RESTClient() rest.Interface { + return nil +} + +func (m MockRolloutsGetter) AnalysisRuns(namespace string) argoprojv1alpha1.AnalysisRunInterface { + return nil +} + +func (m MockRolloutsGetter) AnalysisTemplates(namespace string) argoprojv1alpha1.AnalysisTemplateInterface { + return nil +} + +func (m MockRolloutsGetter) ClusterAnalysisTemplates() argoprojv1alpha1.ClusterAnalysisTemplateInterface { + return nil +} + +func (m MockRolloutsGetter) Experiments(namespace string) argoprojv1alpha1.ExperimentInterface { + return nil +} + +func (m MockRolloutsGetter) Rollouts(namespace string) argoprojv1alpha1.RolloutInterface { + return FakeRolloutsImpl{} +} + +type MockOutlierDetectionHandler struct { + Obj *admiralV1.OutlierDetection +} + +func (m *MockOutlierDetectionHandler) Added(ctx context.Context, obj *admiralV1.OutlierDetection) error { + m.Obj = obj + return nil +} + +func (m *MockOutlierDetectionHandler) Updated(ctx context.Context, obj *admiralV1.OutlierDetection) error { + m.Obj = obj + return nil +} + +func (m *MockOutlierDetectionHandler) Deleted(ctx context.Context, obj *admiralV1.OutlierDetection) error { + m.Obj = nil + return nil +} diff --git a/admiral/pkg/test/types.go b/admiral/pkg/test/types.go index d556ceac..2ab7c65f 100644 --- a/admiral/pkg/test/types.go +++ b/admiral/pkg/test/types.go @@ -19,6 +19,6 @@ func (c *FakeConfigMapController) GetConfigMap(ctx context.Context) (*k8sCoreV1. 
func (c *FakeConfigMapController) PutConfigMap(ctx context.Context, newMap *k8sCoreV1.ConfigMap) error {
 	return c.PutError
 }
-func (c *FakeConfigMapController)GetIPPrefixForServiceEntries() (seIpPrefix string) {
+func (c *FakeConfigMapController) GetIPPrefixForServiceEntries() (seIpPrefix string) {
 	return "240.0"
 }
diff --git a/admiral/pkg/util/constants.go b/admiral/pkg/util/constants.go
new file mode 100644
index 00000000..807a6199
--- /dev/null
+++ b/admiral/pkg/util/constants.go
@@ -0,0 +1,9 @@
+package util
+
+const (
+	Http           = "http"
+	Grpc           = "grpc"
+	GrpcWeb        = "grpc-web"
+	Http2          = "http2"
+	SecretShardKey = "shard"
+)
diff --git a/admiral/pkg/util/util.go b/admiral/pkg/util/util.go
new file mode 100644
index 00000000..5af737f2
--- /dev/null
+++ b/admiral/pkg/util/util.go
@@ -0,0 +1,38 @@
+package util
+
+import (
+	"strings"
+	"time"
+)
+
+type AdmiralState struct {
+	ReadOnly           bool
+	IsStateInitialized bool
+}
+
+var (
+	CurrentAdmiralState AdmiralState
+)
+
+func IsAdmiralReadOnly() bool {
+	return CurrentAdmiralState.ReadOnly
+}
+
+// ResyncIntervals defines the different reconciliation intervals
+// for kubernetes operators
+type ResyncIntervals struct {
+	UniversalReconcileInterval time.Duration
+	SeAndDrReconcileInterval   time.Duration
+}
+
+// GetPortProtocol maps a service port name to a mesh protocol by prefix,
+// defaulting to http. The grpc-web check must precede the grpc check,
+// because "grpc" is itself a prefix of "grpc-web".
+func GetPortProtocol(name string) string {
+	var protocol = Http
+	if strings.HasPrefix(name, GrpcWeb) {
+		protocol = GrpcWeb
+	} else if strings.HasPrefix(name, Grpc) {
+		protocol = Grpc
+	} else if strings.HasPrefix(name, Http2) {
+		protocol = Http2
+	}
+	return protocol
+}
diff --git a/admiral/pkg/util/util_test.go b/admiral/pkg/util/util_test.go
new file mode 100644
index 00000000..0bdac7db
--- /dev/null
+++ b/admiral/pkg/util/util_test.go
@@ -0,0 +1,48 @@
+package util
+
+import "testing"
+
+func TestGetPortProtocol(t *testing.T) {
+	cases := []struct {
+		name        string
+		protocol    string
+		expProtocol string
+	}{
+		{
+			name: "Given valid input parameters, " +
+				"When port name is " + Http + ", " +
+				"Then protocol should be " + Http,
+			protocol:    Http,
+			expProtocol: Http,
+		},
+		{
+			name: "Given valid input parameters, " +
+				"When port name is " + GrpcWeb + ", " +
+				"Then protocol should be " + GrpcWeb,
+			protocol:    GrpcWeb,
+			expProtocol: GrpcWeb,
+		},
+		{
+			name: "Given valid input parameters, " +
+				"When port name is " + Grpc + ", " +
+				"Then protocol should be " + Grpc,
+			protocol:    Grpc,
+			expProtocol: Grpc,
+		},
+		{
+			name: "Given valid input parameters, " +
+				"When port name is " + Http2 + ", " +
+				"Then protocol should be " + Http2,
+			protocol:    Http2,
+			expProtocol: Http2,
+		},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			protocol := GetPortProtocol(c.protocol)
+			if protocol != c.expProtocol {
+				t.Errorf("expected=%v, got=%v", c.expProtocol, protocol)
+			}
+		})
+	}
+}
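The table above only exercises exact protocol names, so one prefix behavior is worth pinning down with a runnable sketch (hypothetical port names; not part of this change-set):

    package util

    import "fmt"

    func ExampleGetPortProtocol() {
        fmt.Println(GetPortProtocol("http-service-mesh")) // no special-cased prefix, defaults to http
        fmt.Println(GetPortProtocol("grpc-web-client"))   // matched before the "grpc" check
        fmt.Println(GetPortProtocol("tcp-metrics"))       // unrecognized, defaults to http
        // Output:
        // http
        // grpc-web
        // http
    }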
diff --git a/admiral/pkg/util/variables.go b/admiral/pkg/util/variables.go
new file mode 100644
index 00000000..78cddff1
--- /dev/null
+++ b/admiral/pkg/util/variables.go
@@ -0,0 +1,3 @@
+package util
+
+var ()
diff --git a/docs/Compatibility.md b/docs/Compatibility.md
index 44d1de57..1bc594ab 100644
--- a/docs/Compatibility.md
+++ b/docs/Compatibility.md
@@ -31,4 +31,4 @@
 v1.0 | AWS, GCP, Azure
 v1.1 | AWS, GCP, Azure
 v1.2 | AWS, GCP, Azure
-`Note`: Please submit a PR if admiral was tested on other cloud vendors
\ No newline at end of file
+`Note`: Please submit a PR if admiral was tested on other cloud vendors
diff --git a/docs/Examples.md b/docs/Examples.md
index 6558c7d3..a9734d9a 100644
--- a/docs/Examples.md
+++ b/docs/Examples.md
@@ -71,7 +71,7 @@ $ADMIRAL_HOME/scripts/cluster-secret.sh $MAIN_CLUSTER $MAIN_CLUSTER admiral
 4\. Install/Run Admiral-Sync in the remote clusters that admiral monitors
 ```
 # Create admiral role and bindings on remote cluster
-kubectl apply --kubeconfig=$REMOTE_CLUSTER -f $ADMIRAL_HOME/yaml/remotecluster.yaml
+kubectl apply --context=$REMOTE_CLUSTER -f $ADMIRAL_HOME/yaml/remotecluster.yaml
 ```
 5\. Add Remote Cluster to Admiral's watcher
 ```
@@ -357,4 +357,4 @@
 Run the following script to cleanup admiral and its associated resources
 ```bash
 $ADMIRAL_HOME/scripts/cleanup.sh
-```
+```
\ No newline at end of file