diff --git a/admiral/cmd/admiral/cmd/root.go b/admiral/cmd/admiral/cmd/root.go
index 4e1000770..1b71eb034 100644
--- a/admiral/cmd/admiral/cmd/root.go
+++ b/admiral/cmd/admiral/cmd/root.go
@@ -4,17 +4,18 @@ import (
     "context"
     "flag"
     "fmt"
-    "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/routes"
-    "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/server"
-    "github.com/istio-ecosystem/admiral/admiral/pkg/clusters"
-    "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
-    log "github.com/sirupsen/logrus"
     "os"
     "os/signal"
     "sync"
     "syscall"
     "time"
 
+    "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/routes"
+    "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/server"
+    "github.com/istio-ecosystem/admiral/admiral/pkg/clusters"
+    "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
+    log "github.com/sirupsen/logrus"
+
     "github.com/spf13/cobra"
 )
@@ -142,6 +143,8 @@ func GetRootCmd(args []string) *cobra.Command {
         "The value of envoy filter is to add additional config to the filter config section")
     rootCmd.PersistentFlags().BoolVar(&params.EnableRoutingPolicy, "enable_routing_policy", false,
         "If Routing Policy feature needs to be enabled")
+    rootCmd.PersistentFlags().StringArrayVar(&params.ExcludeAssetList, "exclude_asset_list", []string{},
+        "List of assets which should be excluded from getting processed")
     return rootCmd
 }
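Usage note: the new exclude_asset_list flag is registered with cobra's StringArrayVar, so it can be repeated on the command line and each occurrence appends to the list. A minimal, self-contained sketch of that behavior (the command name and output below are illustrative, not part of this change):

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

func main() {
    var excludeAssetList []string
    cmd := &cobra.Command{
        Use: "admiral-example", // hypothetical command name, for illustration only
        Run: func(cmd *cobra.Command, args []string) {
            // Invoked as: admiral-example --exclude_asset_list=asset1 --exclude_asset_list=asset2
            fmt.Println(excludeAssetList) // prints: [asset1 asset2]
        },
    }
    cmd.PersistentFlags().StringArrayVar(&excludeAssetList, "exclude_asset_list", []string{},
        "List of assets which should be excluded from getting processed")
    cmd.Execute()
}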
diff --git a/admiral/pkg/clusters/handler.go b/admiral/pkg/clusters/handler.go
index 2b5d9c8d6..24f46e632 100644
--- a/admiral/pkg/clusters/handler.go
+++ b/admiral/pkg/clusters/handler.go
@@ -8,13 +8,13 @@ import (
     "strings"
     "time"
 
+    argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
     "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
     v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
     "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
     "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
     "github.com/istio-ecosystem/admiral/admiral/pkg/controller/util"
-    argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
     "github.com/golang/protobuf/ptypes/duration"
     "github.com/golang/protobuf/ptypes/wrappers"
     "github.com/google/go-cmp/cmp"
@@ -103,7 +103,6 @@ func getDestinationRule(se *v1alpha32.ServiceEntry, locality string, gtpTrafficP
             },
         },
     }
-
     if len(locality) == 0 {
         log.Warnf(LogErrFormat, "Process", "GlobalTrafficPolicy", dr.Host, "", "Skipping gtp processing, locality of the cluster nodes cannot be determined. Is this minikube?")
         processGtp = false
@@ -330,7 +329,6 @@ func handleDestinationRuleEvent(ctx context.Context, obj *v1alpha3.DestinationRu
         dependentClusters    = r.AdmiralCache.CnameDependentClusterCache.Get(destinationRule.Host).Copy()
         allDependentClusters = make(map[string]string)
     )
-
     if len(dependentClusters) > 0 {
         log.Infof(LogFormat, "Event", "DestinationRule", obj.Name, clusterId, "Processing")
         util.MapCopy(allDependentClusters, dependentClusters)
@@ -396,7 +394,6 @@ func handleVirtualServiceEvent(
         syncNamespace = common.GetSyncNamespace()
     )
     log.Infof(LogFormat, "Event", resourceType, obj.Name, vh.ClusterID, "Received event")
-
     if len(virtualService.Hosts) > 1 {
         log.Errorf(LogFormat, "Event", resourceType, obj.Name, clusterId, "Skipping as multiple hosts not supported for virtual service namespace="+obj.Namespace)
         return nil
diff --git a/admiral/pkg/clusters/handler_test.go b/admiral/pkg/clusters/handler_test.go
index c19e14f9b..8fcdb36ef 100644
--- a/admiral/pkg/clusters/handler_test.go
+++ b/admiral/pkg/clusters/handler_test.go
@@ -7,16 +7,16 @@ import (
     "testing"
     "time"
 
+    argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
+    "github.com/golang/protobuf/ptypes/duration"
+    "github.com/golang/protobuf/ptypes/wrappers"
+    "github.com/google/go-cmp/cmp"
     "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
     "github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
     "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
     "github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio"
    "github.com/istio-ecosystem/admiral/admiral/pkg/test"
-    argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
-    "github.com/golang/protobuf/ptypes/duration"
-    "github.com/golang/protobuf/ptypes/wrappers"
-    "github.com/google/go-cmp/cmp"
     "github.com/stretchr/testify/assert"
     "google.golang.org/protobuf/testing/protocmp"
     "istio.io/api/networking/v1alpha3"
@@ -197,7 +197,6 @@ func TestGetDestinationRule(t *testing.T) {
             },
         },
     }
-
     se := &v1alpha3.ServiceEntry{Hosts: []string{"qa.myservice.global"}, Endpoints: []*v1alpha3.WorkloadEntry{
         {Address: "east.com", Locality: "us-east-2"}, {Address: "west.com", Locality: "us-west-2"},
     }}
@@ -1589,7 +1588,6 @@ func TestAddUpdateServiceEntry(t *testing.T) {
             IstioClient: fakeIstioClient,
         }
     )
-
     twoEndpointSe := v1alpha3.ServiceEntry{
         Hosts:     []string{"e2e.my-first-service.mesh"},
         Addresses: []string{"240.10.1.1"},
diff --git a/admiral/pkg/clusters/registry_test.go b/admiral/pkg/clusters/registry_test.go
index fa7c662be..48a31b6e3 100644
--- a/admiral/pkg/clusters/registry_test.go
+++ b/admiral/pkg/clusters/registry_test.go
@@ -3,6 +3,7 @@ package clusters
 import (
     "context"
     "strings"
+    "sync"
     "testing"
     "time"
 
@@ -23,61 +24,60 @@ import (
     "k8s.io/client-go/tools/clientcmd"
 )
 
-func init() {
-    p := common.AdmiralParams{
-        KubeconfigPath:             "testdata/fake.config",
-        LabelSet:                   &common.LabelSet{},
-        EnableSAN:                  true,
-        SANPrefix:                  "prefix",
-        HostnameSuffix:             "mesh",
-        SyncNamespace:              "ns",
-        CacheRefreshDuration:       time.Minute,
-        ClusterRegistriesNamespace: "default",
-        DependenciesNamespace:      "default",
-        SecretResolver:             "",
-        WorkloadSidecarUpdate:      "enabled",
-        WorkloadSidecarName:        "default",
-        EnableRoutingPolicy:        true,
-        EnvoyFilterVersion:         "1.13",
-    }
-
-    p.LabelSet.WorkloadIdentityKey = "identity"
-    p.LabelSet.GlobalTrafficDeploymentLabel = "identity"
-    p.LabelSet.PriorityKey = "priority"
-    p.LabelSet.EnvKey = "admiral.io/env"
-
-    common.InitializeConfig(p)
+var registryTestSingleton sync.Once
+
+func setupForRegistryTests() {
+    registryTestSingleton.Do(func() {
+        common.ResetSync()
+        p := common.AdmiralParams{
+            KubeconfigPath:             "testdata/fake.config",
+            LabelSet:                   &common.LabelSet{},
+            EnableSAN:                  true,
+            SANPrefix:                  "prefix",
+            HostnameSuffix:             "mesh",
+            SyncNamespace:              "ns",
+            CacheRefreshDuration:       time.Minute,
+            ClusterRegistriesNamespace: "default",
+            DependenciesNamespace:      "default",
+            SecretResolver:             "",
+            WorkloadSidecarUpdate:      "enabled",
+            WorkloadSidecarName:        "default",
+            EnableRoutingPolicy:        true,
+            EnvoyFilterVersion:         "1.13",
+        }
+        p.LabelSet.WorkloadIdentityKey = "identity"
+        p.LabelSet.GlobalTrafficDeploymentLabel = "identity"
+        p.LabelSet.PriorityKey = "priority"
+        p.LabelSet.EnvKey = "admiral.io/env"
+        common.InitializeConfig(p)
+    })
 }
 
 func TestDeleteCacheControllerThatDoesntExist(t *testing.T) {
-
+    setupForRegistryTests()
     w := NewRemoteRegistry(nil, common.AdmiralParams{})
-
     err := w.deleteCacheController("I don't exit")
-
     if err != nil {
         t.Fail()
     }
 }
 
 func TestDeleteCacheController(t *testing.T) {
-
-    w := NewRemoteRegistry(nil, common.AdmiralParams{})
-
-    r := rest.Config{
-        Host: "test.com",
-    }
-
-    cluster := "test.cluster"
+    setupForRegistryTests()
+    var (
+        cluster = "test.cluster"
+        w       = NewRemoteRegistry(context.TODO(), common.AdmiralParams{})
+        r       = rest.Config{
+            Host: "test.com",
+        }
+    )
     w.createCacheController(&r, cluster, time.Second*time.Duration(300))
     rc := w.GetRemoteController(cluster)
-
     if rc == nil {
         t.Fail()
     }
     err := w.deleteCacheController(cluster)
-
     if err != nil {
         t.Fail()
     }
@@ -89,54 +89,56 @@ func TestDeleteCacheController(t *testing.T) {
 }
 
 func TestCopyServiceEntry(t *testing.T) {
-
-    se := networking.ServiceEntry{
-        Hosts: []string{"test.com"},
-    }
-
-    r := copyServiceEntry(&se)
-
+    setupForRegistryTests()
+    var (
+        se = networking.ServiceEntry{
+            Hosts: []string{"test.com"},
+        }
+        r = copyServiceEntry(&se)
+    )
     if r.Hosts[0] != "test.com" {
         t.Fail()
     }
 }
 
 func TestCopyEndpoint(t *testing.T) {
-
-    se := networking.WorkloadEntry{
-        Address: "127.0.0.1",
-    }
-
-    r := copyEndpoint(&se)
-
+    var (
+        se = networking.WorkloadEntry{
+            Address: "127.0.0.1",
+        }
+        r = copyEndpoint(&se)
+    )
+    setupForRegistryTests()
     if r.Address != "127.0.0.1" {
         t.Fail()
     }
-
 }
 
 func TestCopySidecar(t *testing.T) {
-    spec := networking.Sidecar{
-        WorkloadSelector: &networking.WorkloadSelector{
-            Labels: map[string]string{"TestLabel": "TestValue"},
-        },
-    }
-
-    //nolint
-    sidecar := v1alpha3.Sidecar{Spec: spec}
-
-    newSidecar := copySidecar(&sidecar)
-
+    setupForRegistryTests()
+    var (
+        spec = networking.Sidecar{
+            WorkloadSelector: &networking.WorkloadSelector{
+                Labels: map[string]string{"TestLabel": "TestValue"},
+            },
+        }
+        //nolint
+        sidecar    = v1alpha3.Sidecar{Spec: spec}
+        newSidecar = copySidecar(&sidecar)
+    )
     if newSidecar.Spec.WorkloadSelector != spec.WorkloadSelector {
         t.Fail()
     }
 }
 
 func createMockRemoteController(f func(interface{})) (*RemoteController, error) {
-    config := rest.Config{
-        Host: "localhost",
-    }
-    stop := make(chan struct{})
+    var (
+        config = rest.Config{
+            Host: "localhost",
+        }
+        stop = make(chan struct{})
+    )
+
     d, err := admiral.NewDeploymentController("", stop, &test.MockDeploymentHandler{}, &config, time.Second*time.Duration(300))
     if err != nil {
         return nil, err
@@ -202,83 +204,72 @@ func createMockRemoteController(f func(interface{})) (*RemoteController, error)
 }
 
 func TestCreateSecretController(t *testing.T) {
-
+    setupForRegistryTests()
     err := createSecretController(context.Background(), NewRemoteRegistry(nil, common.AdmiralParams{}))
-
     if err != nil {
         t.Fail()
     }
-
     common.SetKubeconfigPath("fail")
-
     err = createSecretController(context.Background(), NewRemoteRegistry(nil, common.AdmiralParams{}))
-
-    common.SetKubeconfigPath("testdata/fake.config")
-
     if err == nil {
         t.Fail()
     }
+    common.SetKubeconfigPath("testdata/fake.config")
 }
 
 func TestInitAdmiral(t *testing.T) {
-
+    setupForRegistryTests()
     p := common.AdmiralParams{
         KubeconfigPath: "testdata/fake.config",
         LabelSet:       &common.LabelSet{},
     }
-
     p.LabelSet.WorkloadIdentityKey = "overridden-key"
-
     rr, err := InitAdmiral(context.Background(), p)
-
     if err != nil {
         t.Fail()
     }
     if len(rr.GetClusterIds()) != 0 {
         t.Fail()
     }
-
     if common.GetWorkloadIdentifier() != "identity" {
         t.Errorf("Workload identity label override failed. Expected \"identity\", got %v", common.GetWorkloadIdentifier())
     }
 }
 
 func TestAdded(t *testing.T) {
-    ctx := context.Background()
-    p := common.AdmiralParams{
-        KubeconfigPath: "testdata/fake.config",
-    }
-    rr, _ := InitAdmiral(context.Background(), p)
-
+    setupForRegistryTests()
+    var (
+        ctx = context.Background()
+        p   = common.AdmiralParams{
+            KubeconfigPath: "testdata/fake.config",
+        }
+        rr, _ = InitAdmiral(context.Background(), p)
+    )
     rc, _ := createMockRemoteController(func(i interface{}) {
         t.Fail()
     })
     rr.PutRemoteController("test.cluster", rc)
-    d, e := admiral.NewDependencyController(make(chan struct{}), &test.MockDependencyHandler{}, p.KubeconfigPath, "dep-ns", time.Second*time.Duration(300))
-
-    if e != nil {
+    d, err := admiral.NewDependencyController(make(chan struct{}), &test.MockDependencyHandler{}, p.KubeconfigPath, "dep-ns", time.Second*time.Duration(300))
+    if err != nil {
         t.Fail()
     }
-
     dh := DependencyHandler{
         RemoteRegistry: rr,
         DepController:  d,
     }
-
     depData := v1.Dependency{
         Spec: depModel.Dependency{
-            IdentityLabel: "idenity",
+            IdentityLabel: "identity",
             Destinations:  []string{"one", "two"},
             Source:        "bar",
         },
     }
-
     dh.Added(ctx, &depData)
     dh.Deleted(ctx, &depData)
-
 }
 
 func TestGetServiceForDeployment(t *testing.T) {
+    setupForRegistryTests()
     baseRc, _ := createMockRemoteController(func(i interface{}) {
         //res := i.(istio.Config)
         //se, ok := res.Spec.(*v1alpha3.ServiceEntry)
@@ -298,7 +289,6 @@ func TestGetServiceForDeployment(t *testing.T) {
         //    }
         //}
     })
-
     service := k8sCoreV1.Service{}
     service.Namespace = "under-test"
     service.Spec.Ports = []k8sCoreV1.ServicePort{
@@ -364,6 +354,7 @@ func TestGetServiceForDeployment(t *testing.T) {
 }
 
 func TestUpdateCacheController(t *testing.T) {
+    setupForRegistryTests()
     p := common.AdmiralParams{
         KubeconfigPath: "testdata/fake.config",
     }
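The registry tests above replace the package-level init() with a sync.Once-guarded setup function. Reduced to its essentials, the pattern looks like this (names are illustrative; ResetSync and InitializeConfig are the admiral helpers used above):

package clusters_example // illustrative package name

import "sync"

var setupOnce sync.Once

// setupForTests stands in for a package init(): the first caller runs the
// initialization closure, every later caller is a no-op, and nothing runs
// at all unless some test actually calls it.
func setupForTests() {
    setupOnce.Do(func() {
        // reset and initialize shared test configuration here,
        // e.g. common.ResetSync(); common.InitializeConfig(p)
    })
}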
diff --git a/admiral/pkg/clusters/serviceentry.go b/admiral/pkg/clusters/serviceentry.go
index c6cf2a821..303f12b33 100644
--- a/admiral/pkg/clusters/serviceentry.go
+++ b/admiral/pkg/clusters/serviceentry.go
@@ -57,28 +57,26 @@ func modifyServiceEntryForNewServiceOrPod(
     ctx context.Context, event admiral.EventType, env string,
     sourceIdentity string, remoteRegistry *RemoteRegistry) map[string]*networking.ServiceEntry {
     defer util.LogElapsedTime("modifyServiceEntryForNewServiceOrPod", sourceIdentity, env, "")()
-
     if CurrentAdmiralState.ReadOnly {
         log.Infof(LogFormat, event, env, sourceIdentity, "", "Processing skipped as Admiral is in Read-only mode")
         return nil
     }
-
     if IsCacheWarmupTime(remoteRegistry) {
         log.Infof(LogFormat, event, env, sourceIdentity, "", "Processing skipped during cache warm up state")
         return nil
     }
-
+    //create a service entry, destination rule and virtual service in the local cluster
     var (
-        cname string
         namespace              string
+        cname                  string
         serviceInstance        *k8sV1.Service
+        weightedServices       map[string]*WeightedService
         rollout                *argo.Rollout
         deployment             *k8sAppsV1.Deployment
+        gtps                   = make(map[string][]*v1.GlobalTrafficPolicy)
         start                  = time.Now()
         gtpKey                 = common.ConstructGtpKey(env, sourceIdentity)
         clusters               = remoteRegistry.GetClusterIds()
-        gtps                   = make(map[string][]*v1.GlobalTrafficPolicy)
-        weightedServices       = make(map[string]*WeightedService)
         cnames                 = make(map[string]string)
         sourceServices         = make(map[string]*k8sV1.Service)
         sourceWeightedServices = make(map[string]map[string]*WeightedService)
@@ -99,7 +97,15 @@
         if rc.RolloutController != nil {
             rollout = rc.RolloutController.Cache.Get(sourceIdentity, env)
         }
+        if deployment == nil && rollout == nil {
+            log.Infof("Neither deployment nor rollouts found for identity=%s in env=%s namespace=%s", sourceIdentity, env, namespace)
+            continue
+        }
         if deployment != nil {
+            if isAnExcludedAsset(common.GetDeploymentGlobalIdentifier(deployment), remoteRegistry.ExcludeAssetList) {
+                log.Infof(LogFormat, event, env, sourceIdentity, clusterId, "Processing skipped as asset is in the exclude list")
+                return nil
+            }
             remoteRegistry.AdmiralCache.IdentityClusterCache.Put(sourceIdentity, rc.ClusterID, rc.ClusterID)
             serviceInstance = getServiceForDeployment(rc, deployment)
             if serviceInstance == nil {
@@ -107,11 +113,14 @@
             }
             namespace = deployment.Namespace
             localMeshPorts := GetMeshPorts(rc.ClusterID, serviceInstance, deployment)
-
             cname = common.GetCname(deployment, common.GetWorkloadIdentifier(), common.GetHostnameSuffix())
             sourceDeployments[rc.ClusterID] = deployment
             createServiceEntryForDeployment(ctx, event, rc, remoteRegistry.AdmiralCache, localMeshPorts, deployment, serviceEntries)
         } else if rollout != nil {
+            if isAnExcludedAsset(common.GetRolloutGlobalIdentifier(rollout), remoteRegistry.ExcludeAssetList) {
+                log.Infof(LogFormat, event, env, sourceIdentity, clusterId, "Processing skipped as asset is in the exclude list")
+                return nil
+            }
             remoteRegistry.AdmiralCache.IdentityClusterCache.Put(sourceIdentity, rc.ClusterID, rc.ClusterID)
             weightedServices = getServiceForRollout(ctx, rc, rollout)
             if len(weightedServices) == 0 {
@@ -125,7 +134,6 @@
             }
             namespace = rollout.Namespace
             localMeshPorts := GetMeshPortsForRollout(rc.ClusterID, serviceInstance, rollout)
-
             cname = common.GetCnameForRollout(rollout, common.GetWorkloadIdentifier(), common.GetHostnameSuffix())
             cnames[cname] = "1"
             sourceRollouts[rc.ClusterID] = rollout
@@ -187,7 +195,8 @@
             ctx, remoteRegistry, map[string]string{sourceCluster: sourceCluster},
             map[string]*networking.ServiceEntry{key: serviceEntry})
     }
-    clusterIngress, _ := rc.ServiceController.Cache.GetLoadBalancer(common.GetAdmiralParams().LabelSet.GatewayApp, common.NamespaceIstioSystem)
+    clusterIngress, _ := rc.ServiceController.Cache.GetLoadBalancer(
+        common.GetAdmiralParams().LabelSet.GatewayApp, common.NamespaceIstioSystem)
     for _, ep := range serviceEntry.Endpoints {
         //replace istio ingress-gateway address with local fqdn, note that ingress-gateway can be empty (not provisoned, or is not up)
         if ep.Address == clusterIngress || ep.Address == "" {
@@ -225,11 +234,14 @@
     }
 
     if common.GetWorkloadSidecarUpdate() == "enabled" {
-        modifySidecarForLocalClusterCommunication(ctx, serviceInstance.Namespace, remoteRegistry.AdmiralCache.DependencyNamespaceCache.Get(sourceIdentity), rc)
+        modifySidecarForLocalClusterCommunication(
+            ctx, serviceInstance.Namespace,
+            remoteRegistry.AdmiralCache.DependencyNamespaceCache.Get(sourceIdentity), rc)
     }
 
     for _, val := range dependents {
-        remoteRegistry.AdmiralCache.DependencyNamespaceCache.Put(val, serviceInstance.Namespace, localFqdn, cnames)
+        remoteRegistry.AdmiralCache.DependencyNamespaceCache.Put(
+            val, serviceInstance.Namespace, localFqdn, cnames)
     }
 }
@@ -245,9 +257,7 @@
     }
 
     AddServiceEntriesWithDr(ctx, remoteRegistry, dependentClusters, serviceEntries)
-
     util.LogElapsedTimeSince("WriteServiceEntryToDependentClusters", sourceIdentity, env, "", start)
-
     return serviceEntries
 }
@@ -255,7 +265,6 @@
 //i) Picks the GTP that was created most recently from the passed in GTP list based on GTP priority label (GTPs from all clusters)
 //ii) Updates the global GTP cache with the selected GTP in i)
 func updateGlobalGtpCache(cache *AdmiralCache, identity, env string, gtps map[string][]*v1.GlobalTrafficPolicy) {
-
     defer util.LogElapsedTime("updateGlobalGtpCache", identity, env, "")()
     gtpsOrdered := make([]*v1.GlobalTrafficPolicy, 0)
     for _, gtpsInCluster := range gtps {
         gtpsOrdered = append(gtpsOrdered, gtpsInCluster...)
@@ -290,10 +299,10 @@ func sortGtpsByPriorityAndCreationTime(gtpsToOrder []*v1.GlobalTrafficPolicy, id
         jTime := gtpsToOrder[j].CreationTimestamp
 
         if iPriority != jPriority {
-            log.Debugf("GTP sorting identity=%s env=%s name1=%s creationTime1=%v priority1=%d name2=%s creationTime2=%v priority2=%d", identity, env, gtpsToOrder[i].Name, iTime, iPriority, gtpsToOrder[j].Name, jTime, jPriority)
+            log.Infof("GTP sorting identity=%s env=%s name1=%s creationTime1=%v priority1=%d name2=%s creationTime2=%v priority2=%d", identity, env, gtpsToOrder[i].Name, iTime, iPriority, gtpsToOrder[j].Name, jTime, jPriority)
             return iPriority > jPriority
         }
-        log.Debugf("GTP sorting identity=%s env=%s name1=%s creationTime1=%v priority1=%d name2=%s creationTime2=%v priority2=%d", identity, env, gtpsToOrder[i].Name, iTime, iPriority, gtpsToOrder[j].Name, jTime, jPriority)
+        log.Infof("GTP sorting identity=%s env=%s name1=%s creationTime1=%v priority1=%d name2=%s creationTime2=%v priority2=%d", identity, env, gtpsToOrder[i].Name, iTime, iPriority, gtpsToOrder[j].Name, jTime, jPriority)
         return iTime.After(jTime.Time)
     })
 }
@@ -367,8 +376,10 @@ func modifySidecarForLocalClusterCommunication(ctx context.Context, sidecarNames
         return
     }
 
-    sidecar, _ := sidecarConfig.IstioClient.NetworkingV1alpha3().Sidecars(sidecarNamespace).Get(ctx, common.GetWorkloadSidecarName(), v12.GetOptions{})
-
+    sidecar, err := sidecarConfig.IstioClient.NetworkingV1alpha3().Sidecars(sidecarNamespace).Get(ctx, common.GetWorkloadSidecarName(), v12.GetOptions{})
+    if err != nil {
+        return
+    }
     if sidecar == nil || (sidecar.Spec.Egress == nil) {
         return
     }
@@ -461,7 +472,6 @@ func AddServiceEntriesWithDr(ctx context.Context, rr *RemoteRegistry, sourceClus
                 log.Infof(LogFormat, "Get (error)", "old DestinationRule", seDr.DrName, sourceCluster, err)
                 oldDestinationRule = nil
             }
-
             var deleteOldServiceEntry = false
             if oldServiceEntry != nil {
                 areEndpointsValid := validateAndProcessServiceEntryEndpoints(oldServiceEntry)
@@ -729,8 +739,9 @@ func getUniqueAddress(ctx context.Context, admiralCache *AdmiralCache, globalFqd
     needsCacheUpdate := false
 
     for err == nil && counter < maxRetries {
-        address, needsCacheUpdate, err = GetLocalAddressForSe(ctx, getIstioResourceName(globalFqdn, "-se"), admiralCache.ServiceEntryAddressStore, admiralCache.ConfigMapController)
-
+        address, needsCacheUpdate, err = GetLocalAddressForSe(
+            ctx, getIstioResourceName(globalFqdn, "-se"),
+            admiralCache.ServiceEntryAddressStore, admiralCache.ConfigMapController)
         if err != nil {
             log.Errorf("Error getting local address for Service Entry. Err: %v", err)
             break
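The exclusion logic above calls isAnExcludedAsset, whose definition is not part of this diff. A minimal sketch of what such a helper could look like, assuming a plain string comparison against the configured exclude list (the matching rule is an assumption, not confirmed by this diff):

// Sketch only: the real implementation is not shown in this diff.
func isAnExcludedAsset(assetName string, excludeAssetList []string) bool {
    for _, excludedAsset := range excludeAssetList {
        if excludedAsset == assetName {
            return true
        }
    }
    return false
}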
diff --git a/admiral/pkg/clusters/serviceentry_test.go b/admiral/pkg/clusters/serviceentry_test.go
index 4a8add8b9..f1e51b4b4 100644
--- a/admiral/pkg/clusters/serviceentry_test.go
+++ b/admiral/pkg/clusters/serviceentry_test.go
@@ -19,69 +19,395 @@ import (
     "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
     "github.com/istio-ecosystem/admiral/admiral/pkg/controller/istio"
     "github.com/istio-ecosystem/admiral/admiral/pkg/test"
+    log "github.com/sirupsen/logrus"
     "github.com/stretchr/testify/assert"
     "google.golang.org/protobuf/testing/protocmp"
     "gopkg.in/yaml.v2"
-    istionetworkingv1alpha3 "istio.io/api/networking/v1alpha3"
+    istioNetworkingV1Alpha3 "istio.io/api/networking/v1alpha3"
     "istio.io/client-go/pkg/apis/networking/v1alpha3"
     istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
+    k8sAppsV1 "k8s.io/api/apps/v1"
     v14 "k8s.io/api/apps/v1"
     coreV1 "k8s.io/api/core/v1"
-    v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/rest"
 )
 
-func init() {
-    p := common.AdmiralParams{
-        KubeconfigPath: "testdata/fake.config",
-        LabelSet:       &common.LabelSet{},
+var serviceEntryTestSingleton sync.Once
+
+func admiralParams() common.AdmiralParams {
+    return common.AdmiralParams{
+        KubeconfigPath: "testdata/fake.config",
+        LabelSet: &common.LabelSet{
+            GatewayApp:          "gatewayapp",
+            WorkloadIdentityKey: "identity",
+            PriorityKey:         "priority",
+            EnvKey:              "env",
+        },
         EnableSAN:                  true,
         SANPrefix:                  "prefix",
         HostnameSuffix:             "mesh",
         SyncNamespace:              "ns",
-        CacheRefreshDuration:       time.Minute,
+        CacheRefreshDuration:       0,
         ClusterRegistriesNamespace: "default",
         DependenciesNamespace:      "default",
+        WorkloadSidecarName:        "default",
         SecretResolver:             "",
     }
+}
 
-    p.LabelSet.WorkloadIdentityKey = "identity"
-    p.LabelSet.GlobalTrafficDeploymentLabel = "identity"
-    p.LabelSet.PriorityKey = "priority"
+func setupForServiceEntryTests() {
+    var initHappened bool
+    serviceEntryTestSingleton.Do(func() {
+        common.ResetSync()
+        initHappened = true
+        common.InitializeConfig(admiralParams())
+    })
+    if !initHappened {
+        log.Warn("InitializeConfig was NOT called from setupForServiceEntryTests")
+    } else {
+        log.Info("InitializeConfig was called setupForServiceEntryTests")
+    }
+}
 
-    common.InitializeConfig(p)
+func makeTestDeployment(name, namespace, identityLabelValue string) *k8sAppsV1.Deployment {
+    return &k8sAppsV1.Deployment{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      name,
+            Namespace: namespace,
+            Annotations: map[string]string{
+                "env": "test",
+                "traffic.sidecar.istio.io/includeInboundPorts": "8090",
+            },
+        },
+        Spec: k8sAppsV1.DeploymentSpec{
+            Template: coreV1.PodTemplateSpec{
+                ObjectMeta: metav1.ObjectMeta{
+                    Annotations: map[string]string{
+                        "env": "test",
+                        "traffic.sidecar.istio.io/includeInboundPorts": "8090",
+                    },
+                    Labels: map[string]string{
+                        "identity": identityLabelValue,
+                    },
+                },
+                Spec: coreV1.PodSpec{},
+            },
+            Selector: &v12.LabelSelector{
+                MatchLabels: map[string]string{
+                    "identity": identityLabelValue,
+                    "app":      identityLabelValue,
+                },
+            },
+        },
+    }
+}
 
-func TestAddServiceEntriesWithDr(t *testing.T) {
-    admiralCache := AdmiralCache{}
+func makeTestRollout(name, namespace, identityLabelValue string) argo.Rollout {
+    return argo.Rollout{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      name,
+            Namespace: namespace,
+            Annotations: map[string]string{
+                "env": "test",
+            },
+        },
+        Spec: argo.RolloutSpec{
+            Template: coreV1.PodTemplateSpec{
+                ObjectMeta: v12.ObjectMeta{
+                    Labels: map[string]string{"identity": identityLabelValue},
+                    Annotations: map[string]string{
+                        "env": "test",
+                        "traffic.sidecar.istio.io/includeInboundPorts": "8090",
+                    },
+                },
+            },
+            Strategy: argo.RolloutStrategy{
+                Canary: &argo.CanaryStrategy{
+                    TrafficRouting: &argo.RolloutTrafficRouting{
+                        Istio: &argo.IstioTrafficRouting{
+                            VirtualService: &argo.IstioVirtualService{
+                                Name: name + "-canary",
+                            },
+                        },
+                    },
+                    CanaryService: name + "-canary",
+                    StableService: name + "-stable",
+                },
+            },
+            Selector: &v12.LabelSelector{
+                MatchLabels: map[string]string{
+                    "identity": identityLabelValue,
+                    "app":      identityLabelValue,
+                },
+            },
+        },
+    }
+}
 
-    admiralCache.SeClusterCache = common.NewMapOfMaps()
+func TestModifyServiceEntryForNewServiceOrPodForExcludedAsset(t *testing.T) {
+    setupForServiceEntryTests()
+    var (
+        env                     = "test"
+        stop                    = make(chan struct{})
+        foobarMetadataName      = "foobar"
+        foobarMetadataNamespace = "foobar-ns"
+        rollout1Identity        = "rollout1"
+        deployment1Identity     = "deployment1"
+        testRollout1            = makeTestRollout(foobarMetadataName, foobarMetadataNamespace, rollout1Identity)
+        testDeployment1         = makeTestDeployment(foobarMetadataName, foobarMetadataNamespace, deployment1Identity)
+        clusterID               = "test-dev-k8s"
+        fakeIstioClient         = istiofake.NewSimpleClientset()
+        config                  = rest.Config{Host: "localhost"}
+        expectedServiceEntriesForDeployment = map[string]*istioNetworkingV1Alpha3.ServiceEntry{
+            "test." + deployment1Identity + ".mesh": &istioNetworkingV1Alpha3.ServiceEntry{
+                Hosts:     []string{"test." + deployment1Identity + ".mesh"},
+                Addresses: []string{"127.0.0.1"},
+                Ports: []*istioNetworkingV1Alpha3.Port{
+                    &istioNetworkingV1Alpha3.Port{
+                        Number:   80,
+                        Protocol: "http",
+                        Name:     "http",
+                    },
+                },
+                Location:   istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
+                Resolution: istioNetworkingV1Alpha3.ServiceEntry_DNS,
+                Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{
+                    &istioNetworkingV1Alpha3.WorkloadEntry{
+                        Address: "dummy.admiral.global",
+                        Ports: map[string]uint32{
+                            "http": 0,
+                        },
+                        Locality: "us-west-2",
+                    },
+                },
+                SubjectAltNames: []string{"spiffe://prefix/" + deployment1Identity},
+            },
+        }
+        /*
+            expectedServiceEntriesForRollout = map[string]*istioNetworkingV1Alpha3.ServiceEntry{
+                "test." + deployment1Identity + ".mesh": &istioNetworkingV1Alpha3.ServiceEntry{
+                    Hosts:     []string{"test." + rollout1Identity + ".mesh"},
+                    Addresses: []string{"127.0.0.1"},
+                    Ports: []*istioNetworkingV1Alpha3.Port{
+                        &istioNetworkingV1Alpha3.Port{
+                            Number:   80,
+                            Protocol: "http",
+                            Name:     "http",
+                        },
+                    },
+                    Location:   istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
+                    Resolution: istioNetworkingV1Alpha3.ServiceEntry_DNS,
+                    Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{
+                        &istioNetworkingV1Alpha3.WorkloadEntry{
+                            Address: "dummy.admiral.global",
+                            Ports: map[string]uint32{
+                                "http": 0,
+                            },
+                            Locality: "us-west-2",
+                        },
+                    },
+                    SubjectAltNames: []string{"spiffe://prefix/" + rollout1Identity},
+                },
+            }
+        */
+        serviceEntryAddressStore = &ServiceEntryAddressStore{
+            EntryAddresses: map[string]string{
+                "test." + deployment1Identity + ".mesh-se": "127.0.0.1",
+                "test." + rollout1Identity + ".mesh-se":    "127.0.0.1",
+            },
+            Addresses: []string{},
+        }
+        serviceForRollout = &coreV1.Service{
+            ObjectMeta: v12.ObjectMeta{
+                Name:      foobarMetadataName + "-stable",
+                Namespace: foobarMetadataNamespace,
+            },
+            Spec: coreV1.ServiceSpec{
+                Selector: map[string]string{"app": rollout1Identity},
+                Ports: []coreV1.ServicePort{
+                    {
+                        Name: "http",
+                        Port: 8090,
+                    },
+                },
+            },
+        }
+        serviceForDeployment = &coreV1.Service{
+            ObjectMeta: v12.ObjectMeta{
+                Name:      foobarMetadataName,
+                Namespace: foobarMetadataNamespace,
+            },
+            Spec: coreV1.ServiceSpec{
+                Selector: map[string]string{"app": deployment1Identity},
+                Ports: []coreV1.ServicePort{
+                    {
+                        Name: "http",
+                        Port: 8090,
+                    },
+                },
+            },
+        }
+        rr1, _ = InitAdmiral(context.Background(), admiralParams())
+        rr2, _ = InitAdmiral(context.Background(), admiralParams())
+    )
+    deploymentController, err := admiral.NewDeploymentController(clusterID, make(chan struct{}), &test.MockDeploymentHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fail()
+    }
+    deploymentController.Cache.UpdateDeploymentToClusterCache(deployment1Identity, testDeployment1)
+    rolloutController, err := admiral.NewRolloutsController(clusterID, make(chan struct{}), &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fail()
+    }
+    rolloutController.Cache.UpdateRolloutToClusterCache(rollout1Identity, &testRollout1)
+    serviceController, err := admiral.NewServiceController(clusterID, stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fatalf("%v", err)
+    }
+    virtualServiceController, err := istio.NewVirtualServiceController(clusterID, make(chan struct{}), &test.MockVirtualServiceHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fatalf("%v", err)
+    }
+    gtpc, err := admiral.NewGlobalTrafficController("", make(chan struct{}), &test.MockGlobalTrafficHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fatalf("%v", err)
+        t.FailNow()
+    }
+    t.Logf("expectedServiceEntriesForDeployment: %v\n", expectedServiceEntriesForDeployment)
+    serviceController.Cache.Put(serviceForRollout)
+    serviceController.Cache.Put(serviceForDeployment)
+    rc := &RemoteController{
+        ClusterID:                clusterID,
+        DeploymentController:     deploymentController,
+        RolloutController:        rolloutController,
+        ServiceController:        serviceController,
+        VirtualServiceController: virtualServiceController,
+        NodeController: &admiral.NodeController{
+            Locality: &admiral.Locality{
+                Region: "us-west-2",
+            },
+        },
+        ServiceEntryController: &istio.ServiceEntryController{
+            IstioClient: fakeIstioClient,
+        },
+        DestinationRuleController: &istio.DestinationRuleController{
+            IstioClient: fakeIstioClient,
+        },
+        GlobalTraffic: gtpc,
+    }
+    rr1.PutRemoteController(clusterID, rc)
+    rr1.ExcludeAssetList = []string{"asset1"}
+    rr1.StartTime = time.Now()
+    rr1.AdmiralCache.ServiceEntryAddressStore = serviceEntryAddressStore
+
+    rr2.PutRemoteController(clusterID, rc)
+    rr2.StartTime = time.Now()
+    rr2.AdmiralCache.ServiceEntryAddressStore = serviceEntryAddressStore
 
-    cnameIdentityCache := sync.Map{}
+    testCases := []struct {
+        name                   string
+        assetIdentity          string
+        remoteRegistry         *RemoteRegistry
+        expectedServiceEntries map[string]*istioNetworkingV1Alpha3.ServiceEntry
+    }{
+        {
+            name: "Given asset is using a deployment," +
+                "And asset is in the exclude list, " +
+                "When modifyServiceEntryForNewServiceOrPod is called, " +
+                "Then, it should skip creating service entries, and return an empty map of service entries",
+            assetIdentity:          "asset1",
+            remoteRegistry:         rr1,
+            expectedServiceEntries: nil,
+        },
+        {
+            name: "Given asset is using a rollout," +
+                "And asset is in the exclude list, " +
+                "When modifyServiceEntryForNewServiceOrPod is called, " +
+                "Then, it should skip creating service entries, and return an empty map of service entries",
+            assetIdentity:          "asset1",
+            remoteRegistry:         rr1,
+            expectedServiceEntries: nil,
+        },
+        {
+            name: "Given asset is using a deployment, " +
+                "And asset is NOT in the exclude list" +
+                "When modifyServiceEntryForNewServiceOrPod is called, " +
+                "Then, corresponding service entry should be created, " +
+                "And the function should return a map containing the created service entry",
+            assetIdentity:          deployment1Identity,
+            remoteRegistry:         rr2,
+            expectedServiceEntries: expectedServiceEntriesForDeployment,
+        },
+        /*
+            {
+                name: "Given asset is using a rollout, " +
+                    "And asset is NOT in the exclude list" +
+                    "When modifyServiceEntryForNewServiceOrPod is called, " +
+                    "Then, corresponding service entry should be created, " +
+                    "And the function should return a map containing the created service entry",
+                assetIdentity:          rollout1Identity,
+                remoteRegistry:         rr2,
+                expectedServiceEntries: expectedServiceEntriesForRollout,
+            },
+        */
+    }
+    for _, c := range testCases {
+        t.Run(c.name, func(t *testing.T) {
+            serviceEntries := modifyServiceEntryForNewServiceOrPod(
+                context.Background(),
+                admiral.Add,
+                env,
+                c.assetIdentity,
+                c.remoteRegistry,
+            )
+            if len(serviceEntries) != len(c.expectedServiceEntries) {
+                t.Fatalf("expected service entries to be of length: %d, but got: %d", len(c.expectedServiceEntries), len(serviceEntries))
+            }
+            if len(c.expectedServiceEntries) > 0 {
+                for k := range c.expectedServiceEntries {
+                    if serviceEntries[k] == nil {
+                        t.Fatalf(
+                            "expected service entries to contain service entry for: %s, "+
+                                "but did not find it. Got map: %v",
+                            k, serviceEntries,
+                        )
+                    }
+                }
+            }
+        })
+    }
+}
+
+func TestAddServiceEntriesWithDr(t *testing.T) {
+    setupForServiceEntryTests()
+    var (
+        admiralCache       = AdmiralCache{}
+        cnameIdentityCache = sync.Map{}
+        gtpCache           = &globalTrafficCache{}
+    )
+    admiralCache.SeClusterCache = common.NewMapOfMaps()
     cnameIdentityCache.Store("dev.bar.global", "bar")
     admiralCache.CnameIdentityCache = &cnameIdentityCache
-
-    gtpCache := &globalTrafficCache{}
     gtpCache.identityCache = make(map[string]*v13.GlobalTrafficPolicy)
     gtpCache.mutex = &sync.Mutex{}
     admiralCache.GlobalTrafficCache = gtpCache
-
-    se := istionetworkingv1alpha3.ServiceEntry{
+    se := istioNetworkingV1Alpha3.ServiceEntry{
         Hosts: []string{"dev.bar.global"},
-        Endpoints: []*istionetworkingv1alpha3.WorkloadEntry{
+        Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{
             {Address: "127.0.0.1", Ports: map[string]uint32{"https": 80}, Labels: map[string]string{}, Network: "mesh1", Locality: "us-west", Weight: 100},
         },
     }
-    emptyEndpointSe := istionetworkingv1alpha3.ServiceEntry{
+    emptyEndpointSe := istioNetworkingV1Alpha3.ServiceEntry{
         Hosts:     []string{"dev.bar.global"},
-        Endpoints: []*istionetworkingv1alpha3.WorkloadEntry{},
+        Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{},
     }
-    dummyEndpointSe := istionetworkingv1alpha3.ServiceEntry{
+    dummyEndpointSe := istioNetworkingV1Alpha3.ServiceEntry{
         Hosts: []string{"dev.dummy.global"},
-        Endpoints: []*istionetworkingv1alpha3.WorkloadEntry{
+        Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{
             {Address: "dummy.admiral.global", Ports: map[string]uint32{"https": 80}, Labels: map[string]string{}, Network: "mesh1", Locality: "us-west", Weight: 100},
         },
     }
@@ -92,16 +418,13 @@ func TestAddServiceEntriesWithDr(t *testing.T) {
     }
     seConfig.Name = "se1"
     seConfig.Namespace = "admiral-sync"
-
     dummySeConfig := v1alpha3.ServiceEntry{
         //nolint
         Spec: dummyEndpointSe,
     }
     dummySeConfig.Name = "dummySe"
     dummySeConfig.Namespace = "admiral-sync"
-
     ctx := context.Background()
-
     fakeIstioClient := istiofake.NewSimpleClientset()
     fakeIstioClient.NetworkingV1alpha3().ServiceEntries("admiral-sync").Create(ctx, &seConfig, v12.CreateOptions{})
     fakeIstioClient.NetworkingV1alpha3().ServiceEntries("admiral-sync").Create(ctx, &dummySeConfig, v12.CreateOptions{})
@@ -122,21 +445,20 @@ func TestAddServiceEntriesWithDr(t *testing.T) {
     rr := NewRemoteRegistry(nil, common.AdmiralParams{})
     rr.PutRemoteController("cl1", rc)
     rr.AdmiralCache = &admiralCache
-    AddServiceEntriesWithDr(ctx, rr, map[string]string{"cl1": "cl1"}, map[string]*istionetworkingv1alpha3.ServiceEntry{"se1": &se})
-    AddServiceEntriesWithDr(ctx, rr, map[string]string{"cl1": "cl1"}, map[string]*istionetworkingv1alpha3.ServiceEntry{"se1": &emptyEndpointSe})
-    AddServiceEntriesWithDr(ctx, rr, map[string]string{"cl1": "cl1"}, map[string]*istionetworkingv1alpha3.ServiceEntry{"dummySe": &dummyEndpointSe})
-
+    AddServiceEntriesWithDr(ctx, rr, map[string]string{"cl1": "cl1"}, map[string]*istioNetworkingV1Alpha3.ServiceEntry{"se1": &se})
+    AddServiceEntriesWithDr(ctx, rr, map[string]string{"cl1": "cl1"}, map[string]*istioNetworkingV1Alpha3.ServiceEntry{"se1": &emptyEndpointSe})
+    AddServiceEntriesWithDr(ctx, rr, map[string]string{"cl1": "cl1"}, map[string]*istioNetworkingV1Alpha3.ServiceEntry{"dummySe": &dummyEndpointSe})
 }
 
 func TestCreateSeAndDrSetFromGtp(t *testing.T) {
-
-    host := "dev.bar.global"
-    west := "west"
-    east := "east"
-    eastWithCaps := "East"
-
-    admiralCache := AdmiralCache{}
-
+    setupForServiceEntryTests()
+    var (
+        host         = "dev.bar.global"
+        west         = "west"
+        east         = "east"
+        eastWithCaps = "East"
+        admiralCache = AdmiralCache{}
+    )
     admiralCache.ServiceEntryAddressStore = &ServiceEntryAddressStore{
         EntryAddresses: map[string]string{},
         Addresses:      []string{},
@@ -155,10 +477,10 @@ func TestCreateSeAndDrSetFromGtp(t *testing.T) {
 
     admiralCache.ConfigMapController = cacheController
 
-    se := &istionetworkingv1alpha3.ServiceEntry{
+    se := &istioNetworkingV1Alpha3.ServiceEntry{
         Addresses: []string{"240.10.1.0"},
         Hosts:     []string{host},
-        Endpoints: []*istionetworkingv1alpha3.WorkloadEntry{
+        Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{
             {Address: "127.0.0.1", Ports: map[string]uint32{"https": 80}, Labels: map[string]string{}, Locality: "us-west-2"},
             {Address: "240.20.0.1", Ports: map[string]uint32{"https": 80}, Labels: map[string]string{}, Locality: "us-east-2"},
         },
@@ -222,7 +544,7 @@ func TestCreateSeAndDrSetFromGtp(t *testing.T) {
         name     string
         env      string
         locality string
-        se       *istionetworkingv1alpha3.ServiceEntry
+        se       *istioNetworkingV1Alpha3.ServiceEntry
         gtp      *v13.GlobalTrafficPolicy
         seDrSet  map[string]*SeDrTuple
     }{
@@ -283,11 +605,14 @@ func TestCreateSeAndDrSetFromGtp(t *testing.T) {
 }
 
 func TestCreateServiceEntryForNewServiceOrPod(t *testing.T) {
-
+    setupForServiceEntryTests()
     p := common.AdmiralParams{
         KubeconfigPath: "testdata/fake.config",
     }
-    rr, _ := InitAdmiral(context.Background(), p)
+    rr, err := InitAdmiral(context.Background(), p)
+    if err != nil {
+        t.Fatalf("unable to initialize admiral, err: %v", err)
+    }
     rr.StartTime = time.Now().Add(-60 * time.Second)
 
     config := rest.Config{
@@ -325,6 +650,7 @@ func TestCreateServiceEntryForNewServiceOrPod(t *testing.T) {
 }
 
 func TestGetLocalAddressForSe(t *testing.T) {
+    setupForServiceEntryTests()
     t.Parallel()
     cacheWithEntry := ServiceEntryAddressStore{
         EntryAddresses: map[string]string{"e2e.a.mesh": common.LocalAddressPrefix + ".10.1"},
@@ -466,11 +792,13 @@ func TestGetLocalAddressForSe(t *testing.T) {
 }
 
 func TestMakeRemoteEndpointForServiceEntry(t *testing.T) {
-    address := "1.2.3.4"
-    locality := "us-west-2"
-    portName := "port"
-
-    endpoint := makeRemoteEndpointForServiceEntry(address, locality, portName, common.DefaultMtlsPort)
+    setupForServiceEntryTests()
+    var (
+        address  = "1.2.3.4"
+        locality = "us-west-2"
+        portName = "port"
+        endpoint = makeRemoteEndpointForServiceEntry(address, locality, portName, common.DefaultMtlsPort)
+    )
 
     if endpoint.Address != address {
         t.Errorf("Address mismatch. Got: %v, expected: %v", endpoint.Address, address)
@@ -483,10 +811,10 @@ func TestMakeRemoteEndpointForServiceEntry(t *testing.T) {
     }
 }
 
-func buildFakeConfigMapFromAddressStore(addressStore *ServiceEntryAddressStore, resourceVersion string) *v1.ConfigMap {
+func buildFakeConfigMapFromAddressStore(addressStore *ServiceEntryAddressStore, resourceVersion string) *coreV1.ConfigMap {
     bytes, _ := yaml.Marshal(addressStore)
-    cm := v1.ConfigMap{
+    cm := coreV1.ConfigMap{
         Data: map[string]string{"serviceEntryAddressStore": string(bytes)},
     }
     cm.Name = "se-address-configmap"
@@ -496,57 +824,59 @@ func buildFakeConfigMapFromAddressStore(addressStore *ServiceEntryAddressStore,
 }
 
 func TestModifyNonExistingSidecarForLocalClusterCommunication(t *testing.T) {
-    sidecarController := &istio.SidecarController{}
+    setupForServiceEntryTests()
+    var (
+        ctx               = context.Background()
+        sidecarEgressMap  = make(map[string]common.SidecarEgress)
+        sidecarController = &istio.SidecarController{}
+        remoteController  = &RemoteController{}
+    )
     sidecarController.IstioClient = istiofake.NewSimpleClientset()
-    remoteController := &RemoteController{}
     remoteController.SidecarController = sidecarController
-
-    sidecarEgressMap := make(map[string]common.SidecarEgress)
     sidecarEgressMap["test-dependency-namespace"] = common.SidecarEgress{Namespace: "test-dependency-namespace", FQDN: "test-local-fqdn"}
-
-    ctx := context.Background()
-
     modifySidecarForLocalClusterCommunication(ctx, "test-sidecar-namespace", sidecarEgressMap, remoteController)
-
     sidecarObj, err := sidecarController.IstioClient.NetworkingV1alpha3().Sidecars("test-sidecar-namespace").Get(ctx, common.GetWorkloadSidecarName(), v12.GetOptions{})
     if err == nil {
         t.Errorf("expected 404 not found error but got nil")
     }
-
     if sidecarObj != nil {
         t.Fatalf("Modify non existing resource failed, as no new resource should be created.")
     }
 }
 
 func TestModifyExistingSidecarForLocalClusterCommunication(t *testing.T) {
-
-    sidecarController := &istio.SidecarController{}
+    setupForServiceEntryTests()
+    var (
+        ctx                = context.Background()
+        existingSidecarObj = &v1alpha3.Sidecar{}
+        istioEgress        = istioNetworkingV1Alpha3.IstioEgressListener{
+            Hosts: []string{"test-host"},
+        }
+        remoteController  = &RemoteController{}
+        sidecarController = &istio.SidecarController{}
+    )
     sidecarController.IstioClient = istiofake.NewSimpleClientset()
-    remoteController := &RemoteController{}
     remoteController.SidecarController = sidecarController
-
-    existingSidecarObj := &v1alpha3.Sidecar{}
     existingSidecarObj.ObjectMeta.Namespace = "test-sidecar-namespace"
     existingSidecarObj.ObjectMeta.Name = "default"
 
-    istioEgress := istionetworkingv1alpha3.IstioEgressListener{
-        Hosts: []string{"test-host"},
-    }
-
-    existingSidecarObj.Spec = istionetworkingv1alpha3.Sidecar{
-        Egress: []*istionetworkingv1alpha3.IstioEgressListener{&istioEgress},
+    existingSidecarObj.Spec = istioNetworkingV1Alpha3.Sidecar{
+        Egress: []*istioNetworkingV1Alpha3.IstioEgressListener{&istioEgress},
     }
 
-    ctx := context.Background()
-
     createdSidecar, err := sidecarController.IstioClient.NetworkingV1alpha3().Sidecars("test-sidecar-namespace").Create(ctx, existingSidecarObj, v12.CreateOptions{})
     if err != nil {
         t.Error(err)
     }
-    if createdSidecar != nil {
+    if createdSidecar != nil {
         sidecarEgressMap := make(map[string]common.SidecarEgress)
         sidecarEgressMap["test-dependency-namespace"] = common.SidecarEgress{Namespace: "test-dependency-namespace", FQDN: "test-local-fqdn", CNAMEs: map[string]string{"test.myservice.global": "1"}}
+        time.Sleep(5 * time.Second)
         modifySidecarForLocalClusterCommunication(ctx, "test-sidecar-namespace", sidecarEgressMap, remoteController)
 
         updatedSidecar, err := sidecarController.IstioClient.NetworkingV1alpha3().Sidecars("test-sidecar-namespace").Get(ctx, "default", v12.GetOptions{})
@@ -567,7 +897,7 @@ func TestModifyExistingSidecarForLocalClusterCommunication(t *testing.T) {
         if !cmp.Equal(updatedSidecar, createdSidecar, protocmp.Transform()) {
             t.Fatalf("Modify existing sidecar failed as configuration is not same. Details - %v", cmp.Diff(updatedSidecar, createdSidecar))
         }
-        var matched *istionetworkingv1alpha3.IstioEgressListener
+        var matched *istioNetworkingV1Alpha3.IstioEgressListener
         for _, listener := range createdSidecarEgress {
             matched = nil
@@ -582,6 +912,7 @@ func TestModifyExistingSidecarForLocalClusterCommunication(t *testing.T) {
             newHosts := matched.Hosts
             listener.Hosts = listener.Hosts[:0]
             matched.Hosts = matched.Hosts[:0]
+            t.Logf("old: %v, new: %v", oldHosts, newHosts)
             assert.ElementsMatch(t, oldHosts, newHosts, "hosts should match")
             if !cmp.Equal(listener, matched, protocmp.Transform()) {
                 t.Fatalf("Listeners do not match. Details - %v", cmp.Diff(listener, matched))
@@ -596,22 +927,21 @@ func TestModifyExistingSidecarForLocalClusterCommunication(t *testing.T) {
 }
 
 func TestCreateServiceEntry(t *testing.T) {
-
-    config := rest.Config{
-        Host: "localhost",
-    }
-    stop := make(chan struct{})
-    s, e := admiral.NewServiceController("test", stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300))
-
-    if e != nil {
-        t.Fatalf("%v", e)
+    setupForServiceEntryTests()
+    var (
+        stop               = make(chan struct{})
+        admiralCache       = AdmiralCache{}
+        cnameIdentityCache = sync.Map{}
+        config             = rest.Config{
+            Host: "localhost",
+        }
+        fakeIstioClient = istiofake.NewSimpleClientset()
+    )
+    s, err := admiral.NewServiceController("test", stop, &test.MockServiceHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fatalf("%v", err)
     }
-
-    admiralCache := AdmiralCache{}
-
     localAddress := common.LocalAddressPrefix + ".10.1"
-
-    cnameIdentityCache := sync.Map{}
     cnameIdentityCache.Store("dev.bar.global", "bar")
     admiralCache.CnameIdentityCache = &cnameIdentityCache
 
@@ -621,8 +951,6 @@ func TestCreateServiceEntry(t *testing.T) {
     }
 
     admiralCache.CnameClusterCache = common.NewMapOfMaps()
-
-    fakeIstioClient := istiofake.NewSimpleClientset()
     rc := &RemoteController{
         ServiceEntryController: &istio.ServiceEntryController{
             IstioClient: fakeIstioClient,
@@ -658,93 +986,93 @@ func TestCreateServiceEntry(t *testing.T) {
     secondDeployment := v14.Deployment{}
     secondDeployment.Spec.Template.Labels = map[string]string{"env": "e2e", "identity": "my-first-service"}
 
-    se := istionetworkingv1alpha3.ServiceEntry{
+    se := istioNetworkingV1Alpha3.ServiceEntry{
         Hosts:     []string{"e2e.my-first-service.mesh"},
         Addresses: []string{localAddress},
-        Ports: []*istionetworkingv1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
+        Ports: []*istioNetworkingV1Alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
             Name: "http", Protocol: "http"}},
-        Location:        istionetworkingv1alpha3.ServiceEntry_MESH_INTERNAL,
-        Resolution:      istionetworkingv1alpha3.ServiceEntry_DNS,
+        Location:        istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
+        Resolution:      istioNetworkingV1Alpha3.ServiceEntry_DNS,
         SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
-        Endpoints: []*istionetworkingv1alpha3.WorkloadEntry{
+        Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{
             {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
         },
     }
 
-    oneEndpointSe := istionetworkingv1alpha3.ServiceEntry{
+    oneEndpointSe := istioNetworkingV1Alpha3.ServiceEntry{
         Hosts:     []string{"e2e.my-first-service.mesh"},
         Addresses: []string{localAddress},
-        Ports: []*istionetworkingv1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
+        Ports: []*istioNetworkingV1Alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
             Name: "http", Protocol: "http"}},
-        Location:        istionetworkingv1alpha3.ServiceEntry_MESH_INTERNAL,
-        Resolution:      istionetworkingv1alpha3.ServiceEntry_DNS,
+        Location:        istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
+        Resolution:      istioNetworkingV1Alpha3.ServiceEntry_DNS,
         SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
-        Endpoints: []*istionetworkingv1alpha3.WorkloadEntry{
+        Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{
             {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
         },
     }
 
-    twoEndpointSe := istionetworkingv1alpha3.ServiceEntry{
+    twoEndpointSe := istioNetworkingV1Alpha3.ServiceEntry{
         Hosts:     []string{"e2e.my-first-service.mesh"},
         Addresses: []string{localAddress},
-        Ports: []*istionetworkingv1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
+        Ports: []*istioNetworkingV1Alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
            Name: "http", Protocol: "http"}},
-        Location:        istionetworkingv1alpha3.ServiceEntry_MESH_INTERNAL,
-        Resolution:      istionetworkingv1alpha3.ServiceEntry_DNS,
+        Location:        istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
+        Resolution:      istioNetworkingV1Alpha3.ServiceEntry_DNS,
         SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
-        Endpoints: []*istionetworkingv1alpha3.WorkloadEntry{
+        Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{
             {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
             {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"},
         },
     }
 
-    threeEndpointSe := istionetworkingv1alpha3.ServiceEntry{
+    threeEndpointSe := istioNetworkingV1Alpha3.ServiceEntry{
         Hosts:     []string{"e2e.my-first-service.mesh"},
         Addresses: []string{localAddress},
-        Ports: []*istionetworkingv1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
            Name: "http", Protocol: "http"}},
-        Location:        istionetworkingv1alpha3.ServiceEntry_MESH_INTERNAL,
-        Resolution:      istionetworkingv1alpha3.ServiceEntry_DNS,
+        Ports: []*istioNetworkingV1Alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
+            Name: "http", Protocol: "http"}},
+        Location:        istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
+        Resolution:      istioNetworkingV1Alpha3.ServiceEntry_DNS,
         SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
-        Endpoints: []*istionetworkingv1alpha3.WorkloadEntry{
+        Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{
             {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
             {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-west-2"},
             {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"},
         },
     }
 
-    eastEndpointSe := istionetworkingv1alpha3.ServiceEntry{
+    eastEndpointSe := istioNetworkingV1Alpha3.ServiceEntry{
         Hosts:     []string{"e2e.my-first-service.mesh"},
         Addresses: []string{localAddress},
-        Ports: []*istionetworkingv1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
+        Ports: []*istioNetworkingV1Alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
             Name: "http", Protocol: "http"}},
-        Location:        istionetworkingv1alpha3.ServiceEntry_MESH_INTERNAL,
-        Resolution:      istionetworkingv1alpha3.ServiceEntry_DNS,
+        Location:        istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
+        Resolution:      istioNetworkingV1Alpha3.ServiceEntry_DNS,
         SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
-        Endpoints: []*istionetworkingv1alpha3.WorkloadEntry{
+        Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{
             {Address: "dummy.admiral.global", Ports: map[string]uint32{"http": 0}, Locality: "us-east-2"},
         },
     }
 
-    emptyEndpointSe := istionetworkingv1alpha3.ServiceEntry{
+    emptyEndpointSe := istioNetworkingV1Alpha3.ServiceEntry{
         Hosts:     []string{"e2e.my-first-service.mesh"},
         Addresses: []string{localAddress},
-        Ports: []*istionetworkingv1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
+        Ports: []*istioNetworkingV1Alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
             Name: "http", Protocol: "http"}},
-        Location:        istionetworkingv1alpha3.ServiceEntry_MESH_INTERNAL,
-        Resolution:      istionetworkingv1alpha3.ServiceEntry_DNS,
+        Location:        istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
+        Resolution:      istioNetworkingV1Alpha3.ServiceEntry_DNS,
         SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
-        Endpoints:       []*istionetworkingv1alpha3.WorkloadEntry{},
+        Endpoints:       []*istioNetworkingV1Alpha3.WorkloadEntry{},
     }
 
-    grpcSe := istionetworkingv1alpha3.ServiceEntry{
+    grpcSe := istioNetworkingV1Alpha3.ServiceEntry{
         Hosts:     []string{"e2e.my-first-service.mesh"},
         Addresses: []string{localAddress},
-        Ports: []*istionetworkingv1alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
+        Ports: []*istioNetworkingV1Alpha3.Port{{Number: uint32(common.DefaultServiceEntryPort),
             Name: "grpc", Protocol: "grpc"}},
-        Location:        istionetworkingv1alpha3.ServiceEntry_MESH_INTERNAL,
-        Resolution:      istionetworkingv1alpha3.ServiceEntry_DNS,
+        Location:        istioNetworkingV1Alpha3.ServiceEntry_MESH_INTERNAL,
+        Resolution:      istioNetworkingV1Alpha3.ServiceEntry_DNS,
         SubjectAltNames: []string{"spiffe://prefix/my-first-service"},
-        Endpoints: []*istionetworkingv1alpha3.WorkloadEntry{
+        Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{
             {Address: "dummy.admiral.global", Ports: map[string]uint32{"grpc": 0}, Locality: "us-west-2"},
         },
     }
@@ -756,8 +1084,8 @@ func TestCreateServiceEntry(t *testing.T) {
         admiralCache   AdmiralCache
         meshPorts      map[string]uint32
         deployment     v14.Deployment
-        serviceEntries map[string]*istionetworkingv1alpha3.ServiceEntry
-        expectedResult *istionetworkingv1alpha3.ServiceEntry
+        serviceEntries map[string]*istioNetworkingV1Alpha3.ServiceEntry
+        expectedResult *istioNetworkingV1Alpha3.ServiceEntry
     }{
         {
             name: "Should return a created service entry with grpc protocol",
@@ -766,7 +1094,7 @@ func TestCreateServiceEntry(t *testing.T) {
             admiralCache:   admiralCache,
             meshPorts:      map[string]uint32{"grpc": uint32(80)},
             deployment:     deployment,
-            serviceEntries: map[string]*istionetworkingv1alpha3.ServiceEntry{},
+            serviceEntries: map[string]*istioNetworkingV1Alpha3.ServiceEntry{},
             expectedResult: &grpcSe,
         },
         {
@@ -776,7 +1104,7 @@ func TestCreateServiceEntry(t *testing.T) {
             admiralCache:   admiralCache,
             meshPorts:      map[string]uint32{"http": uint32(80)},
             deployment:     deployment,
-            serviceEntries: map[string]*istionetworkingv1alpha3.ServiceEntry{},
+            serviceEntries: map[string]*istioNetworkingV1Alpha3.ServiceEntry{},
             expectedResult: &se,
         },
         {
@@ -786,7 +1114,7 @@ func TestCreateServiceEntry(t *testing.T) {
             admiralCache:   admiralCache,
             meshPorts:      map[string]uint32{"http": uint32(80)},
             deployment:     deployment,
-            serviceEntries: map[string]*istionetworkingv1alpha3.ServiceEntry{
+            serviceEntries: map[string]*istioNetworkingV1Alpha3.ServiceEntry{
                 "e2e.my-first-service.mesh": &oneEndpointSe,
             },
             expectedResult: &emptyEndpointSe,
@@ -798,7 +1126,7 @@ func TestCreateServiceEntry(t *testing.T) {
             admiralCache:   admiralCache,
             meshPorts:      map[string]uint32{"http": uint32(80)},
             deployment:     deployment,
-            serviceEntries: map[string]*istionetworkingv1alpha3.ServiceEntry{
+            serviceEntries: map[string]*istioNetworkingV1Alpha3.ServiceEntry{
                 "e2e.my-first-service.mesh": &twoEndpointSe,
             },
             expectedResult: &eastEndpointSe,
@@ -810,7 +1138,7 @@ func TestCreateServiceEntry(t *testing.T) {
             admiralCache:   admiralCache,
             meshPorts:      map[string]uint32{"http": uint32(80)},
             deployment:     deployment,
-            serviceEntries: map[string]*istionetworkingv1alpha3.ServiceEntry{
+            serviceEntries: map[string]*istioNetworkingV1Alpha3.ServiceEntry{
                 "e2e.my-first-service.mesh": &threeEndpointSe,
             },
             expectedResult: &eastEndpointSe,
@@ -839,7 +1167,7 @@ func TestCreateServiceEntry(t *testing.T) {
         admiralCache   AdmiralCache
         meshPorts      map[string]uint32
         rollout        argo.Rollout
-        expectedResult *istionetworkingv1alpha3.ServiceEntry
+        expectedResult *istioNetworkingV1Alpha3.ServiceEntry
     }{
         {
             name: "Should return a created service entry with grpc protocol",
@@ -862,7 +1190,7 @@ func TestCreateServiceEntry(t *testing.T) {
     //Run the test for every provided case
     for _, c := range rolloutSeCreationTestCases {
         t.Run(c.name, func(t *testing.T) {
-            createdSE := createServiceEntryForRollout(ctx, admiral.Add, c.rc, &c.admiralCache, c.meshPorts, &c.rollout, map[string]*istionetworkingv1alpha3.ServiceEntry{})
+            createdSE := createServiceEntryForRollout(ctx, admiral.Add, c.rc, &c.admiralCache, c.meshPorts, &c.rollout, map[string]*istioNetworkingV1Alpha3.ServiceEntry{})
             if !reflect.DeepEqual(createdSE, c.expectedResult) {
                 t.Errorf("Test %s failed, expected: %v got %v", c.name, c.expectedResult, createdSE)
             }
@@ -872,36 +1200,46 @@ func TestCreateServiceEntry(t *testing.T) {
 }
 
 func TestCreateServiceEntryForNewServiceOrPodRolloutsUsecase(t *testing.T) {
+    setupForServiceEntryTests()
     const (
         namespace                  = "test-test"
         serviceName                = "serviceNameActive"
         rolloutPodHashLabel string = "rollouts-pod-template-hash"
     )
-    ctx := context.Background()
-    p := common.AdmiralParams{
-        KubeconfigPath: "testdata/fake.config",
+    var (
+        ctx    = context.Background()
+        config = rest.Config{
+            Host: "localhost",
+        }
+        p = common.AdmiralParams{
+            KubeconfigPath: "testdata/fake.config",
+        }
+    )
+    rr, err := InitAdmiral(context.Background(), p)
+    if err != nil {
+        t.Fatalf("unable to initialize admiral, err: %v", err)
     }
-
-    rr, _ := InitAdmiral(context.Background(), p)
-    rr.StartTime = time.Now().Add(-60 * time.Second)
-
-    config := rest.Config{
-        Host: "localhost",
+    d, err := admiral.NewDeploymentController("", make(chan struct{}), &test.MockDeploymentHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fail()
    }
-
-    d, e := admiral.NewDeploymentController("", make(chan struct{}), &test.MockDeploymentHandler{}, &config, time.Second*time.Duration(300))
-
-    r, e := admiral.NewRolloutsController("test", make(chan struct{}), &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300))
-    v, e := istio.NewVirtualServiceController("", make(chan struct{}), &test.MockVirtualServiceHandler{}, &config, time.Second*time.Duration(300))
-
-    if e != nil {
+    r, err := admiral.NewRolloutsController("test", make(chan struct{}), &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fail()
+    }
+    v, err := istio.NewVirtualServiceController("", make(chan struct{}), &test.MockVirtualServiceHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fail()
+    }
+    s, err := admiral.NewServiceController("test", make(chan struct{}), &test.MockServiceHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fail()
+    }
+    gtpc, err := admiral.NewGlobalTrafficController("", make(chan struct{}), &test.MockGlobalTrafficHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
        t.Fail()
     }
-    s, e := admiral.NewServiceController("test", make(chan struct{}), &test.MockServiceHandler{}, &config, time.Second*time.Duration(300))
-
-    gtpc, e := admiral.NewGlobalTrafficController("", make(chan struct{}), &test.MockGlobalTrafficHandler{}, &config, time.Second*time.Duration(300))
-
     cacheWithEntry := ServiceEntryAddressStore{
         EntryAddresses: map[string]string{"test.test.mesh-se": common.LocalAddressPrefix + ".10.1"},
         Addresses:      []string{common.LocalAddressPrefix + ".10.1"},
@@ -1009,35 +1347,48 @@ func TestCreateServiceEntryForNewServiceOrPodRolloutsUsecase(t *testing.T) {
 }
 
 func TestCreateServiceEntryForBlueGreenRolloutsUsecase(t *testing.T) {
-
-    const NAMESPACE = "test-test"
-    const ACTIVE_SERVICENAME = "serviceNameActive"
-    const PREVIEW_SERVICENAME = "serviceNamePreview"
-    const ROLLOUT_POD_HASH_LABEL string = "rollouts-pod-template-hash"
-
-    ctx := context.Background()
-
-    p := common.AdmiralParams{
-        KubeconfigPath:        "testdata/fake.config",
-        PreviewHostnamePrefix: "preview",
-    }
-    rr, _ := InitAdmiral(context.Background(), p)
-    config := rest.Config{
-        Host: "localhost",
+    setupForServiceEntryTests()
+    const (
+        namespace                  = "test-test"
+        activeServiceName          = "serviceNameActive"
+        previewServiceName         = "serviceNamePreview"
+        rolloutPodHashLabel string = "rollouts-pod-template-hash"
+    )
+    var (
+        ctx = context.Background()
+        p   = common.AdmiralParams{
+            KubeconfigPath:        "testdata/fake.config",
+            PreviewHostnamePrefix: "preview",
+        }
+        config = rest.Config{
+            Host: "localhost",
+        }
+    )
+    rr, err := InitAdmiral(context.Background(), p)
+    if err != nil {
+        t.Fatalf("unable to initialize admiral, err: %v", err)
     }
     rr.StartTime = time.Now().Add(-60 * time.Second)
-
-    d, e := admiral.NewDeploymentController("", make(chan struct{}), &test.MockDeploymentHandler{}, &config, time.Second*time.Duration(300))
-
-    r, e := admiral.NewRolloutsController("test", make(chan struct{}), &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300))
-    v, e := istio.NewVirtualServiceController("", make(chan struct{}), &test.MockVirtualServiceHandler{}, &config, time.Second*time.Duration(300))
-
-    if e != nil {
+    d, err := admiral.NewDeploymentController("", make(chan struct{}), &test.MockDeploymentHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fail()
+    }
+    r, err := admiral.NewRolloutsController("test", make(chan struct{}), &test.MockRolloutHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fail()
+    }
+    v, err := istio.NewVirtualServiceController("", make(chan struct{}), &test.MockVirtualServiceHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fail()
+    }
+    s, err := admiral.NewServiceController("test", make(chan struct{}), &test.MockServiceHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
+        t.Fail()
+    }
+    gtpc, err := admiral.NewGlobalTrafficController("", make(chan struct{}), &test.MockGlobalTrafficHandler{}, &config, time.Second*time.Duration(300))
+    if err != nil {
         t.Fail()
     }
-    s, e := admiral.NewServiceController("test", make(chan struct{}), &test.MockServiceHandler{}, &config, time.Second*time.Duration(300))
-    gtpc, e := admiral.NewGlobalTrafficController("", make(chan struct{}), &test.MockGlobalTrafficHandler{}, &config, time.Second*time.Duration(300))
-
     cacheWithEntry := ServiceEntryAddressStore{
         EntryAddresses: map[string]string{
             "test.test.mesh-se":         common.LocalAddressPrefix + ".10.1",
@@ -1092,9 +1443,9 @@ func TestCreateServiceEntryForBlueGreenRolloutsUsecase(t *testing.T) {
         },
     }
 
-    rollout.Namespace = NAMESPACE
+    rollout.Namespace = namespace
     rollout.Spec.Strategy = argo.RolloutStrategy{
-        BlueGreen: &argo.BlueGreenStrategy{ActiveService: ACTIVE_SERVICENAME, PreviewService: PREVIEW_SERVICENAME},
+        BlueGreen: &argo.BlueGreenStrategy{ActiveService: activeServiceName, PreviewService: previewServiceName},
     }
     labelMap := make(map[string]string)
     labelMap["identity"] = "test"
@@ -1111,7 +1462,7 @@ func TestCreateServiceEntryForBlueGreenRolloutsUsecase(t *testing.T) {
 
     selectorMap := make(map[string]string)
     selectorMap["app"] = "test"
-    selectorMap[ROLLOUT_POD_HASH_LABEL] = "hash"
+    selectorMap[rolloutPodHashLabel] = "hash"
 
     port1 := coreV1.ServicePort{
         Port: 8080,
@@ -1130,8 +1481,8 @@ func TestCreateServiceEntryForBlueGreenRolloutsUsecase(t *testing.T) {
             Selector: selectorMap,
         },
     }
-    activeService.Name = ACTIVE_SERVICENAME
-    activeService.Namespace = NAMESPACE
+    activeService.Name = activeServiceName
+    activeService.Namespace = namespace
     activeService.Spec.Ports = ports
     s.Cache.Put(activeService)
@@ -1141,8 +1492,8 @@ func TestCreateServiceEntryForBlueGreenRolloutsUsecase(t *testing.T) {
             Selector: selectorMap,
         },
     }
-    previewService.Name = PREVIEW_SERVICENAME
-    previewService.Namespace = NAMESPACE
+    previewService.Name = previewServiceName
+    previewService.Namespace = namespace
     previewService.Spec.Ports = ports
     s.Cache.Put(previewService)
@@ -1166,7 +1517,7 @@ func TestCreateServiceEntryForBlueGreenRolloutsUsecase(t *testing.T) {
 
     // When Preview service is not defined in BlueGreen strategy
     rollout.Spec.Strategy = argo.RolloutStrategy{
-        BlueGreen: &argo.BlueGreenStrategy{ActiveService: ACTIVE_SERVICENAME},
+        BlueGreen: &argo.BlueGreenStrategy{ActiveService: activeServiceName},
     }
 
     se = modifyServiceEntryForNewServiceOrPod(ctx, admiral.Add, "test", "bar", rr)
@@ -1182,51 +1533,53 @@ func TestCreateServiceEntryForBlueGreenRolloutsUsecase(t *testing.T) {
 }
 
 func TestUpdateEndpointsForBlueGreen(t *testing.T) {
-    const CLUSTER_INGRESS_1 = "ingress1.com"
-    const ACTIVE_SERVICE = "activeService"
-    const PREVIEW_SERVICE = "previewService"
-    const NAMESPACE = "namespace"
-    const ACTIVE_MESH_HOST = "qal.example.mesh"
-    const PREVIEW_MESH_HOST = "preview.qal.example.mesh"
-
-    rollout := &argo.Rollout{}
+    setupForServiceEntryTests()
+    const (
+        clusterIngress1 = "ingress1.com"
+        activeService   = "activeService"
+        previewService  = "previewService"
+        namespace       = "namespace"
+        activeMeshHost  = "qal.example.mesh"
+        previewMeshHost = "preview.qal.example.mesh"
+    )
+    var (
+        rollout   = &argo.Rollout{}
+        meshPorts = map[string]uint32{"http": 8080}
+    )
     rollout.Spec.Strategy = argo.RolloutStrategy{
         BlueGreen: &argo.BlueGreenStrategy{
-            ActiveService:  ACTIVE_SERVICE,
-            PreviewService: PREVIEW_SERVICE,
+            ActiveService:  activeService,
+            PreviewService: previewService,
         },
     }
     rollout.Spec.Template.Annotations = map[string]string{}
     rollout.Spec.Template.Annotations[common.SidecarEnabledPorts] = "8080"
-
-    endpoint := &istionetworkingv1alpha3.WorkloadEntry{
-        Labels: map[string]string{}, Address: CLUSTER_INGRESS_1, Ports: map[string]uint32{"http": 15443},
+    endpoint := &istioNetworkingV1Alpha3.WorkloadEntry{
+        Labels: map[string]string{}, Address: clusterIngress1, Ports: map[string]uint32{"http": 15443},
     }
 
-    meshPorts := map[string]uint32{"http": 8080}
-
     weightedServices := map[string]*WeightedService{
-        ACTIVE_SERVICE:  {Service: &v1.Service{ObjectMeta: v12.ObjectMeta{Name: ACTIVE_SERVICE, Namespace: NAMESPACE}}},
-        PREVIEW_SERVICE: {Service: &v1.Service{ObjectMeta: v12.ObjectMeta{Name: PREVIEW_SERVICE, Namespace: NAMESPACE}}},
+        activeService:  {Service: &coreV1.Service{ObjectMeta: v12.ObjectMeta{Name: activeService, Namespace: namespace}}},
+        previewService: {Service: &coreV1.Service{ObjectMeta: v12.ObjectMeta{Name: previewService, Namespace: namespace}}},
     }
 
-    activeWantedEndpoints := &istionetworkingv1alpha3.WorkloadEntry{
-        Address: ACTIVE_SERVICE + common.Sep + NAMESPACE + common.DotLocalDomainSuffix, Ports: meshPorts,
+    activeWantedEndpoints := &istioNetworkingV1Alpha3.WorkloadEntry{
+        Address: activeService + common.Sep + namespace + common.DotLocalDomainSuffix, Ports: meshPorts,
     }
 
-    previewWantedEndpoints := &istionetworkingv1alpha3.WorkloadEntry{
-        Address: PREVIEW_SERVICE + common.Sep + NAMESPACE + common.DotLocalDomainSuffix, Ports: meshPorts,
+    previewWantedEndpoints := &istioNetworkingV1Alpha3.WorkloadEntry{
+        Address: previewService + common.Sep + namespace + common.DotLocalDomainSuffix, Ports: meshPorts,
     }
 
     testCases := []struct {
         name             string
         rollout          *argo.Rollout
-        inputEndpoint    *istionetworkingv1alpha3.WorkloadEntry
+        inputEndpoint    *istioNetworkingV1Alpha3.WorkloadEntry
         weightedServices map[string]*WeightedService
         clusterIngress   string
         meshPorts        map[string]uint32
         meshHost         string
-        wantedEndpoints  *istionetworkingv1alpha3.WorkloadEntry
+        wantedEndpoints  *istioNetworkingV1Alpha3.WorkloadEntry
     }{
         {
             name:             "should return endpoint with active service address",
@@ -1234,7 +1587,7 @@ func TestUpdateEndpointsForBlueGreen(t *testing.T) {
             inputEndpoint:    endpoint,
             weightedServices: weightedServices,
             meshPorts:        meshPorts,
-            meshHost:         ACTIVE_MESH_HOST,
+            meshHost:         activeMeshHost,
             wantedEndpoints:  activeWantedEndpoints,
         },
         {
@@ -1243,7 +1596,7 @@ func TestUpdateEndpointsForBlueGreen(t *testing.T) {
             inputEndpoint:    endpoint,
             weightedServices: weightedServices,
             meshPorts:        meshPorts,
-            meshHost:         PREVIEW_MESH_HOST,
+            meshHost:         previewMeshHost,
             wantedEndpoints:  previewWantedEndpoints,
         },
     }
@@ -1259,56 +1612,54 @@ func TestUpdateEndpointsForBlueGreen(t *testing.T) {
 }
 
 func TestUpdateEndpointsForWeightedServices(t *testing.T) {
+    setupForServiceEntryTests()
     t.Parallel()
-
-    const CLUSTER_INGRESS_1 = "ingress1.com"
-    const CLUSTER_INGRESS_2 = "ingress2.com"
-    const CANARY_SERVICE = "canaryService"
-    const STABLE_SERVICE = "stableService"
-    const NAMESPACE = "namespace"
-
-    se := &istionetworkingv1alpha3.ServiceEntry{
-        Endpoints: []*istionetworkingv1alpha3.WorkloadEntry{
-            {Labels: map[string]string{}, Address: CLUSTER_INGRESS_1, Weight: 10, Ports: map[string]uint32{"http": 15443}},
-            {Labels: map[string]string{}, Address: CLUSTER_INGRESS_2, Weight: 10, Ports: map[string]uint32{"http": 15443}},
-        },
-    }
-
-    meshPorts := map[string]uint32{"http": 8080}
-
-    weightedServices := map[string]*WeightedService{
-        CANARY_SERVICE: {Weight: 10, Service: &v1.Service{ObjectMeta: v12.ObjectMeta{Name: CANARY_SERVICE, Namespace: NAMESPACE}}},
-        STABLE_SERVICE: {Weight: 90, Service: &v1.Service{ObjectMeta: v12.ObjectMeta{Name: STABLE_SERVICE, Namespace: NAMESPACE}}},
-    }
-
weightedServicesZeroWeight := map[string]*WeightedService{ - CANARY_SERVICE: {Weight: 0, Service: &v1.Service{ObjectMeta: v12.ObjectMeta{Name: CANARY_SERVICE, Namespace: NAMESPACE}}}, - STABLE_SERVICE: {Weight: 100, Service: &v1.Service{ObjectMeta: v12.ObjectMeta{Name: STABLE_SERVICE, Namespace: NAMESPACE}}}, - } - - wantedEndpoints := []*istionetworkingv1alpha3.WorkloadEntry{ - {Address: CLUSTER_INGRESS_2, Weight: 10, Ports: map[string]uint32{"http": 15443}}, - {Address: STABLE_SERVICE + common.Sep + NAMESPACE + common.DotLocalDomainSuffix, Weight: 90, Ports: meshPorts}, - {Address: CANARY_SERVICE + common.Sep + NAMESPACE + common.DotLocalDomainSuffix, Weight: 10, Ports: meshPorts}, - } - - wantedEndpointsZeroWeights := []*istionetworkingv1alpha3.WorkloadEntry{ - {Address: CLUSTER_INGRESS_2, Weight: 10, Ports: map[string]uint32{"http": 15443}}, - {Address: STABLE_SERVICE + common.Sep + NAMESPACE + common.DotLocalDomainSuffix, Weight: 100, Ports: meshPorts}, - } - + const ( + clusterIngress1 = "ingress1.com" + clusterIngress2 = "ingress2.com" + canaryService = "canaryService" + stableService = "stableService" + namespace = "namespace" + ) + var ( + meshPorts = map[string]uint32{"http": 8080} + se = &istioNetworkingV1Alpha3.ServiceEntry{ + Endpoints: []*istioNetworkingV1Alpha3.WorkloadEntry{ + {Labels: map[string]string{}, Address: clusterIngress1, Weight: 10, Ports: map[string]uint32{"http": 15443}}, + {Labels: map[string]string{}, Address: clusterIngress2, Weight: 10, Ports: map[string]uint32{"http": 15443}}, + }, + } + weightedServices = map[string]*WeightedService{ + canaryService: {Weight: 10, Service: &coreV1.Service{ObjectMeta: v12.ObjectMeta{Name: canaryService, Namespace: namespace}}}, + stableService: {Weight: 90, Service: &coreV1.Service{ObjectMeta: v12.ObjectMeta{Name: stableService, Namespace: namespace}}}, + } + weightedServicesZeroWeight = map[string]*WeightedService{ + canaryService: {Weight: 0, Service: &coreV1.Service{ObjectMeta: v12.ObjectMeta{Name: canaryService, Namespace: namespace}}}, + stableService: {Weight: 100, Service: &coreV1.Service{ObjectMeta: v12.ObjectMeta{Name: stableService, Namespace: namespace}}}, + } + wantedEndpoints = []*istioNetworkingV1Alpha3.WorkloadEntry{ + {Address: clusterIngress2, Weight: 10, Ports: map[string]uint32{"http": 15443}}, + {Address: stableService + common.Sep + namespace + common.DotLocalDomainSuffix, Weight: 90, Ports: meshPorts}, + {Address: canaryService + common.Sep + namespace + common.DotLocalDomainSuffix, Weight: 10, Ports: meshPorts}, + } + wantedEndpointsZeroWeights = []*istioNetworkingV1Alpha3.WorkloadEntry{ + {Address: clusterIngress2, Weight: 10, Ports: map[string]uint32{"http": 15443}}, + {Address: stableService + common.Sep + namespace + common.DotLocalDomainSuffix, Weight: 100, Ports: meshPorts}, + } + ) testCases := []struct { name string - inputServiceEntry *istionetworkingv1alpha3.ServiceEntry + inputServiceEntry *istioNetworkingV1Alpha3.ServiceEntry weightedServices map[string]*WeightedService clusterIngress string meshPorts map[string]uint32 - wantedEndpoints []*istionetworkingv1alpha3.WorkloadEntry + wantedEndpoints []*istioNetworkingV1Alpha3.WorkloadEntry }{ { name: "should return endpoints with assigned weights", inputServiceEntry: copyServiceEntry(se), weightedServices: weightedServices, - clusterIngress: CLUSTER_INGRESS_1, + clusterIngress: clusterIngress1, meshPorts: meshPorts, wantedEndpoints: wantedEndpoints, }, @@ -1324,7 +1675,7 @@ func TestUpdateEndpointsForWeightedServices(t *testing.T) { name: 
"should not return endpoints with zero weight", inputServiceEntry: copyServiceEntry(se), weightedServices: weightedServicesZeroWeight, - clusterIngress: CLUSTER_INGRESS_1, + clusterIngress: clusterIngress1, meshPorts: meshPorts, wantedEndpoints: wantedEndpointsZeroWeights, }, @@ -1348,45 +1699,35 @@ func TestUpdateEndpointsForWeightedServices(t *testing.T) { } }) } +} +func makeGTP(name, namespace, identity, env, dnsPrefix string, creationTimestamp v12.Time) *v13.GlobalTrafficPolicy { + return &v13.GlobalTrafficPolicy{ + ObjectMeta: v12.ObjectMeta{ + Name: name, + Namespace: namespace, + CreationTimestamp: creationTimestamp, + Labels: map[string]string{"identity": identity, "env": env}, + }, + Spec: model.GlobalTrafficPolicy{ + Policy: []*model.TrafficPolicy{{DnsPrefix: dnsPrefix}}, + }, + } } func TestUpdateGlobalGtpCache(t *testing.T) { - + setupForServiceEntryTests() var ( + envStage = "stage" + identity1 = "identity1" admiralCache = &AdmiralCache{GlobalTrafficCache: &globalTrafficCache{identityCache: make(map[string]*v13.GlobalTrafficPolicy), mutex: &sync.Mutex{}}} - - identity1 = "identity1" - - env_stage = "stage" - - gtp = &v13.GlobalTrafficPolicy{ObjectMeta: v12.ObjectMeta{Name: "gtp", Namespace: "namespace1", CreationTimestamp: v12.NewTime(time.Now().Add(time.Duration(-30))), Labels: map[string]string{"identity": identity1, "env": env_stage}}, Spec: model.GlobalTrafficPolicy{ - Policy: []*model.TrafficPolicy{{DnsPrefix: "hello"}}, - }} - - gtp2 = &v13.GlobalTrafficPolicy{ObjectMeta: v12.ObjectMeta{Name: "gtp2", Namespace: "namespace1", CreationTimestamp: v12.NewTime(time.Now().Add(time.Duration(-15))), Labels: map[string]string{"identity": identity1, "env": env_stage}}, Spec: model.GlobalTrafficPolicy{ - Policy: []*model.TrafficPolicy{{DnsPrefix: "hellogtp2"}}, - }} - - gtp7 = &v13.GlobalTrafficPolicy{ObjectMeta: v12.ObjectMeta{Name: "gtp7", Namespace: "namespace1", CreationTimestamp: v12.NewTime(time.Now().Add(time.Duration(-45))), Labels: map[string]string{"identity": identity1, "env": env_stage, "priority": "2"}}, Spec: model.GlobalTrafficPolicy{ - Policy: []*model.TrafficPolicy{{DnsPrefix: "hellogtp7"}}, - }} - - gtp3 = &v13.GlobalTrafficPolicy{ObjectMeta: v12.ObjectMeta{Name: "gtp3", Namespace: "namespace2", CreationTimestamp: v12.NewTime(time.Now()), Labels: map[string]string{"identity": identity1, "env": env_stage}}, Spec: model.GlobalTrafficPolicy{ - Policy: []*model.TrafficPolicy{{DnsPrefix: "hellogtp3"}}, - }} - - gtp4 = &v13.GlobalTrafficPolicy{ObjectMeta: v12.ObjectMeta{Name: "gtp4", Namespace: "namespace1", CreationTimestamp: v12.NewTime(time.Now().Add(time.Duration(-30))), Labels: map[string]string{"identity": identity1, "env": env_stage, "priority": "10"}}, Spec: model.GlobalTrafficPolicy{ - Policy: []*model.TrafficPolicy{{DnsPrefix: "hellogtp4"}}, - }} - - gtp5 = &v13.GlobalTrafficPolicy{ObjectMeta: v12.ObjectMeta{Name: "gtp5", Namespace: "namespace1", CreationTimestamp: v12.NewTime(time.Now().Add(time.Duration(-15))), Labels: map[string]string{"identity": identity1, "env": env_stage, "priority": "2"}}, Spec: model.GlobalTrafficPolicy{ - Policy: []*model.TrafficPolicy{{DnsPrefix: "hellogtp5"}}, - }} - - gtp6 = &v13.GlobalTrafficPolicy{ObjectMeta: v12.ObjectMeta{Name: "gtp6", Namespace: "namespace3", CreationTimestamp: v12.NewTime(time.Now()), Labels: map[string]string{"identity": identity1, "env": env_stage, "priority": "1000"}}, Spec: model.GlobalTrafficPolicy{ - Policy: []*model.TrafficPolicy{{DnsPrefix: "hellogtp6"}}, - }} + gtp1 = makeGTP("gtp1", "namespace1", 
identity1, envStage, "hellogtp1", v12.NewTime(time.Now().Add(time.Duration(-30)))) + gtp2 = makeGTP("gtp2", "namespace1", identity1, envStage, "hellogtp2", v12.NewTime(time.Now().Add(time.Duration(-15)))) + gtp7 = makeGTP("gtp7", "namespace1", identity1, envStage, "hellogtp7", v12.NewTime(time.Now().Add(time.Duration(-45)))) + gtp3 = makeGTP("gtp3", "namespace2", identity1, envStage, "hellogtp3", v12.NewTime(time.Now())) + gtp4 = makeGTP("gtp4", "namespace1", identity1, envStage, "hellogtp4", v12.NewTime(time.Now().Add(time.Duration(-30)))) + gtp5 = makeGTP("gtp5", "namespace1", identity1, envStage, "hellogtp5", v12.NewTime(time.Now().Add(time.Duration(-15)))) + gtp6 = makeGTP("gtp6", "namespace3", identity1, envStage, "hellogtp6", v12.NewTime(time.Now())) ) testCases := []struct { @@ -1400,49 +1741,49 @@ func TestUpdateGlobalGtpCache(t *testing.T) { name: "Should return nil when no GTP present", gtps: map[string][]*v13.GlobalTrafficPolicy{}, identity: identity1, - env: env_stage, + env: envStage, expectedGtp: nil, }, { name: "Should return the only existing gtp", - gtps: map[string][]*v13.GlobalTrafficPolicy{"c1": {gtp}}, + gtps: map[string][]*v13.GlobalTrafficPolicy{"c1": {gtp1}}, identity: identity1, - env: env_stage, - expectedGtp: gtp, + env: envStage, + expectedGtp: gtp1, }, { name: "Should return the gtp recently created within the cluster", - gtps: map[string][]*v13.GlobalTrafficPolicy{"c1": {gtp, gtp2}}, + gtps: map[string][]*v13.GlobalTrafficPolicy{"c1": {gtp1, gtp2}}, identity: identity1, - env: env_stage, + env: envStage, expectedGtp: gtp2, }, { name: "Should return the gtp recently created from another cluster", - gtps: map[string][]*v13.GlobalTrafficPolicy{"c1": {gtp, gtp2}, "c2": {gtp3}}, + gtps: map[string][]*v13.GlobalTrafficPolicy{"c1": {gtp1, gtp2}, "c2": {gtp3}}, identity: identity1, - env: env_stage, + env: envStage, expectedGtp: gtp3, }, { name: "Should return the existing priority gtp within the cluster", - gtps: map[string][]*v13.GlobalTrafficPolicy{"c1": {gtp, gtp2, gtp7}}, + gtps: map[string][]*v13.GlobalTrafficPolicy{"c1": {gtp1, gtp2, gtp7}}, identity: identity1, - env: env_stage, + env: envStage, expectedGtp: gtp7, }, { name: "Should return the recently created priority gtp within the cluster", - gtps: map[string][]*v13.GlobalTrafficPolicy{"c1": {gtp5, gtp4, gtp, gtp2}}, + gtps: map[string][]*v13.GlobalTrafficPolicy{"c1": {gtp5, gtp4, gtp1, gtp2}}, identity: identity1, - env: env_stage, - expectedGtp: gtp4, + env: envStage, + expectedGtp: gtp5, }, { name: "Should return the recently created priority gtp from another cluster", - gtps: map[string][]*v13.GlobalTrafficPolicy{"c1": {gtp, gtp2, gtp4, gtp5, gtp7}, "c2": {gtp6}, "c3": {gtp3}}, + gtps: map[string][]*v13.GlobalTrafficPolicy{"c1": {gtp1, gtp2, gtp4, gtp5, gtp7}, "c2": {gtp6}, "c3": {gtp3}}, identity: identity1, - env: env_stage, + env: envStage, expectedGtp: gtp6, }, } @@ -1468,8 +1809,13 @@ func isLower(s string) bool { } func TestIsBlueGreenStrategy(t *testing.T) { + setupForServiceEntryTests() var ( - emptyRollout *argo.Rollout + emptyRollout *argo.Rollout + rolloutWithEmptySpec = &argo.Rollout{} + rolloutWithNoStrategy = &argo.Rollout{ + Spec: argo.RolloutSpec{}, + } rolloutWithBlueGreenStrategy = &argo.Rollout{ Spec: argo.RolloutSpec{ Strategy: argo.RolloutStrategy{ @@ -1488,10 +1834,6 @@ func TestIsBlueGreenStrategy(t *testing.T) { }, }, } - rolloutWithNoStrategy = &argo.Rollout{ - Spec: argo.RolloutSpec{}, - } - rolloutWithEmptySpec = &argo.Rollout{} ) cases := []struct { name string @@ -1534,7 
+1876,6 @@ func TestIsBlueGreenStrategy(t *testing.T) { expectedResult: false, }, } - for _, c := range cases { t.Run(c.name, func(t *testing.T) { result := isBlueGreenStrategy(c.rollout) diff --git a/admiral/pkg/clusters/testdata/fake.config b/admiral/pkg/clusters/testdata/fake.config index ab5da600f..3fc12dd29 100644 --- a/admiral/pkg/clusters/testdata/fake.config +++ b/admiral/pkg/clusters/testdata/fake.config @@ -1,7 +1,6 @@ apiVersion: v1 clusters: - cluster: - server: https://192.168.99.121:8443 name: kube contexts: diff --git a/admiral/pkg/clusters/types.go b/admiral/pkg/clusters/types.go index 9ec592991..b96f1d05a 100644 --- a/admiral/pkg/clusters/types.go +++ b/admiral/pkg/clusters/types.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "strings" "sync" "time" @@ -66,6 +67,7 @@ type RemoteRegistry struct { ctx context.Context AdmiralCache *AdmiralCache StartTime time.Time + ExcludeAssetList []string } func NewRemoteRegistry(ctx context.Context, params common.AdmiralParams) *RemoteRegistry { @@ -100,6 +102,7 @@ func NewRemoteRegistry(ctx context.Context, params common.AdmiralParams) *Remote StartTime: time.Now(), remoteControllers: make(map[string]*RemoteController), AdmiralCache: admiralCache, + ExcludeAssetList: params.ExcludeAssetList, } } @@ -302,18 +305,16 @@ func (r RoutingPolicyHandler) Added(ctx context.Context, obj *v1.RoutingPolicy) log.Info("No dependents found for Routing Policy - ", obj.Name) return } - r.processroutingPolicy(ctx, dependents, obj, admiral.Add) - + r.processRoutingPolicy(ctx, dependents, obj, admiral.Add) log.Infof(LogFormat, admiral.Add, "routingpolicy", obj.Name, "", "finished processing routing policy") } else { log.Infof(LogFormat, admiral.Add, "routingpolicy", obj.Name, "", "routingpolicy disabled") } } -func (r RoutingPolicyHandler) processroutingPolicy(ctx context.Context, dependents map[string]string, routingPolicy *v1.RoutingPolicy, eventType admiral.EventType) { +func (r RoutingPolicyHandler) processRoutingPolicy(ctx context.Context, dependents map[string]string, routingPolicy *v1.RoutingPolicy, eventType admiral.EventType) { for _, remoteController := range r.RemoteRegistry.remoteControllers { for _, dependent := range dependents { - // Check if the dependent exists in this remoteCluster. 
If so, we create an envoyFilter with dependent identity as workload selector if _, ok := r.RemoteRegistry.AdmiralCache.IdentityClusterCache.Get(dependent).Copy()[remoteController.ClusterID]; ok { selectors := r.RemoteRegistry.AdmiralCache.WorkloadSelectorCache.Get(dependent + remoteController.ClusterID).Copy() @@ -345,8 +346,7 @@ func (r RoutingPolicyHandler) Updated(ctx context.Context, obj *v1.RoutingPolicy if len(dependents) == 0 { return } - r.processroutingPolicy(ctx, dependents, obj, admiral.Update) - + r.processRoutingPolicy(ctx, dependents, obj, admiral.Update) log.Infof(LogFormat, admiral.Update, "routingpolicy", obj.Name, "", "updated routing policy") } else { log.Infof(LogFormat, admiral.Update, "routingpolicy", obj.Name, "", "routingpolicy disabled") @@ -436,6 +436,15 @@ func (sh *ServiceHandler) Deleted(ctx context.Context, obj *k8sV1.Service) { } } +func isAnExcludedAsset(assetName string, excludedAssetList []string) bool { + for _, excludedAsset := range excludedAssetList { + if strings.EqualFold(assetName, excludedAsset) { + return true + } + } + return false +} + func HandleEventForService(ctx context.Context, svc *k8sV1.Service, remoteRegistry *RemoteRegistry, clusterName string) error { if svc.Spec.Selector == nil { return fmt.Errorf("selector missing on service=%s in namespace=%s cluster=%s", svc.Name, svc.Namespace, clusterName) @@ -447,9 +456,9 @@ func HandleEventForService(ctx context.Context, svc *k8sV1.Service, remoteRegist deploymentController := rc.DeploymentController rolloutController := rc.RolloutController if deploymentController != nil { - matchingDeployements := deploymentController.GetDeploymentBySelectorInNamespace(ctx, svc.Spec.Selector, svc.Namespace) - if len(matchingDeployements) > 0 { - for _, deployment := range matchingDeployements { + matchingDeployments := deploymentController.GetDeploymentBySelectorInNamespace(ctx, svc.Spec.Selector, svc.Namespace) + if len(matchingDeployments) > 0 { + for _, deployment := range matchingDeployments { HandleEventForDeployment(ctx, admiral.Update, &deployment, remoteRegistry, clusterName) } } @@ -467,31 +476,23 @@ func HandleEventForService(ctx context.Context, svc *k8sV1.Service, remoteRegist } func (dh *DependencyHandler) Added(ctx context.Context, obj *v1.Dependency) { - log.Infof(LogFormat, "Add", "dependency-record", obj.Name, "", "Received=true namespace="+obj.Namespace) - HandleDependencyRecord(ctx, obj, dh.RemoteRegistry) - } func (dh *DependencyHandler) Updated(ctx context.Context, obj *v1.Dependency) { - log.Infof(LogFormat, "Update", "dependency-record", obj.Name, "", "Received=true namespace="+obj.Namespace) - // we need to clean up before handling this as an add; updates that delete a dependency must be handled by finding the diff first // this is more complex because we want to make sure no other service depends on the same service (whose dependency we just removed). 
// need to make sure nothing depends on it before cleaning up the SE for that service HandleDependencyRecord(ctx, obj, dh.RemoteRegistry) - } func HandleDependencyRecord(ctx context.Context, obj *v1.Dependency, remoteRegitry *RemoteRegistry) { sourceIdentity := obj.Spec.Source - if len(sourceIdentity) == 0 { log.Infof(LogFormat, "Event", "dependency-record", obj.Name, "", "No identity found namespace="+obj.Namespace) } - updateIdentityDependencyCache(sourceIdentity, remoteRegitry.AdmiralCache.IdentityDependencyCache, obj) } @@ -547,10 +548,8 @@ func (rh *RolloutHandler) Deleted(ctx context.Context, obj *argo.Rollout) { // HandleEventForRollout helper function to handle add and delete for RolloutHandler func HandleEventForRollout(ctx context.Context, event admiral.EventType, obj *argo.Rollout, remoteRegistry *RemoteRegistry, clusterName string) { - log.Infof(LogFormat, event, "rollout", obj.Name, clusterName, "Received") globalIdentifier := common.GetRolloutGlobalIdentifier(obj) - if len(globalIdentifier) == 0 { log.Infof(LogFormat, "Event", "rollout", obj.Name, clusterName, "Skipped as '"+common.GetWorkloadIdentifier()+" was not found', namespace="+obj.Namespace) return @@ -564,16 +563,12 @@ func HandleEventForRollout(ctx context.Context, event admiral.EventType, obj *ar // helper function to handle add and delete for DeploymentHandler func HandleEventForDeployment(ctx context.Context, event admiral.EventType, obj *k8sAppsV1.Deployment, remoteRegistry *RemoteRegistry, clusterName string) { - globalIdentifier := common.GetDeploymentGlobalIdentifier(obj) - if len(globalIdentifier) == 0 { log.Infof(LogFormat, "Event", "deployment", obj.Name, clusterName, "Skipped as '"+common.GetWorkloadIdentifier()+" was not found', namespace="+obj.Namespace) return } - env := common.GetEnv(obj) - // Use the same function as the add-deployment path to update and put a new service entry in place of the old one modifyServiceEntryForNewServiceOrPod(ctx, event, env, globalIdentifier, remoteRegistry) } @@ -581,15 +576,11 @@ // HandleEventForGlobalTrafficPolicy processes all the events related to GTPs func HandleEventForGlobalTrafficPolicy(ctx context.Context, event admiral.EventType, gtp *v1.GlobalTrafficPolicy, remoteRegistry *RemoteRegistry, clusterName string) error { - globalIdentifier := common.GetGtpIdentity(gtp) - if len(globalIdentifier) == 0 { return fmt.Errorf(LogFormat, "Event", "globaltrafficpolicy", gtp.Name, clusterName, "Skipped as '"+common.GetWorkloadIdentifier()+" was not found', namespace="+gtp.Namespace) } - env := common.GetGtpEnv(gtp) - // For now we're going to force all the events to update only in order to prevent // the endpoints from being deleted. // TODO: Need to come up with a way to prevent deleting default endpoints so that this hack can be removed. 
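For reference, a minimal, self-contained sketch of the exclude-asset check introduced by this change: the --exclude_asset_list flag populates AdmiralParams.ExcludeAssetList, NewRemoteRegistry copies it onto RemoteRegistry.ExcludeAssetList, and isAnExcludedAsset (added above) performs the lookup. The asset names below are hypothetical; note that strings.EqualFold makes the match case-insensitive:

package main

import (
	"fmt"
	"strings"
)

// isAnExcludedAsset reports whether assetName appears in excludedAssetList,
// ignoring case (mirrors the helper added to admiral/pkg/clusters/types.go).
func isAnExcludedAsset(assetName string, excludedAssetList []string) bool {
	for _, excludedAsset := range excludedAssetList {
		if strings.EqualFold(assetName, excludedAsset) {
			return true
		}
	}
	return false
}

func main() {
	// Hypothetical asset identities; real values come from --exclude_asset_list.
	excluded := []string{"intuit.platform.mesh.server"}
	fmt.Println(isAnExcludedAsset("Intuit.platform.mesh.server", excluded)) // true: match is case-insensitive
	fmt.Println(isAnExcludedAsset("intuit.platform.mesh.client", excluded)) // false: not in the list
}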
diff --git a/admiral/pkg/clusters/types_test.go b/admiral/pkg/clusters/types_test.go index 858567163..128c4a684 100644 --- a/admiral/pkg/clusters/types_test.go +++ b/admiral/pkg/clusters/types_test.go @@ -24,35 +24,36 @@ import ( ) var ignoreUnexported = cmpopts.IgnoreUnexported(v1.GlobalTrafficPolicy{}.Status) - -func init() { - p := common.AdmiralParams{ - KubeconfigPath: "testdata/fake.config", - LabelSet: &common.LabelSet{}, - EnableSAN: true, - SANPrefix: "prefix", - HostnameSuffix: "mesh", - SyncNamespace: "ns", - CacheRefreshDuration: time.Minute, - ClusterRegistriesNamespace: "default", - DependenciesNamespace: "default", - SecretResolver: "", - EnableRoutingPolicy: true, - EnvoyFilterVersion: "1.13", - } - - p.LabelSet.WorkloadIdentityKey = "identity" - p.LabelSet.EnvKey = "admiral.io/env" - p.LabelSet.GlobalTrafficDeploymentLabel = "identity" - p.LabelSet.PriorityKey = "priority" - - common.InitializeConfig(p) +var typeTestSingleton sync.Once + +func setupForTypeTests() { + typeTestSingleton.Do(func() { + common.ResetSync() + p := common.AdmiralParams{ + KubeconfigPath: "testdata/fake.config", + LabelSet: &common.LabelSet{}, + EnableSAN: true, + SANPrefix: "prefix", + HostnameSuffix: "mesh", + SyncNamespace: "ns", + CacheRefreshDuration: time.Minute, + ClusterRegistriesNamespace: "default", + DependenciesNamespace: "default", + SecretResolver: "", + EnableRoutingPolicy: true, + EnvoyFilterVersion: "1.13", + } + p.LabelSet.WorkloadIdentityKey = "identity" + p.LabelSet.EnvKey = "admiral.io/env" + p.LabelSet.GlobalTrafficDeploymentLabel = "identity" + p.LabelSet.PriorityKey = "priority" + common.InitializeConfig(p) + }) } func TestDeploymentHandler(t *testing.T) { - + setupForTypeTests() ctx := context.Background() - p := common.AdmiralParams{ KubeconfigPath: "testdata/fake.config", } @@ -132,9 +133,8 @@ func TestDeploymentHandler(t *testing.T) { } func TestRolloutHandler(t *testing.T) { - + setupForTypeTests() ctx := context.Background() - p := common.AdmiralParams{ KubeconfigPath: "testdata/fake.config", } @@ -219,6 +219,7 @@ func TestRolloutHandler(t *testing.T) { } func TestHandleEventForGlobalTrafficPolicy(t *testing.T) { + setupForTypeTests() ctx := context.Background() event := admiral.EventType("Add") p := common.AdmiralParams{ @@ -274,6 +275,7 @@ func TestHandleEventForGlobalTrafficPolicy(t *testing.T) { } func TestRoutingPolicyHandler(t *testing.T) { + common.ResetSync() p := common.AdmiralParams{ KubeconfigPath: "testdata/fake.config", LabelSet: &common.LabelSet{}, diff --git a/admiral/pkg/clusters/util.go b/admiral/pkg/clusters/util.go index 0db890de7..5c509ac46 100644 --- a/admiral/pkg/clusters/util.go +++ b/admiral/pkg/clusters/util.go @@ -2,15 +2,16 @@ package clusters import ( "errors" + "strconv" + "strings" + "time" + argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" log "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" k8sAppsV1 "k8s.io/api/apps/v1" k8sV1 "k8s.io/api/core/v1" - "strconv" - "strings" - "time" ) func GetMeshPorts(clusterName string, destService *k8sV1.Service, @@ -30,13 +31,13 @@ func GetMeshPortsForRollout(clusterName string, destService *k8sV1.Service, // Get the service selector to add as workload selector for envoyFilter func GetServiceSelector(clusterName string, destService *k8sV1.Service) *common.Map { var selectors = destService.Spec.Selector - if len(selectors) == 0{ + if len(selectors) == 0 { log.Infof(LogFormat, "GetServiceLabels", 
"no selectors present", destService.Name, clusterName, selectors) return nil } var tempMap = common.NewMap() for key, value := range selectors { - tempMap.Put(key,value) + tempMap.Put(key, value) } log.Infof(LogFormat, "GetServiceLabels", "selectors present", destService.Name, clusterName, selectors) return tempMap @@ -93,7 +94,7 @@ func getMeshPortsHelper(meshPorts string, destService *k8sV1.Service, clusterNam } if _, ok := meshPortMap[targetPort]; ok { var protocol = GetPortProtocol(servicePort.Name) - log.Debugf(LogFormat, "GetMeshPorts", servicePort.Port, destService.Name, clusterName, "Adding mesh port for protocol: " + protocol) + log.Debugf(LogFormat, "GetMeshPorts", servicePort.Port, destService.Name, clusterName, "Adding mesh port for protocol: "+protocol) ports[protocol] = uint32(servicePort.Port) break } diff --git a/admiral/pkg/clusters/util_test.go b/admiral/pkg/clusters/util_test.go index d418177f4..e5d8dd01e 100644 --- a/admiral/pkg/clusters/util_test.go +++ b/admiral/pkg/clusters/util_test.go @@ -2,56 +2,49 @@ package clusters import ( "errors" + "reflect" + "strconv" + "testing" + argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" "github.com/istio-ecosystem/admiral/admiral/pkg/controller/common" k8sAppsV1 "k8s.io/api/apps/v1" coreV1 "k8s.io/api/core/v1" k8sV1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "reflect" - "strconv" - "testing" ) func TestGetMeshPorts(t *testing.T) { - - annotatedPort := 8090 - annotatedSecondPort := 8091 - defaultServicePort := uint32(8080) - - defaultK8sSvcPortNoName := k8sV1.ServicePort{Port: int32(defaultServicePort)} - defaultK8sSvcPort := k8sV1.ServicePort{Name: "default", Port: int32(defaultServicePort)} - meshK8sSvcPort := k8sV1.ServicePort{Name: "mesh", Port: int32(annotatedPort)} - - serviceMeshPorts := []k8sV1.ServicePort{defaultK8sSvcPort, meshK8sSvcPort} - - serviceMeshPortsOnlyDefault := []k8sV1.ServicePort{defaultK8sSvcPortNoName} - - service := k8sV1.Service{ - ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, - Spec: k8sV1.ServiceSpec{Ports: serviceMeshPorts}, - } - deployment := k8sAppsV1.Deployment{ - Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: strconv.Itoa(annotatedPort)}}, - }}} - - deploymentWithMultipleMeshPorts := k8sAppsV1.Deployment{ - Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{ - ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: strconv.Itoa(annotatedPort) + "," + strconv.Itoa(annotatedSecondPort)}}, - }}} - - ports := map[string]uint32{"http": uint32(annotatedPort)} - portsDiffTargetPort := map[string]uint32{"http": uint32(80)} - - grpcPorts := map[string]uint32{"grpc": uint32(annotatedPort)} - grpcWebPorts := map[string]uint32{"grpc-web": uint32(annotatedPort)} - http2Ports := map[string]uint32{"http2": uint32(annotatedPort)} - - portsFromDefaultSvcPort := map[string]uint32{"http": defaultServicePort} - - emptyPorts := map[string]uint32{} + var ( + annotatedPort = 8090 + annotatedSecondPort = 8091 + defaultServicePort = uint32(8080) + ports = map[string]uint32{"http": uint32(annotatedPort)} + portsDiffTargetPort = map[string]uint32{"http": uint32(80)} + grpcPorts = map[string]uint32{"grpc": uint32(annotatedPort)} + grpcWebPorts = map[string]uint32{"grpc-web": 
uint32(annotatedPort)} + http2Ports = map[string]uint32{"http2": uint32(annotatedPort)} + portsFromDefaultSvcPort = map[string]uint32{"http": defaultServicePort} + emptyPorts = map[string]uint32{} + defaultK8sSvcPortNoName = k8sV1.ServicePort{Port: int32(defaultServicePort)} + defaultK8sSvcPort = k8sV1.ServicePort{Name: "default", Port: int32(defaultServicePort)} + meshK8sSvcPort = k8sV1.ServicePort{Name: "mesh", Port: int32(annotatedPort)} + serviceMeshPorts = []k8sV1.ServicePort{defaultK8sSvcPort, meshK8sSvcPort} + serviceMeshPortsOnlyDefault = []k8sV1.ServicePort{defaultK8sSvcPortNoName} + service = k8sV1.Service{ + ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, + Spec: k8sV1.ServiceSpec{Ports: serviceMeshPorts}, + } + deployment = k8sAppsV1.Deployment{ + Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: strconv.Itoa(annotatedPort)}}, + }}} + deploymentWithMultipleMeshPorts = k8sAppsV1.Deployment{ + Spec: k8sAppsV1.DeploymentSpec{Template: coreV1.PodTemplateSpec{ + ObjectMeta: v1.ObjectMeta{Annotations: map[string]string{common.SidecarEnabledPorts: strconv.Itoa(annotatedPort) + "," + strconv.Itoa(annotatedSecondPort)}}, + }}} + ) testCases := []struct { name string @@ -67,17 +60,17 @@ func TestGetMeshPorts(t *testing.T) { expected: ports, }, { - name: "should return a http port if no port name is specified", - service: k8sV1.Service{ + name: "should return a http port if no port name is specified", + service: k8sV1.Service{ ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, - Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Port: int32(80), TargetPort: intstr.FromInt(annotatedPort),}}}, + Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Port: int32(80), TargetPort: intstr.FromInt(annotatedPort)}}}, }, deployment: deployment, expected: portsDiffTargetPort, }, { - name: "should return a http port if the port name doesn't start with a protocol name", - service: k8sV1.Service{ + name: "should return a http port if the port name doesn't start with a protocol name", + service: k8sV1.Service{ ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "hello-grpc", Port: int32(annotatedPort)}}}, }, @@ -85,8 +78,8 @@ func TestGetMeshPorts(t *testing.T) { expected: ports, }, { - name: "should return a grpc port based on annotation", - service: k8sV1.Service{ + name: "should return a grpc port based on annotation", + service: k8sV1.Service{ ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "grpc-service", Port: int32(annotatedPort)}}}, }, @@ -94,8 +87,8 @@ func TestGetMeshPorts(t *testing.T) { expected: grpcPorts, }, { - name: "should return a grpc-web port based on annotation", - service: k8sV1.Service{ + name: "should return a grpc-web port based on annotation", + service: k8sV1.Service{ ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "grpc-web", Port: int32(annotatedPort)}}}, }, @@ -103,8 +96,8 @@ func TestGetMeshPorts(t *testing.T) { expected: grpcWebPorts, }, { - name: "should return a http2 port based on annotation", - 
service: k8sV1.Service{ + name: "should return a http2 port based on annotation", + service: k8sV1.Service{ ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "http2", Port: int32(annotatedPort)}}}, }, @@ -136,10 +129,10 @@ func TestGetMeshPorts(t *testing.T) { expected: emptyPorts, }, { - name: "should return a http port if the port name doesn't start with a protocol name", - service: k8sV1.Service{ + name: "should return a http port if the port name doesn't start with a protocol name", + service: k8sV1.Service{ ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, - Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "http", Port: int32(annotatedPort)}, + Spec: k8sV1.ServiceSpec{Ports: []k8sV1.ServicePort{{Name: "http", Port: int32(annotatedPort)}, {Name: "grpc", Port: int32(annotatedSecondPort)}}}, }, deployment: deploymentWithMultipleMeshPorts, @@ -216,51 +209,51 @@ func TestValidateConfigmapBeforePutting(t *testing.T) { func TestGetServiceSelector(t *testing.T) { - selector := map[string]string {"app":"test1"} + selector := map[string]string{"app": "test1"} testCases := []struct { name string clusterName string service k8sV1.Service expected map[string]string - }{ + }{ { name: "should return a selectors based on service", clusterName: "test-cluster", - service: k8sV1.Service{ + service: k8sV1.Service{ ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, - Spec: k8sV1.ServiceSpec{Selector: selector}, + Spec: k8sV1.ServiceSpec{Selector: selector}, }, - expected: selector, + expected: selector, }, { name: "should return empty selectors", clusterName: "test-cluster", - service: k8sV1.Service{ + service: k8sV1.Service{ ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, Spec: k8sV1.ServiceSpec{Selector: map[string]string{}}, }, - expected: nil, + expected: nil, }, { name: "should return nil", clusterName: "test-cluster", - service: k8sV1.Service{ + service: k8sV1.Service{ ObjectMeta: v1.ObjectMeta{Name: "server", Labels: map[string]string{"asset": "Intuit.platform.mesh.server"}}, Spec: k8sV1.ServiceSpec{Selector: nil}, }, - expected: nil, + expected: nil, }, } for _, c := range testCases { t.Run(c.name, func(t *testing.T) { - selectors := GetServiceSelector(c.clusterName,&c.service) + selectors := GetServiceSelector(c.clusterName, &c.service) if selectors == nil { if c.expected != nil { t.Errorf("Wanted selectors: %v, got: %v", c.expected, selectors) } - }else if !reflect.DeepEqual(selectors.Copy(), c.expected) { + } else if !reflect.DeepEqual(selectors.Copy(), c.expected) { t.Errorf("Wanted selectors: %v, got: %v", c.expected, selectors) } }) diff --git a/admiral/pkg/controller/common/config.go b/admiral/pkg/controller/common/config.go index d4afaaa32..0d26534a1 100644 --- a/admiral/pkg/controller/common/config.go +++ b/admiral/pkg/controller/common/config.go @@ -1,16 +1,22 @@ package common import ( - log "github.com/sirupsen/logrus" - "sync" "time" + + "github.com/matryer/resync" + + log "github.com/sirupsen/logrus" ) var admiralParams = AdmiralParams{ LabelSet: &LabelSet{}, } -var once sync.Once +var once resync.Once + +func ResetSync() { + once.Reset() +} func InitializeConfig(params AdmiralParams) { var initHappened = false @@ -19,7 +25,9 @@ func InitializeConfig(params 
AdmiralParams) { initHappened = true InitializeMetrics() }) - if !initHappened { + if initHappened { + log.Info("InitializeConfig was called.") + } else { log.Warn("InitializeConfig was called but didn't take effect. It can only be called once, and thus has already been initialized. Please ensure you aren't re-initializing the config.") } } diff --git a/admiral/pkg/controller/common/rolloutcommon.go b/admiral/pkg/controller/common/rolloutcommon.go index 5500d03db..474ae8dcf 100644 --- a/admiral/pkg/controller/common/rolloutcommon.go +++ b/admiral/pkg/controller/common/rolloutcommon.go @@ -1,11 +1,12 @@ package common import ( - argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" - "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1" - log "github.com/sirupsen/logrus" "sort" "strings" + + argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1" + v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1" + log "github.com/sirupsen/logrus" ) // GetCname returns cname in the format <env>.<identity>.global, Ex: stage.Admiral.services.registry.global diff --git a/admiral/pkg/controller/common/types.go b/admiral/pkg/controller/common/types.go index f1e0e9e08..866c99a82 100644 --- a/admiral/pkg/controller/common/types.go +++ b/admiral/pkg/controller/common/types.go @@ -48,9 +48,10 @@ type AdmiralParams struct { AdmiralStateCheckerName string DRStateStoreConfigPath string ServiceEntryIPPrefix string - EnvoyFilterVersion string - EnvoyFilterAdditionalConfig string + EnvoyFilterVersion string + EnvoyFilterAdditionalConfig string EnableRoutingPolicy bool + ExcludeAssetList []string } func (b AdmiralParams) String() string { diff --git a/go.mod b/go.mod index 0543e668e..19b16f629 100644 --- a/go.mod +++ b/go.mod @@ -53,6 +53,7 @@ require ( github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/matryer/resync v0.0.0-20161211202428-d39c09a11215 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect diff --git a/go.sum b/go.sum index 581873f2a..44b63ae63 100644 --- a/go.sum +++ b/go.sum @@ -235,6 +235,8 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/matryer/resync v0.0.0-20161211202428-d39c09a11215 h1:hDa3vAq/Zo5gjfJ46XMsGFbH+hTizpR4fUzQCk2nxgk= +github.com/matryer/resync v0.0.0-20161211202428-d39c09a11215/go.mod h1:LH+NgPY9AJpDfqAFtzyer01N9MYNsAKUf3DC9DV1xIY= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
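The reason the test files can now call common.ResetSync() (see the setupFor*Tests helpers above) is that config.go swaps sync.Once for matryer/resync, whose Once adds a Reset method so Do can run again. A minimal sketch of the pattern, using the mirrored import path from this repo's go.mod (canonically github.com/matryer/resync); the parameter values are hypothetical:

package main

import (
	"fmt"

	"github.com/matryer/resync"
)

var once resync.Once

// initializeConfig mimics common.InitializeConfig: the body runs at most once
// per "generation" of the Once, until Reset is called.
func initializeConfig(params string) {
	once.Do(func() {
		fmt.Println("initialized with", params)
	})
}

func main() {
	initializeConfig("params-A") // prints: initialized with params-A
	initializeConfig("params-B") // no-op: Do has already run
	once.Reset()                 // this is what common.ResetSync() exposes
	initializeConfig("params-B") // prints: initialized with params-B
}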