abstract update preview endpoint to method
Signed-off-by: nbn01 <[email protected]>
nbn01 committed Mar 31, 2022
1 parent 50c0ce5 commit 42f513c
Showing 2 changed files with 41 additions and 36 deletions.
36 changes: 17 additions & 19 deletions admiral/pkg/clusters/registry_test.go
@@ -2,13 +2,9 @@ package clusters

import (
"context"
"strings"
"testing"
"time"

"github.com/google/go-cmp/cmp"
depModel "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/model"
v1 "github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
"github.com/istio-ecosystem/admiral/admiral/pkg/apis/admiral/v1"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/admiral"
"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
"github.com/istio-ecosystem/admiral/admiral/pkg/test"
@@ -21,6 +17,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"strings"
"testing"
"time"
)

func init() {
@@ -37,7 +36,6 @@ func init() {
SecretResolver: "",
WorkloadSidecarUpdate: "enabled",
WorkloadSidecarName: "default",
PreviewHostnamePrefix: "preview",
}

p.LabelSet.WorkloadIdentityKey = "identity"
@@ -451,24 +449,24 @@ func TestUpdateCacheController(t *testing.T) {

//Struct of test case info. Name is required.
testCases := []struct {
name string
oldConfig *rest.Config
newConfig *rest.Config
clusterId string
name string
oldConfig *rest.Config
newConfig *rest.Config
clusterId string
shouldRefresh bool
}{
{
name: "Should update controller when kubeconfig changes",
oldConfig: originalConfig,
newConfig: changedConfig,
clusterId: "test.cluster",
name: "Should update controller when kubeconfig changes",
oldConfig: originalConfig,
newConfig: changedConfig,
clusterId: "test.cluster",
shouldRefresh: true,
},
{
name: "Should not update controller when kubeconfig doesn't change",
oldConfig: originalConfig,
newConfig: originalConfig,
clusterId: "test.cluster",
name: "Should not update controller when kubeconfig doesn't change",
oldConfig: originalConfig,
newConfig: originalConfig,
clusterId: "test.cluster",
shouldRefresh: false,
},
}
@@ -478,7 +476,7 @@ func TestUpdateCacheController(t *testing.T) {
t.Run(c.name, func(t *testing.T) {
hook := logTest.NewGlobal()
rr.RemoteControllers[c.clusterId].ApiServer = c.oldConfig.Host
d, err := admiral.NewDeploymentController(make(chan struct{}), &test.MockDeploymentHandler{}, c.oldConfig, time.Second*time.Duration(300))
d, err := admiral.NewDeploymentController(make(chan struct{}), &test.MockDeploymentHandler{}, c.oldConfig, time.Second*time.Duration(300))
if err != nil {
t.Fatalf("Unexpected error creating controller %v", err)
}
41 changes: 24 additions & 17 deletions admiral/pkg/clusters/serviceentry.go
@@ -142,9 +142,8 @@ func modifyServiceEntryForNewServiceOrPod(event admiral.EventType, env string, s

if len(sourceDeployments) > 0 {
meshPorts = GetMeshPorts(sourceCluster, serviceInstance, sourceDeployments[sourceCluster])
} else if !isBlueGreenStrategy {
meshPorts = GetMeshPortsForRollout(sourceCluster, serviceInstance, sourceRollouts[sourceCluster])
}

for key, serviceEntry := range serviceEntries {
if len(serviceEntry.Endpoints) == 0 {
AddServiceEntriesWithDr(remoteRegistry.AdmiralCache, map[string]string{sourceCluster: sourceCluster}, remoteRegistry.RemoteControllers,
@@ -156,22 +155,9 @@ func modifyServiceEntryForNewServiceOrPod(event admiral.EventType, env string, s
if ep.Address == clusterIngress || ep.Address == "" {
// Update endpoints with local fqdn for active and preview SE of blue-green rollout
if isBlueGreenStrategy {
activeServiceName := sourceRollouts[sourceCluster].Spec.Strategy.BlueGreen.ActiveService
previewServiceName := sourceRollouts[sourceCluster].Spec.Strategy.BlueGreen.PreviewService
oldPorts := ep.Ports
if previewService, ok := weightedServices[previewServiceName]; strings.HasPrefix(key, common.BlueGreenRolloutPreviewPrefix+common.Sep) && ok {
previewServiceInstance := previewService.Service
localFqdn := previewServiceInstance.Name + common.Sep + previewServiceInstance.Namespace + common.DotLocalDomainSuffix
cnames[localFqdn] = "1"
ep.Address = localFqdn
ep.Ports = GetMeshPortsForRollout(sourceCluster, previewServiceInstance, sourceRollouts[sourceCluster])
} else if activeService, ok := weightedServices[activeServiceName]; ok {
activeServiceInstance := activeService.Service
localFqdn := activeServiceInstance.Name + common.Sep + activeServiceInstance.Namespace + common.DotLocalDomainSuffix
cnames[localFqdn] = "1"
ep.Address = localFqdn
ep.Ports = GetMeshPortsForRollout(sourceCluster, activeServiceInstance, sourceRollouts[sourceCluster])
}
updateEndpointsForBlueGreen(sourceRollouts[sourceCluster], weightedServices, cnames, ep, sourceCluster, key)

AddServiceEntriesWithDr(remoteRegistry.AdmiralCache, map[string]string{sourceCluster: sourceCluster}, remoteRegistry.RemoteControllers,
map[string]*networking.ServiceEntry{key: serviceEntry})
//swap it back to use for next iteration
@@ -180,6 +166,7 @@ func modifyServiceEntryForNewServiceOrPod(event admiral.EventType, env string, s
// see if we have weighted services (rollouts with canary strategy)
} else if len(weightedServices) > 1 {
//add one endpoint per service; may need to modify
meshPorts = GetMeshPortsForRollout(sourceCluster, serviceInstance, sourceRollouts[sourceCluster])
var se = copyServiceEntry(serviceEntry)
updateEndpointsForWeightedServices(se, weightedServices, clusterIngress, meshPorts)
AddServiceEntriesWithDr(remoteRegistry.AdmiralCache, map[string]string{sourceCluster: sourceCluster}, remoteRegistry.RemoteControllers,
@@ -209,6 +196,26 @@ func modifyServiceEntryForNewServiceOrPod(event admiral.EventType, env string, s
return serviceEntries
}

func updateEndpointsForBlueGreen(rollout *argo.Rollout, weightedServices map[string]*WeightedService, cnames map[string]string,
ep *networking.ServiceEntry_Endpoint, sourceCluster string, meshHost string) {
activeServiceName := rollout.Spec.Strategy.BlueGreen.ActiveService
previewServiceName := rollout.Spec.Strategy.BlueGreen.PreviewService

if previewService, ok := weightedServices[previewServiceName]; strings.HasPrefix(meshHost, common.BlueGreenRolloutPreviewPrefix+common.Sep) && ok {
previewServiceInstance := previewService.Service
localFqdn := previewServiceInstance.Name + common.Sep + previewServiceInstance.Namespace + common.DotLocalDomainSuffix
cnames[localFqdn] = "1"
ep.Address = localFqdn
ep.Ports = GetMeshPortsForRollout(sourceCluster, previewServiceInstance, rollout)
} else if activeService, ok := weightedServices[activeServiceName]; ok {
activeServiceInstance := activeService.Service
localFqdn := activeServiceInstance.Name + common.Sep + activeServiceInstance.Namespace + common.DotLocalDomainSuffix
cnames[localFqdn] = "1"
ep.Address = localFqdn
ep.Ports = GetMeshPortsForRollout(sourceCluster, activeServiceInstance, rollout)
}
}

//update endpoints for Argo rollouts specific Service Entries to account for traffic splitting (Canary strategy)
func updateEndpointsForWeightedServices(serviceEntry *networking.ServiceEntry, weightedServices map[string]*WeightedService, clusterIngress string, meshPorts map[string]uint32) {
var endpoints = make([]*networking.ServiceEntry_Endpoint, 0)
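A minimal sketch of how the newly extracted updateEndpointsForBlueGreen helper could be exercised in isolation (this test is not part of the commit). It assumes the argo-rollouts v1alpha1 types and the common-package constants (common.Sep, common.DotLocalDomainSuffix, common.BlueGreenRolloutPreviewPrefix) behave exactly as used in the diff above; the fixture names (active-svc, preview-svc, test-ns, qal.greeting.global, east.aws.lb) are hypothetical.

```go
package clusters

import (
	"testing"

	argo "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
	"github.com/istio-ecosystem/admiral/admiral/pkg/controller/common"
	networking "istio.io/api/networking/v1alpha3"
	k8sV1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestUpdateEndpointsForBlueGreen(t *testing.T) {
	// Rollout with a blue-green strategy, matching the fields the helper reads.
	rollout := &argo.Rollout{
		Spec: argo.RolloutSpec{
			Strategy: argo.RolloutStrategy{
				BlueGreen: &argo.BlueGreenStrategy{
					ActiveService:  "active-svc",
					PreviewService: "preview-svc",
				},
			},
		},
	}
	// Hypothetical fixture services; only Name/Namespace matter for the FQDN rewrite.
	weightedServices := map[string]*WeightedService{
		"active-svc":  {Service: &k8sV1.Service{ObjectMeta: metav1.ObjectMeta{Name: "active-svc", Namespace: "test-ns"}}},
		"preview-svc": {Service: &k8sV1.Service{ObjectMeta: metav1.ObjectMeta{Name: "preview-svc", Namespace: "test-ns"}}},
	}

	testCases := []struct {
		name        string
		meshHost    string
		wantAddress string
	}{
		{
			name:        "preview SE host is rewritten to the preview service local fqdn",
			meshHost:    common.BlueGreenRolloutPreviewPrefix + common.Sep + "qal.greeting.global",
			wantAddress: "preview-svc" + common.Sep + "test-ns" + common.DotLocalDomainSuffix,
		},
		{
			name:        "regular SE host is rewritten to the active service local fqdn",
			meshHost:    "qal.greeting.global",
			wantAddress: "active-svc" + common.Sep + "test-ns" + common.DotLocalDomainSuffix,
		},
	}

	for _, c := range testCases {
		t.Run(c.name, func(t *testing.T) {
			cnames := make(map[string]string)
			// Endpoint initially points at the cluster ingress, as in the caller's loop.
			ep := &networking.ServiceEntry_Endpoint{Address: "east.aws.lb"}
			updateEndpointsForBlueGreen(rollout, weightedServices, cnames, ep, "test.cluster", c.meshHost)
			if ep.Address != c.wantAddress {
				t.Errorf("expected endpoint address %q, got %q", c.wantAddress, ep.Address)
			}
			if _, ok := cnames[c.wantAddress]; !ok {
				t.Errorf("expected %q to be recorded in cnames", c.wantAddress)
			}
		})
	}
}
```

Pulling the blue-green endpoint rewrite out of modifyServiceEntryForNewServiceOrPod is what makes this kind of focused table-driven check possible without standing up the full remote-registry fixture used elsewhere in the package.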
