From f0fc34184f4e8ab40c5d7c51e17c6505ff002e93 Mon Sep 17 00:00:00 2001 From: Satish Matti Date: Fri, 20 Sep 2019 10:47:10 -0700 Subject: [PATCH] Cleanup backend namer workflow --- pkg/backends/backends.go | 6 +- pkg/backends/ig_linker.go | 10 +- pkg/backends/ig_linker_test.go | 16 +-- pkg/backends/integration_test.go | 12 +- pkg/backends/neg_linker.go | 10 +- pkg/backends/neg_linker_test.go | 15 +-- pkg/backends/syncer.go | 12 +- pkg/backends/syncer_test.go | 111 +++++++++--------- pkg/controller/controller.go | 12 +- pkg/controller/controller_test.go | 16 ++- pkg/controller/translator/translator.go | 12 +- pkg/controller/translator/translator_test.go | 10 +- pkg/firewalls/controller.go | 2 +- pkg/healthchecks/healthchecks.go | 8 +- pkg/healthchecks/healthchecks_test.go | 26 ++--- pkg/instances/instances.go | 4 +- pkg/loadbalancers/loadbalancers_test.go | 116 +++++++++---------- pkg/loadbalancers/url_maps.go | 4 +- pkg/loadbalancers/url_maps_test.go | 12 +- pkg/utils/namer/interfaces.go | 31 +++++ pkg/utils/serviceport.go | 13 ++- 21 files changed, 241 insertions(+), 217 deletions(-) create mode 100644 pkg/utils/namer/interfaces.go diff --git a/pkg/backends/backends.go b/pkg/backends/backends.go index c8420458f6..5930c9fafc 100644 --- a/pkg/backends/backends.go +++ b/pkg/backends/backends.go @@ -30,7 +30,7 @@ import ( // Backends handles CRUD operations for backends. type Backends struct { cloud *gce.Cloud - namer *namer.Namer + namer namer.BackendNamer } // Backends is a Pool. @@ -39,7 +39,7 @@ var _ Pool = (*Backends)(nil) // NewPool returns a new backend pool. // - cloud: implements BackendServices // - namer: produces names for backends. -func NewPool(cloud *gce.Cloud, namer *namer.Namer) *Backends { +func NewPool(cloud *gce.Cloud, namer namer.BackendNamer) *Backends { return &Backends{ cloud: cloud, namer: namer, @@ -60,7 +60,7 @@ func ensureDescription(be *composite.BackendService, sp *utils.ServicePort) (nee // Create implements Pool. 
func (b *Backends) Create(sp utils.ServicePort, hcLink string) (*composite.BackendService, error) { - name := sp.BackendName(b.namer) + name := sp.BackendName() namedPort := &compute.NamedPort{ Name: b.namer.NamedPort(sp.NodePort), Port: sp.NodePort, diff --git a/pkg/backends/ig_linker.go b/pkg/backends/ig_linker.go index 0288a9e5a5..cee726e9b8 100644 --- a/pkg/backends/ig_linker.go +++ b/pkg/backends/ig_linker.go @@ -23,7 +23,6 @@ import ( "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/instances" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/ingress-gce/pkg/utils/namer" "k8s.io/klog" ) @@ -66,7 +65,6 @@ const maxRPS = 1 type instanceGroupLinker struct { instancePool instances.NodePool backendPool Pool - namer *namer.Namer } // instanceGroupLinker is a Linker @@ -74,12 +72,10 @@ var _ Linker = (*instanceGroupLinker)(nil) func NewInstanceGroupLinker( instancePool instances.NodePool, - backendPool Pool, - namer *namer.Namer) Linker { + backendPool Pool) Linker { return &instanceGroupLinker{ instancePool: instancePool, backendPool: backendPool, - namer: namer, } } @@ -87,7 +83,7 @@ func NewInstanceGroupLinker( func (l *instanceGroupLinker) Link(sp utils.ServicePort, groups []GroupKey) error { var igLinks []string for _, group := range groups { - ig, err := l.instancePool.Get(l.namer.InstanceGroup(), group.Zone) + ig, err := l.instancePool.Get(sp.IGName(), group.Zone) if err != nil { return fmt.Errorf("error retrieving IG for linking with backend %+v: %v", sp, err) } @@ -97,7 +93,7 @@ func (l *instanceGroupLinker) Link(sp utils.ServicePort, groups []GroupKey) erro // ig_linker only supports L7 HTTP(s) External Load Balancer // Hardcoded here since IGs are not supported for non GA-Global right now // TODO(shance): find a way to remove hardcoded values - be, err := l.backendPool.Get(sp.BackendName(l.namer), meta.VersionGA, meta.Global) + be, err := l.backendPool.Get(sp.BackendName(), meta.VersionGA, meta.Global) if err != nil { return err } diff --git 
a/pkg/backends/ig_linker_test.go b/pkg/backends/ig_linker_test.go index d5efb1a914..969a6ed930 100644 --- a/pkg/backends/ig_linker_test.go +++ b/pkg/backends/ig_linker_test.go @@ -15,17 +15,17 @@ package backends import ( "context" - "k8s.io/ingress-gce/pkg/backends/features" "net/http" "testing" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta" "github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/mock" - compute "google.golang.org/api/compute/v1" + "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/ingress-gce/pkg/annotations" + "k8s.io/ingress-gce/pkg/backends/features" "k8s.io/ingress-gce/pkg/instances" "k8s.io/ingress-gce/pkg/utils" "k8s.io/legacy-cloud-providers/gce" @@ -42,7 +42,7 @@ func newTestIGLinker(fakeGCE *gce.Cloud, fakeInstancePool instances.NodePool) *i (fakeGCE.Compute().(*cloud.MockGCE)).MockBetaBackendServices.UpdateHook = mock.UpdateBetaBackendServiceHook (fakeGCE.Compute().(*cloud.MockGCE)).MockBackendServices.UpdateHook = mock.UpdateBackendServiceHook - return &instanceGroupLinker{fakeInstancePool, fakeBackendPool, defaultNamer} + return &instanceGroupLinker{fakeInstancePool, fakeBackendPool} } func TestLink(t *testing.T) { @@ -51,7 +51,7 @@ func TestLink(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) linker := newTestIGLinker(fakeGCE, fakeNodePool) - sp := utils.ServicePort{NodePort: 8080, Protocol: annotations.ProtocolHTTP} + sp := utils.ServicePort{NodePort: 8080, Protocol: annotations.ProtocolHTTP, BackendNamer: defaultNamer} // Mimic the instance group being created if _, err := linker.instancePool.EnsureInstanceGroupsAndPorts(defaultNamer.InstanceGroup(), []int64{sp.NodePort}); err != nil { @@ -65,7 +65,7 @@ func TestLink(t *testing.T) { t.Fatalf("%v", err) } - be, err := fakeGCE.GetGlobalBackendService(sp.BackendName(defaultNamer)) + 
be, err := fakeGCE.GetGlobalBackendService(sp.BackendName()) if err != nil { t.Fatalf("%v", err) } @@ -81,7 +81,7 @@ func TestLinkWithCreationModeError(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) linker := newTestIGLinker(fakeGCE, fakeNodePool) - sp := utils.ServicePort{NodePort: 8080, Protocol: annotations.ProtocolHTTP} + sp := utils.ServicePort{NodePort: 8080, Protocol: annotations.ProtocolHTTP, BackendNamer: defaultNamer} modes := []BalancingMode{Rate, Utilization} // block the update of Backends with the given balancingMode @@ -109,7 +109,7 @@ func TestLinkWithCreationModeError(t *testing.T) { t.Fatalf("%v", err) } - be, err := fakeGCE.GetGlobalBackendService(sp.BackendName(defaultNamer)) + be, err := fakeGCE.GetGlobalBackendService(sp.BackendName()) if err != nil { t.Fatalf("%v", err) } @@ -123,6 +123,6 @@ func TestLinkWithCreationModeError(t *testing.T) { t.Fatalf("Wrong balancing mode, expected %v got %v", modes[(i+1)%len(modes)], b.BalancingMode) } } - linker.backendPool.Delete(sp.BackendName(defaultNamer), features.VersionFromServicePort(&sp), features.ScopeFromServicePort(&sp)) + linker.backendPool.Delete(sp.BackendName(), features.VersionFromServicePort(&sp), features.ScopeFromServicePort(&sp)) } } diff --git a/pkg/backends/integration_test.go b/pkg/backends/integration_test.go index e01c734ef7..ca3e77a754 100644 --- a/pkg/backends/integration_test.go +++ b/pkg/backends/integration_test.go @@ -41,7 +41,7 @@ type Jig struct { } func newTestJig(fakeGCE *gce.Cloud) *Jig { - fakeHealthChecks := healthchecks.NewHealthChecker(fakeGCE, "/", "/healthz", defaultNamer, defaultBackendSvc) + fakeHealthChecks := healthchecks.NewHealthChecker(fakeGCE, "/", "/healthz", defaultBackendSvc) fakeBackendPool := NewPool(fakeGCE, defaultNamer) fakeIGs := instances.NewFakeInstanceGroups(sets.NewString(), defaultNamer) @@ -58,8 +58,8 @@ func newTestJig(fakeGCE *gce.Cloud) *Jig { return &Jig{ fakeInstancePool: fakeInstancePool, - linker: 
NewInstanceGroupLinker(fakeInstancePool, fakeBackendPool, defaultNamer), - syncer: NewBackendSyncer(fakeBackendPool, fakeHealthChecks, defaultNamer, fakeGCE), + linker: NewInstanceGroupLinker(fakeInstancePool, fakeBackendPool), + syncer: NewBackendSyncer(fakeBackendPool, fakeHealthChecks, fakeGCE), pool: fakeBackendPool, } } @@ -68,7 +68,7 @@ func TestBackendInstanceGroupClobbering(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) jig := newTestJig(fakeGCE) - sp := utils.ServicePort{NodePort: 80} + sp := utils.ServicePort{NodePort: 80, BackendNamer: defaultNamer} _, err := jig.fakeInstancePool.EnsureInstanceGroupsAndPorts(defaultNamer.InstanceGroup(), []int64{sp.NodePort}) if err != nil { t.Fatalf("Did not expect error when ensuring IG for ServicePort %+v: %v", sp, err) @@ -140,7 +140,7 @@ func TestSyncChaosMonkey(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) jig := newTestJig(fakeGCE) - sp := utils.ServicePort{NodePort: 8080, Protocol: annotations.ProtocolHTTP} + sp := utils.ServicePort{NodePort: 8080, Protocol: annotations.ProtocolHTTP, BackendNamer: defaultNamer} _, err := jig.fakeInstancePool.EnsureInstanceGroupsAndPorts(defaultNamer.InstanceGroup(), []int64{sp.NodePort}) if err != nil { @@ -154,7 +154,7 @@ func TestSyncChaosMonkey(t *testing.T) { t.Fatalf("Did not expect error when linking backend with port %v to groups, err: %v", sp.NodePort, err) } - beName := sp.BackendName(defaultNamer) + beName := sp.BackendName() be, err := fakeGCE.GetGlobalBackendService(beName) if err != nil { t.Fatalf("%v", err) diff --git a/pkg/backends/neg_linker.go b/pkg/backends/neg_linker.go index 23e392889a..b5218f1e3c 100644 --- a/pkg/backends/neg_linker.go +++ b/pkg/backends/neg_linker.go @@ -19,7 +19,6 @@ import ( befeatures "k8s.io/ingress-gce/pkg/backends/features" "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/ingress-gce/pkg/utils/namer" "k8s.io/legacy-cloud-providers/gce" ) @@ 
-27,7 +26,6 @@ import ( type negLinker struct { backendPool Pool negGetter NEGGetter - namer *namer.Namer cloud *gce.Cloud } @@ -37,12 +35,10 @@ var _ Linker = (*negLinker)(nil) func NewNEGLinker( backendPool Pool, negGetter NEGGetter, - namer *namer.Namer, cloud *gce.Cloud) Linker { return &negLinker{ backendPool: backendPool, negGetter: negGetter, - namer: namer, cloud: cloud, } } @@ -53,10 +49,10 @@ func (l *negLinker) Link(sp utils.ServicePort, groups []GroupKey) error { var err error for _, group := range groups { // If the group key contains a name, then use that. - // Otherwise, generate the name using the namer. + // Otherwise, get the name from svc port. negName := group.Name if negName == "" { - negName = sp.BackendName(l.namer) + negName = sp.BackendName() } neg, err := l.negGetter.GetNetworkEndpointGroup(negName, group.Zone) if err != nil { @@ -65,7 +61,7 @@ func (l *negLinker) Link(sp utils.ServicePort, groups []GroupKey) error { negs = append(negs, neg) } - beName := sp.BackendName(l.namer) + beName := sp.BackendName() version := befeatures.VersionFromServicePort(&sp) scope := befeatures.ScopeFromServicePort(&sp) diff --git a/pkg/backends/neg_linker_test.go b/pkg/backends/neg_linker_test.go index 98b41524e3..96a822941d 100644 --- a/pkg/backends/neg_linker_test.go +++ b/pkg/backends/neg_linker_test.go @@ -35,7 +35,7 @@ func newTestNEGLinker(fakeNEG negtypes.NetworkEndpointGroupCloud, fakeGCE *gce.C (fakeGCE.Compute().(*cloud.MockGCE)).MockBetaBackendServices.UpdateHook = mock.UpdateBetaBackendServiceHook (fakeGCE.Compute().(*cloud.MockGCE)).MockBackendServices.UpdateHook = mock.UpdateBackendServiceHook - return &negLinker{fakeBackendPool, fakeNEG, defaultNamer, fakeGCE} + return &negLinker{fakeBackendPool, fakeNEG, fakeGCE} } func TestLinkBackendServiceToNEG(t *testing.T) { @@ -53,11 +53,12 @@ func TestLinkBackendServiceToNEG(t *testing.T) { Name: name, }, }, - Port: 80, - NodePort: 30001, - Protocol: annotations.ProtocolHTTP, - TargetPort: port, - 
NEGEnabled: true, + Port: 80, + NodePort: 30001, + Protocol: annotations.ProtocolHTTP, + TargetPort: port, + NEGEnabled: true, + BackendNamer: defaultNamer, } // Mimic how the syncer would create the backend. @@ -76,7 +77,7 @@ func TestLinkBackendServiceToNEG(t *testing.T) { t.Fatalf("Failed to link backend service to NEG: %v", err) } - beName := svcPort.BackendName(defaultNamer) + beName := svcPort.BackendName() bs, err := fakeGCE.GetGlobalBackendService(beName) if err != nil { t.Fatalf("Failed to retrieve backend service: %v", err) diff --git a/pkg/backends/syncer.go b/pkg/backends/syncer.go index 24445c50c8..a7b331bc6e 100644 --- a/pkg/backends/syncer.go +++ b/pkg/backends/syncer.go @@ -27,7 +27,6 @@ import ( "k8s.io/ingress-gce/pkg/healthchecks" lbfeatures "k8s.io/ingress-gce/pkg/loadbalancers/features" "k8s.io/ingress-gce/pkg/utils" - "k8s.io/ingress-gce/pkg/utils/namer" "k8s.io/klog" "k8s.io/legacy-cloud-providers/gce" ) @@ -37,7 +36,6 @@ type backendSyncer struct { backendPool Pool healthChecker healthchecks.HealthChecker prober ProbeProvider - namer *namer.Namer cloud *gce.Cloud } @@ -47,12 +45,10 @@ var _ Syncer = (*backendSyncer)(nil) func NewBackendSyncer( backendPool Pool, healthChecker healthchecks.HealthChecker, - namer *namer.Namer, cloud *gce.Cloud) Syncer { return &backendSyncer{ backendPool: backendPool, healthChecker: healthChecker, - namer: namer, cloud: cloud, } } @@ -78,7 +74,7 @@ func (s *backendSyncer) ensureBackendService(sp utils.ServicePort) error { // We must track the ports even if creating the backends failed, because // we might've created health-check for them. be := &composite.BackendService{} - beName := sp.BackendName(s.namer) + beName := sp.BackendName() version := features.VersionFromServicePort(&sp) scope := features.ScopeFromServicePort(&sp) @@ -141,7 +137,7 @@ func (s *backendSyncer) ensureBackendService(sp utils.ServicePort) error { // GC implements Syncer. 
func (s *backendSyncer) GC(svcPorts []utils.ServicePort) error { - knownPorts, err := knownPortsFromServicePorts(s.cloud, s.namer, svcPorts) + knownPorts, err := knownPortsFromServicePorts(s.cloud, svcPorts) if err != nil { return err } @@ -216,11 +212,11 @@ func (s *backendSyncer) gc(backends []*composite.BackendService, knownPorts sets } // TODO: (shance) add unit tests -func knownPortsFromServicePorts(cloud *gce.Cloud, namer *namer.Namer, svcPorts []utils.ServicePort) (sets.String, error) { +func knownPortsFromServicePorts(cloud *gce.Cloud, svcPorts []utils.ServicePort) (sets.String, error) { knownPorts := sets.NewString() for _, sp := range svcPorts { - name := sp.BackendName(namer) + name := sp.BackendName() if key, err := composite.CreateKey(cloud, name, features.ScopeFromServicePort(&sp)); err != nil { return nil, err } else { diff --git a/pkg/backends/syncer_test.go b/pkg/backends/syncer_test.go index 26e2aa3582..77c9ccacad 100644 --- a/pkg/backends/syncer_test.go +++ b/pkg/backends/syncer_test.go @@ -94,7 +94,7 @@ func (p *portset) del(ports []utils.ServicePort) error { func (p *portset) check(fakeGCE *gce.Cloud) error { for sp, _ := range p.all { _, found := p.existing[sp] - beName := sp.BackendName(defaultNamer) + beName := sp.BackendName() key, err := composite.CreateKey(fakeGCE, beName, features.ScopeFromServicePort(&sp)) if err != nil { return fmt.Errorf("Error creating key for backend service %s: %v", beName, err) @@ -131,18 +131,17 @@ var ( ) func newTestSyncer(fakeGCE *gce.Cloud) *backendSyncer { - fakeHealthChecks := healthchecks.NewHealthChecker(fakeGCE, "/", "/healthz", defaultNamer, defaultBackendSvc) + fakeHealthChecks := healthchecks.NewHealthChecker(fakeGCE, "/", "/healthz", defaultBackendSvc) fakeBackendPool := NewPool(fakeGCE, defaultNamer) syncer := &backendSyncer{ backendPool: fakeBackendPool, healthChecker: fakeHealthChecks, - namer: defaultNamer, cloud: fakeGCE, } - probes := map[utils.ServicePort]*api_v1.Probe{{NodePort: 443, 
Protocol: annotations.ProtocolHTTPS}: existingProbe} + probes := map[utils.ServicePort]*api_v1.Probe{{NodePort: 443, Protocol: annotations.ProtocolHTTPS, BackendNamer: defaultNamer}: existingProbe} syncer.Init(NewFakeProbeProvider(probes)) // Add standard hooks for mocking update calls. Each test can set a different update hook if it chooses to. @@ -162,10 +161,10 @@ func TestSync(t *testing.T) { syncer := newTestSyncer(fakeGCE) testCases := []utils.ServicePort{ - {NodePort: 80, Protocol: annotations.ProtocolHTTP}, + {NodePort: 80, Protocol: annotations.ProtocolHTTP, BackendNamer: defaultNamer}, // Note: 443 gets its healthcheck from a probe - {NodePort: 443, Protocol: annotations.ProtocolHTTPS}, - {NodePort: 3000, Protocol: annotations.ProtocolHTTP2}, + {NodePort: 443, Protocol: annotations.ProtocolHTTPS, BackendNamer: defaultNamer}, + {NodePort: 3000, Protocol: annotations.ProtocolHTTP2, BackendNamer: defaultNamer}, } for _, sp := range testCases { @@ -173,7 +172,7 @@ func TestSync(t *testing.T) { if err := syncer.Sync([]utils.ServicePort{sp}); err != nil { t.Fatalf("Unexpected error when syncing backend with port %v: %v", sp.NodePort, err) } - beName := sp.BackendName(defaultNamer) + beName := sp.BackendName() // Check that the new backend has the right port be, err := syncer.backendPool.Get(beName, features.VersionFromServicePort(&sp), features.ScopeFromServicePort(&sp)) @@ -204,9 +203,9 @@ func TestSyncUpdateHTTPS(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) syncer := newTestSyncer(fakeGCE) - p := utils.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP} + p := utils.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP, BackendNamer: defaultNamer} syncer.Sync([]utils.ServicePort{p}) - beName := p.BackendName(defaultNamer) + beName := p.BackendName() be, err := syncer.backendPool.Get(beName, features.VersionFromServicePort(&p), features.ScopeFromServicePort(&p)) if err != nil { @@ -248,9 +247,9 @@ func 
TestSyncUpdateHTTP2(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) syncer := newTestSyncer(fakeGCE) - p := utils.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP} + p := utils.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP, BackendNamer: defaultNamer} syncer.Sync([]utils.ServicePort{p}) - beName := p.BackendName(defaultNamer) + beName := p.BackendName() be, err := syncer.backendPool.Get(beName, features.VersionFromServicePort(&p), features.ScopeFromServicePort(&p)) if err != nil { @@ -294,9 +293,9 @@ func TestGC(t *testing.T) { syncer := newTestSyncer(fakeGCE) svcNodePorts := []utils.ServicePort{ - {NodePort: 81, Protocol: annotations.ProtocolHTTP}, - {NodePort: 82, Protocol: annotations.ProtocolHTTPS}, - {NodePort: 83, Protocol: annotations.ProtocolHTTP}, + {NodePort: 81, Protocol: annotations.ProtocolHTTP, BackendNamer: defaultNamer}, + {NodePort: 82, Protocol: annotations.ProtocolHTTPS, BackendNamer: defaultNamer}, + {NodePort: 83, Protocol: annotations.ProtocolHTTP, BackendNamer: defaultNamer}, } ps := newPortset(svcNodePorts) if err := ps.add(svcNodePorts); err != nil { @@ -340,12 +339,12 @@ func TestGCMixed(t *testing.T) { syncer := newTestSyncer(fakeGCE) svcNodePorts := []utils.ServicePort{ - {NodePort: 81, Protocol: annotations.ProtocolHTTP}, - {NodePort: 82, Protocol: annotations.ProtocolHTTPS}, - {NodePort: 83, Protocol: annotations.ProtocolHTTP}, - {NodePort: 84, Protocol: annotations.ProtocolHTTP, NEGEnabled: true, L7ILBEnabled: true}, - {NodePort: 85, Protocol: annotations.ProtocolHTTPS, NEGEnabled: true, L7ILBEnabled: true}, - {NodePort: 86, Protocol: annotations.ProtocolHTTP, NEGEnabled: true, L7ILBEnabled: true}, + {NodePort: 81, Protocol: annotations.ProtocolHTTP, BackendNamer: defaultNamer}, + {NodePort: 82, Protocol: annotations.ProtocolHTTPS, BackendNamer: defaultNamer}, + {NodePort: 83, Protocol: annotations.ProtocolHTTP, BackendNamer: defaultNamer}, + {NodePort: 84, Protocol: 
annotations.ProtocolHTTP, NEGEnabled: true, L7ILBEnabled: true, BackendNamer: defaultNamer}, + {NodePort: 85, Protocol: annotations.ProtocolHTTPS, NEGEnabled: true, L7ILBEnabled: true, BackendNamer: defaultNamer}, + {NodePort: 86, Protocol: annotations.ProtocolHTTP, NEGEnabled: true, L7ILBEnabled: true, BackendNamer: defaultNamer}, } ps := newPortset(svcNodePorts) if err := ps.add(svcNodePorts); err != nil { @@ -391,62 +390,62 @@ func TestSyncQuota(t *testing.T) { desc string }{ { - []utils.ServicePort{{NodePort: 8080}}, - []utils.ServicePort{{NodePort: 8080}}, + []utils.ServicePort{{NodePort: 8080, BackendNamer: defaultNamer}}, + []utils.ServicePort{{NodePort: 8080, BackendNamer: defaultNamer}}, false, "Same port", }, { - []utils.ServicePort{{NodePort: 8080}}, - []utils.ServicePort{{NodePort: 9000}}, + []utils.ServicePort{{NodePort: 8080, BackendNamer: defaultNamer}}, + []utils.ServicePort{{NodePort: 9000, BackendNamer: defaultNamer}}, true, "Different port", }, { - []utils.ServicePort{{NodePort: 8080}}, - []utils.ServicePort{{NodePort: 8080}, {NodePort: 443}}, + []utils.ServicePort{{NodePort: 8080, BackendNamer: defaultNamer}}, + []utils.ServicePort{{NodePort: 8080, BackendNamer: defaultNamer}, {NodePort: 443, BackendNamer: defaultNamer}}, false, "Same port plus additional port", }, { - []utils.ServicePort{{NodePort: 8080}}, - []utils.ServicePort{{NodePort: 3000}, {NodePort: 4000}, {NodePort: 5000}}, + []utils.ServicePort{{NodePort: 8080, BackendNamer: defaultNamer}}, + []utils.ServicePort{{NodePort: 3000, BackendNamer: defaultNamer}, {NodePort: 4000, BackendNamer: defaultNamer}, {NodePort: 5000, BackendNamer: defaultNamer}}, true, "New set of ports not including the same port", }, // Need to fill the TargetPort field on ServicePort to make sure // NEG Backend naming is unique { - []utils.ServicePort{{NodePort: 8080}, {NodePort: 443}}, + []utils.ServicePort{{NodePort: 8080, BackendNamer: defaultNamer}, {NodePort: 443, BackendNamer: defaultNamer}}, 
[]utils.ServicePort{ - {Port: 8080, NodePort: 8080, NEGEnabled: true}, - {Port: 443, NodePort: 443, NEGEnabled: true}, + {Port: 8080, NodePort: 8080, NEGEnabled: true, BackendNamer: defaultNamer}, + {Port: 443, NodePort: 443, NEGEnabled: true, BackendNamer: defaultNamer}, }, true, "Same port converted to NEG, plus one new NEG port", }, { []utils.ServicePort{ - {Port: 80, NodePort: 80, NEGEnabled: true}, - {Port: 90, NodePort: 90}, + {Port: 80, NodePort: 80, NEGEnabled: true, BackendNamer: defaultNamer}, + {Port: 90, NodePort: 90, BackendNamer: defaultNamer}, }, []utils.ServicePort{ - {Port: 80}, - {Port: 90, NEGEnabled: true}, + {Port: 80, BackendNamer: defaultNamer}, + {Port: 90, NEGEnabled: true, BackendNamer: defaultNamer}, }, true, "Mixed NEG and non-NEG ports", }, { []utils.ServicePort{ - {Port: 100, NodePort: 100, NEGEnabled: true}, - {Port: 110, NodePort: 110, NEGEnabled: true}, - {Port: 120, NodePort: 120, NEGEnabled: true}, + {Port: 100, NodePort: 100, NEGEnabled: true, BackendNamer: defaultNamer}, + {Port: 110, NodePort: 110, NEGEnabled: true, BackendNamer: defaultNamer}, + {Port: 120, NodePort: 120, NEGEnabled: true, BackendNamer: defaultNamer}, }, []utils.ServicePort{ - {Port: 100, NodePort: 100}, - {Port: 110, NodePort: 110}, - {Port: 120, NodePort: 120}, + {Port: 100, NodePort: 100, BackendNamer: defaultNamer}, + {Port: 110, NodePort: 110, BackendNamer: defaultNamer}, + {Port: 120, NodePort: 120, BackendNamer: defaultNamer}, }, true, "Same ports as NEG, then non-NEG", @@ -509,12 +508,12 @@ func TestSyncNEG(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) syncer := newTestSyncer(fakeGCE) - svcPort := utils.ServicePort{NodePort: 81, Protocol: annotations.ProtocolHTTP} + svcPort := utils.ServicePort{NodePort: 81, Protocol: annotations.ProtocolHTTP, BackendNamer: defaultNamer} if err := syncer.Sync([]utils.ServicePort{svcPort}); err != nil { t.Errorf("Expected backend pool to add node ports, err: %v", err) } - nodePortName := 
svcPort.BackendName(defaultNamer) + nodePortName := svcPort.BackendName() _, err := fakeGCE.GetGlobalBackendService(nodePortName) if err != nil { t.Fatalf("Failed to get backend service: %v", err) @@ -526,7 +525,7 @@ func TestSyncNEG(t *testing.T) { t.Errorf("Expected backend pool to add node ports, err: %v", err) } - negName := svcPort.BackendName(defaultNamer) + negName := svcPort.BackendName() _, err = fakeGCE.GetGlobalBackendService(negName) if err != nil { t.Fatalf("Failed to get backend service with name %v: %v", negName, err) @@ -558,7 +557,7 @@ func TestShutdown(t *testing.T) { syncer := newTestSyncer(fakeGCE) // Sync a backend and verify that it doesn't exist after Shutdown() - syncer.Sync([]utils.ServicePort{{NodePort: 80}}) + syncer.Sync([]utils.ServicePort{{NodePort: 80, BackendNamer: defaultNamer}}) syncer.Shutdown() if _, err := fakeGCE.GetGlobalBackendService(defaultNamer.IGBackend(80)); err == nil { t.Fatalf("%v", err) @@ -596,9 +595,9 @@ func TestEnsureBackendServiceProtocol(t *testing.T) { syncer := newTestSyncer(fakeGCE) svcPorts := []utils.ServicePort{ - {NodePort: 80, Protocol: annotations.ProtocolHTTP, ID: utils.ServicePortID{Port: intstr.FromInt(1)}}, - {NodePort: 443, Protocol: annotations.ProtocolHTTPS, ID: utils.ServicePortID{Port: intstr.FromInt(2)}}, - {NodePort: 3000, Protocol: annotations.ProtocolHTTP2, ID: utils.ServicePortID{Port: intstr.FromInt(3)}}, + {NodePort: 80, Protocol: annotations.ProtocolHTTP, ID: utils.ServicePortID{Port: intstr.FromInt(1)}, BackendNamer: defaultNamer}, + {NodePort: 443, Protocol: annotations.ProtocolHTTPS, ID: utils.ServicePortID{Port: intstr.FromInt(2)}, BackendNamer: defaultNamer}, + {NodePort: 3000, Protocol: annotations.ProtocolHTTP2, ID: utils.ServicePortID{Port: intstr.FromInt(3)}, BackendNamer: defaultNamer}, } for _, oldPort := range svcPorts { @@ -607,7 +606,7 @@ func TestEnsureBackendServiceProtocol(t *testing.T) { fmt.Sprintf("Updating Port:%v Protocol:%v to Port:%v Protocol:%v", 
oldPort.NodePort, oldPort.Protocol, newPort.NodePort, newPort.Protocol), func(t *testing.T) { syncer.Sync([]utils.ServicePort{oldPort}) - be, err := syncer.backendPool.Get(oldPort.BackendName(defaultNamer), features.VersionFromServicePort(&oldPort), features.ScopeFromServicePort(&oldPort)) + be, err := syncer.backendPool.Get(oldPort.BackendName(), features.VersionFromServicePort(&oldPort), features.ScopeFromServicePort(&oldPort)) if err != nil { t.Fatalf("%v", err) } @@ -640,9 +639,9 @@ func TestEnsureBackendServiceDescription(t *testing.T) { syncer := newTestSyncer(fakeGCE) svcPorts := []utils.ServicePort{ - {NodePort: 80, Protocol: annotations.ProtocolHTTP, ID: utils.ServicePortID{Port: intstr.FromInt(1)}}, - {NodePort: 443, Protocol: annotations.ProtocolHTTPS, ID: utils.ServicePortID{Port: intstr.FromInt(2)}}, - {NodePort: 3000, Protocol: annotations.ProtocolHTTP2, ID: utils.ServicePortID{Port: intstr.FromInt(3)}}, + {NodePort: 80, Protocol: annotations.ProtocolHTTP, ID: utils.ServicePortID{Port: intstr.FromInt(1)}, BackendNamer: defaultNamer}, + {NodePort: 443, Protocol: annotations.ProtocolHTTPS, ID: utils.ServicePortID{Port: intstr.FromInt(2)}, BackendNamer: defaultNamer}, + {NodePort: 3000, Protocol: annotations.ProtocolHTTP2, ID: utils.ServicePortID{Port: intstr.FromInt(3)}, BackendNamer: defaultNamer}, } for _, oldPort := range svcPorts { @@ -651,7 +650,7 @@ func TestEnsureBackendServiceDescription(t *testing.T) { fmt.Sprintf("Updating Port:%v Protocol:%v to Port:%v Protocol:%v", oldPort.NodePort, oldPort.Protocol, newPort.NodePort, newPort.Protocol), func(t *testing.T) { syncer.Sync([]utils.ServicePort{oldPort}) - be, err := syncer.backendPool.Get(oldPort.BackendName(defaultNamer), features.VersionFromServicePort(&oldPort), features.ScopeFromServicePort(&oldPort)) + be, err := syncer.backendPool.Get(oldPort.BackendName(), features.VersionFromServicePort(&oldPort), features.ScopeFromServicePort(&oldPort)) if err != nil { t.Fatalf("%v", err) } @@ -677,9 
+676,9 @@ func TestEnsureBackendServiceHealthCheckLink(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) syncer := newTestSyncer(fakeGCE) - p := utils.ServicePort{NodePort: 80, Protocol: annotations.ProtocolHTTP, ID: utils.ServicePortID{Port: intstr.FromInt(1)}} + p := utils.ServicePort{NodePort: 80, Protocol: annotations.ProtocolHTTP, ID: utils.ServicePortID{Port: intstr.FromInt(1)}, BackendNamer: defaultNamer} syncer.Sync([]utils.ServicePort{p}) - be, err := syncer.backendPool.Get(p.BackendName(defaultNamer), features.VersionFromServicePort(&p), features.ScopeFromServicePort(&p)) + be, err := syncer.backendPool.Get(p.BackendName(), features.VersionFromServicePort(&p), features.ScopeFromServicePort(&p)) if err != nil { t.Fatalf("%v", err) } diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index b05aad51ae..192bc10ea8 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -101,7 +101,7 @@ func NewLoadBalancerController( Interface: ctx.KubeClient.CoreV1().Events(""), }) - healthChecker := healthchecks.NewHealthChecker(ctx.Cloud, ctx.HealthCheckPath, ctx.DefaultBackendHealthCheckPath, ctx.ClusterNamer, ctx.DefaultBackendSvcPort.ID.Service) + healthChecker := healthchecks.NewHealthChecker(ctx.Cloud, ctx.HealthCheckPath, ctx.DefaultBackendHealthCheckPath, ctx.DefaultBackendSvcPort.ID.Service) instancePool := instances.NewNodePool(ctx.Cloud, ctx.ClusterNamer) backendPool := backends.NewPool(ctx.Cloud, ctx.ClusterNamer) @@ -115,9 +115,9 @@ func NewLoadBalancerController( nodes: NewNodeController(ctx, instancePool), instancePool: instancePool, l7Pool: loadbalancers.NewLoadBalancerPool(ctx.Cloud, ctx.ClusterNamer, ctx), - backendSyncer: backends.NewBackendSyncer(backendPool, healthChecker, ctx.ClusterNamer, ctx.Cloud), - negLinker: backends.NewNEGLinker(backendPool, negtypes.NewAdapter(ctx.Cloud), ctx.ClusterNamer, ctx.Cloud), - igLinker: backends.NewInstanceGroupLinker(instancePool, backendPool, 
ctx.ClusterNamer), + backendSyncer: backends.NewBackendSyncer(backendPool, healthChecker, ctx.Cloud), + negLinker: backends.NewNEGLinker(backendPool, negtypes.NewAdapter(ctx.Cloud), ctx.Cloud), + igLinker: backends.NewInstanceGroupLinker(instancePool, backendPool), } lbc.ingSyncer = ingsync.NewIngressSyncer(&lbc) @@ -485,7 +485,7 @@ func (lbc *LoadBalancerController) sync(key string) error { } // Bootstrap state for GCP sync. - urlMap, errs := lbc.Translator.TranslateIngress(ing, lbc.ctx.DefaultBackendSvcPort.ID) + urlMap, errs := lbc.Translator.TranslateIngress(ing, lbc.ctx.DefaultBackendSvcPort.ID, lbc.ctx.ClusterNamer) if errs != nil { msg := fmt.Errorf("error while evaluating the ingress spec: %v", utils.JoinErrs(errs)) @@ -617,7 +617,7 @@ func updateAnnotations(client kubernetes.Interface, name, namespace string, anno func (lbc *LoadBalancerController) ToSvcPorts(ings []*v1beta1.Ingress) []utils.ServicePort { var knownPorts []utils.ServicePort for _, ing := range ings { - urlMap, _ := lbc.Translator.TranslateIngress(ing, lbc.ctx.DefaultBackendSvcPort.ID) + urlMap, _ := lbc.Translator.TranslateIngress(ing, lbc.ctx.DefaultBackendSvcPort.ID, lbc.ctx.ClusterNamer) knownPorts = append(knownPorts, urlMap.AllServicePorts()...) } return knownPorts diff --git a/pkg/controller/controller_test.go b/pkg/controller/controller_test.go index 97df61f94d..342a6b433f 100644 --- a/pkg/controller/controller_test.go +++ b/pkg/controller/controller_test.go @@ -18,6 +18,7 @@ package controller import ( "fmt" + "reflect" "strings" "testing" "time" @@ -359,7 +360,7 @@ func TestIngressClassChangeWithFinalizer(t *testing.T) { } // TestIngressesWithSharedResourcesWithFinalizer asserts that `sync` does not return error when -// multiple ingressesToCleanup with shared resources are added or deleted. +// multiple ingresses with shared resources are added or deleted. // Note: This test cannot be run in parallel as it stubs global flags. 
func TestIngressesWithSharedResourcesWithFinalizer(t *testing.T) { var flagSaver saveFinalizerFlags @@ -398,7 +399,10 @@ func TestIngressesWithSharedResourcesWithFinalizer(t *testing.T) { // Assert service ports are being shared. ingSvcPorts := lbc.ToSvcPorts([]*v1beta1.Ingress{ing}) otherIngSvcPorts := lbc.ToSvcPorts([]*v1beta1.Ingress{otherIng}) - if diff := cmp.Diff(ingSvcPorts, otherIngSvcPorts); diff != "" { + comparer := cmp.Comparer(func(a, b utils.ServicePort) bool { + return reflect.DeepEqual(a, b) + }) + if diff := cmp.Diff(ingSvcPorts, otherIngSvcPorts, comparer); diff != "" { t.Errorf("lbc.ToSVCPorts(_) mismatch (-want +got):\n%s", diff) } @@ -588,7 +592,7 @@ func TestMCIngressIG(t *testing.T) { wantVal := fmt.Sprintf(`[{"Name":%q,"Zone":"zone-a"}]`, instanceGroupName) if val, ok := updatedMcIng.GetAnnotations()[igAnnotationKey]; !ok { t.Errorf("updatedMcIng.GetAnnotations()[%q]= (_, %v), want true; invalid key, updatedMcIng = %v", igAnnotationKey, ok, updatedMcIng) - } else if diff := cmp.Diff(val, wantVal); diff != "" { + } else if diff := cmp.Diff(wantVal, val); diff != "" { t.Errorf("updatedMcIng.GetAnnotations()[%q] mismatch (-want +got):\n%s", igAnnotationKey, diff) } @@ -597,7 +601,7 @@ func TestMCIngressIG(t *testing.T) { if err != nil { t.Errorf("lbc.instancePool.List() = _, %v, want nil", err) } - if diff := cmp.Diff(instanceGroups, []string{instanceGroupName}); diff != "" { + if diff := cmp.Diff([]string{instanceGroupName}, instanceGroups); diff != "" { t.Errorf("lbc.instancePool.List()() mismatch (-want +got):\n%s", diff) } @@ -612,7 +616,7 @@ func TestMCIngressIG(t *testing.T) { if err != nil { t.Errorf("lbc.instancePool.List() = _, %v, want nil", err) } - if diff := cmp.Diff(instanceGroups, []string{instanceGroupName}); diff != "" { + if diff := cmp.Diff([]string{instanceGroupName}, instanceGroups); diff != "" { t.Errorf("lbc.instancePool.List()() mismatch (-want +got):\n%s", diff) } @@ -628,7 +632,7 @@ func TestMCIngressIG(t *testing.T) { 
t.Errorf("lbc.instancePool.List() = _, %v, want nil", err) } var wantInstanceGroups []string - if diff := cmp.Diff(instanceGroups, wantInstanceGroups); diff != "" { + if diff := cmp.Diff(wantInstanceGroups, instanceGroups); diff != "" { t.Errorf("lbc.instancePool.List()() mismatch (-want +got):\n%s", diff) } } diff --git a/pkg/controller/translator/translator.go b/pkg/controller/translator/translator.go index d0ecc95004..20e8a57eee 100644 --- a/pkg/controller/translator/translator.go +++ b/pkg/controller/translator/translator.go @@ -38,6 +38,7 @@ import ( "k8s.io/ingress-gce/pkg/controller/errors" "k8s.io/ingress-gce/pkg/loadbalancers" "k8s.io/ingress-gce/pkg/utils" + namer_util "k8s.io/ingress-gce/pkg/utils/namer" ) // getServicePortParams allows for passing parameters to getServicePort() @@ -142,7 +143,7 @@ func (t *Translator) maybeEnableBackendConfig(sp *utils.ServicePort, svc *api_v1 // getServicePort looks in the svc store for a matching service:port, // and returns the nodeport. -func (t *Translator) getServicePort(id utils.ServicePortID, params *getServicePortParams) (*utils.ServicePort, error) { +func (t *Translator) getServicePort(id utils.ServicePortID, params *getServicePortParams, namer namer_util.BackendNamer) (*utils.ServicePort, error) { svc, err := t.getCachedService(id) if err != nil { return nil, err @@ -162,6 +163,7 @@ func (t *Translator) getServicePort(id utils.ServicePortID, params *getServicePo Port: int32(port.Port), TargetPort: port.TargetPort.String(), L7ILBEnabled: params.isL7ILB, + BackendNamer: namer, } if err := maybeEnableNEG(svcPort, svc); err != nil { @@ -180,7 +182,7 @@ func (t *Translator) getServicePort(id utils.ServicePortID, params *getServicePo } // TranslateIngress converts an Ingress into our internal UrlMap representation. 
-func (t *Translator) TranslateIngress(ing *v1beta1.Ingress, systemDefaultBackend utils.ServicePortID) (*utils.GCEURLMap, []error) { +func (t *Translator) TranslateIngress(ing *v1beta1.Ingress, systemDefaultBackend utils.ServicePortID, namer namer_util.BackendNamer) (*utils.GCEURLMap, []error) { var errs []error urlMap := utils.NewGCEURLMap() @@ -194,7 +196,7 @@ func (t *Translator) TranslateIngress(ing *v1beta1.Ingress, systemDefaultBackend pathRules := []utils.PathRule{} for _, p := range rule.HTTP.Paths { - svcPort, err := t.getServicePort(utils.BackendToServicePortID(p.Backend, ing.Namespace), params) + svcPort, err := t.getServicePort(utils.BackendToServicePortID(p.Backend, ing.Namespace), params, namer) if err != nil { errs = append(errs, err) } @@ -217,7 +219,7 @@ func (t *Translator) TranslateIngress(ing *v1beta1.Ingress, systemDefaultBackend } if ing.Spec.Backend != nil { - svcPort, err := t.getServicePort(utils.BackendToServicePortID(*ing.Spec.Backend, ing.Namespace), params) + svcPort, err := t.getServicePort(utils.BackendToServicePortID(*ing.Spec.Backend, ing.Namespace), params, namer) if err == nil { urlMap.DefaultBackend = svcPort return urlMap, errs @@ -227,7 +229,7 @@ func (t *Translator) TranslateIngress(ing *v1beta1.Ingress, systemDefaultBackend return urlMap, errs } - svcPort, err := t.getServicePort(systemDefaultBackend, params) + svcPort, err := t.getServicePort(systemDefaultBackend, params, namer) if err == nil { urlMap.DefaultBackend = svcPort return urlMap, errs diff --git a/pkg/controller/translator/translator_test.go b/pkg/controller/translator/translator_test.go index 9777789162..5cc3cb2688 100644 --- a/pkg/controller/translator/translator_test.go +++ b/pkg/controller/translator/translator_test.go @@ -51,13 +51,13 @@ var ( }, TargetPort: "9376", } + defaultNamer = namer_util.NewNamer("uid1", "") ) func fakeTranslator() *Translator { client := fake.NewSimpleClientset() backendConfigClient := backendconfigclient.NewSimpleClientset() - namer 
:= namer_util.NewNamer("uid1", "") ctxConfig := context.ControllerContextConfig{ Namespace: apiv1.NamespaceAll, ResyncPeriod: 1 * time.Second, @@ -65,7 +65,7 @@ func fakeTranslator() *Translator { HealthCheckPath: "/", DefaultBackendHealthCheckPath: "/healthz", } - ctx := context.NewControllerContext(client, nil, backendConfigClient, nil, nil, namer, ctxConfig) + ctx := context.NewControllerContext(client, nil, backendConfigClient, nil, nil, defaultNamer, ctxConfig) gce := &Translator{ ctx: ctx, } @@ -173,7 +173,7 @@ func TestTranslateIngress(t *testing.T) { for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - gotGCEURLMap, gotErrs := translator.TranslateIngress(tc.ing, defaultBackend.ID) + gotGCEURLMap, gotErrs := translator.TranslateIngress(tc.ing, defaultBackend.ID, defaultNamer) if len(gotErrs) != tc.wantErrCount { t.Errorf("%s: TranslateIngress() = _, %+v, want %v errs", tc.desc, gotErrs, tc.wantErrCount) } @@ -241,7 +241,7 @@ func TestGetServicePort(t *testing.T) { svcLister.Add(svc) tc.id.Service = svcName - port, gotErr := translator.getServicePort(tc.id, &tc.params) + port, gotErr := translator.getServicePort(tc.id, &tc.params, defaultNamer) if (gotErr != nil) != tc.wantErr { t.Errorf("translator.getServicePort(%+v) = _, %v, want err? %v", tc.id, gotErr, tc.wantErr) } @@ -318,7 +318,7 @@ func TestGetServicePortWithBackendConfigEnabled(t *testing.T) { svcLister.Add(svc) backendConfigLister.Add(backendConfig) - port, gotErr := translator.getServicePort(tc.id, &tc.params) + port, gotErr := translator.getServicePort(tc.id, &tc.params, defaultNamer) if (gotErr != nil) != tc.wantErr { t.Errorf("%s: translator.getServicePort(%+v) = _, %v, want err? 
%v", tc.desc, tc.id, gotErr, tc.wantErr) } diff --git a/pkg/firewalls/controller.go b/pkg/firewalls/controller.go index 2819c33444..514dc09bc8 100644 --- a/pkg/firewalls/controller.go +++ b/pkg/firewalls/controller.go @@ -127,7 +127,7 @@ func NewFirewallController( func (fwc *FirewallController) ToSvcPorts(ings []*v1beta1.Ingress) []utils.ServicePort { var knownPorts []utils.ServicePort for _, ing := range ings { - urlMap, _ := fwc.translator.TranslateIngress(ing, fwc.ctx.DefaultBackendSvcPort.ID) + urlMap, _ := fwc.translator.TranslateIngress(ing, fwc.ctx.DefaultBackendSvcPort.ID, fwc.ctx.ClusterNamer) knownPorts = append(knownPorts, urlMap.AllServicePorts()...) } return knownPorts diff --git a/pkg/healthchecks/healthchecks.go b/pkg/healthchecks/healthchecks.go index b7073df73e..ee5309030f 100644 --- a/pkg/healthchecks/healthchecks.go +++ b/pkg/healthchecks/healthchecks.go @@ -31,7 +31,6 @@ import ( "k8s.io/ingress-gce/pkg/composite" "k8s.io/ingress-gce/pkg/loadbalancers/features" "k8s.io/ingress-gce/pkg/utils" - namer_util "k8s.io/ingress-gce/pkg/utils/namer" "k8s.io/klog" "k8s.io/legacy-cloud-providers/gce" ) @@ -79,7 +78,6 @@ type HealthChecks struct { path string // defaultBackend is the default health check path for the default backend. defaultBackendPath string - namer *namer_util.Namer // This is a workaround which allows us to not have to maintain // a separate health checker for the default backend. defaultBackendSvc types.NamespacedName @@ -88,8 +86,8 @@ type HealthChecks struct { // NewHealthChecker creates a new health checker. // cloud: the cloud object implementing SingleHealthCheck. // defaultHealthCheckPath: is the HTTP path to use for health checks. 
-func NewHealthChecker(cloud HealthCheckProvider, healthCheckPath string, defaultBackendHealthCheckPath string, namer *namer_util.Namer, defaultBackendSvc types.NamespacedName) HealthChecker { - return &HealthChecks{cloud, healthCheckPath, defaultBackendHealthCheckPath, namer, defaultBackendSvc} +func NewHealthChecker(cloud HealthCheckProvider, healthCheckPath string, defaultBackendHealthCheckPath string, defaultBackendSvc types.NamespacedName) HealthChecker { + return &HealthChecks{cloud, healthCheckPath, defaultBackendHealthCheckPath, defaultBackendSvc} } // New returns a *HealthCheck with default settings and specified port/protocol @@ -104,7 +102,7 @@ func (h *HealthChecks) New(sp utils.ServicePort) *HealthCheck { } // port is the key for retrieving existing health-check // TODO: rename backend-service and health-check to not use port as key - hc.Name = sp.BackendName(h.namer) + hc.Name = sp.BackendName() hc.Port = sp.NodePort hc.RequestPath = h.pathFromSvcPort(sp) return hc diff --git a/pkg/healthchecks/healthchecks_test.go b/pkg/healthchecks/healthchecks_test.go index a49f413253..b7e2a335bd 100644 --- a/pkg/healthchecks/healthchecks_test.go +++ b/pkg/healthchecks/healthchecks_test.go @@ -39,9 +39,9 @@ var ( func TestHealthCheckAdd(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) - healthChecks := NewHealthChecker(fakeGCE, "/", "/healthz", namer, defaultBackendSvc) + healthChecks := NewHealthChecker(fakeGCE, "/", "/healthz", defaultBackendSvc) - sp := utils.ServicePort{NodePort: 80, Protocol: annotations.ProtocolHTTP, NEGEnabled: false} + sp := utils.ServicePort{NodePort: 80, Protocol: annotations.ProtocolHTTP, NEGEnabled: false, BackendNamer: namer} hc := healthChecks.New(sp) _, err := healthChecks.Sync(hc) if err != nil { @@ -53,7 +53,7 @@ func TestHealthCheckAdd(t *testing.T) { t.Fatalf("expected the health check to exist, err: %v", err) } - sp = utils.ServicePort{NodePort: 443, Protocol: annotations.ProtocolHTTPS, NEGEnabled: 
false} + sp = utils.ServicePort{NodePort: 443, Protocol: annotations.ProtocolHTTPS, NEGEnabled: false, BackendNamer: namer} hc = healthChecks.New(sp) _, err = healthChecks.Sync(hc) if err != nil { @@ -65,7 +65,7 @@ func TestHealthCheckAdd(t *testing.T) { t.Fatalf("expected the health check to exist, err: %v", err) } - sp = utils.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP2, NEGEnabled: false} + sp = utils.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP2, NEGEnabled: false, BackendNamer: namer} hc = healthChecks.New(sp) _, err = healthChecks.Sync(hc) if err != nil { @@ -80,7 +80,7 @@ func TestHealthCheckAdd(t *testing.T) { func TestHealthCheckAddExisting(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) - healthChecks := NewHealthChecker(fakeGCE, "/", "/healthz", namer, defaultBackendSvc) + healthChecks := NewHealthChecker(fakeGCE, "/", "/healthz", defaultBackendSvc) // HTTP // Manually insert a health check @@ -93,7 +93,7 @@ func TestHealthCheckAddExisting(t *testing.T) { } fakeGCE.CreateHealthCheck(v1hc) - sp := utils.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP, NEGEnabled: false} + sp := utils.ServicePort{NodePort: 3000, Protocol: annotations.ProtocolHTTP, NEGEnabled: false, BackendNamer: namer} // Should not fail adding the same type of health check hc := healthChecks.New(sp) _, err = healthChecks.Sync(hc) @@ -117,7 +117,7 @@ func TestHealthCheckAddExisting(t *testing.T) { } fakeGCE.CreateHealthCheck(v1hc) - sp = utils.ServicePort{NodePort: 4000, Protocol: annotations.ProtocolHTTPS, NEGEnabled: false} + sp = utils.ServicePort{NodePort: 4000, Protocol: annotations.ProtocolHTTPS, NEGEnabled: false, BackendNamer: namer} hc = healthChecks.New(sp) _, err = healthChecks.Sync(hc) if err != nil { @@ -140,7 +140,7 @@ func TestHealthCheckAddExisting(t *testing.T) { } fakeGCE.CreateHealthCheck(v1hc) - sp = utils.ServicePort{NodePort: 5000, Protocol: annotations.ProtocolHTTPS, NEGEnabled: 
false} + sp = utils.ServicePort{NodePort: 5000, Protocol: annotations.ProtocolHTTPS, NEGEnabled: false, BackendNamer: namer} hc = healthChecks.New(sp) _, err = healthChecks.Sync(hc) if err != nil { @@ -155,7 +155,7 @@ func TestHealthCheckAddExisting(t *testing.T) { func TestHealthCheckDelete(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) - healthChecks := NewHealthChecker(fakeGCE, "/", "/healthz", namer, defaultBackendSvc) + healthChecks := NewHealthChecker(fakeGCE, "/", "/healthz", defaultBackendSvc) // Create HTTP HC for 1234 hc := DefaultHealthCheck(1234, annotations.ProtocolHTTP) @@ -191,7 +191,7 @@ func TestHealthCheckDelete(t *testing.T) { func TestHTTP2HealthCheckDelete(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) - healthChecks := NewHealthChecker(fakeGCE, "/", "/healthz", namer, defaultBackendSvc) + healthChecks := NewHealthChecker(fakeGCE, "/", "/healthz", defaultBackendSvc) // Create HTTP2 HC for 1234 hc := DefaultHealthCheck(1234, annotations.ProtocolHTTP2) @@ -221,7 +221,7 @@ func TestHealthCheckUpdate(t *testing.T) { (fakeGCE.Compute().(*cloud.MockGCE)).MockAlphaHealthChecks.UpdateHook = mock.UpdateAlphaHealthCheckHook (fakeGCE.Compute().(*cloud.MockGCE)).MockBetaHealthChecks.UpdateHook = mock.UpdateBetaHealthCheckHook - healthChecks := NewHealthChecker(fakeGCE, "/", "/healthz", namer, defaultBackendSvc) + healthChecks := NewHealthChecker(fakeGCE, "/", "/healthz", defaultBackendSvc) // HTTP // Manually insert a health check @@ -322,8 +322,8 @@ func TestHealthCheckUpdate(t *testing.T) { func TestAlphaHealthCheck(t *testing.T) { fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) - healthChecks := NewHealthChecker(fakeGCE, "/", "/healthz", namer, defaultBackendSvc) - sp := utils.ServicePort{NodePort: 8000, Protocol: annotations.ProtocolHTTPS, NEGEnabled: true} + healthChecks := NewHealthChecker(fakeGCE, "/", "/healthz", defaultBackendSvc) + sp := utils.ServicePort{NodePort: 
8000, Protocol: annotations.ProtocolHTTPS, NEGEnabled: true, BackendNamer: namer} hc := healthChecks.New(sp) _, err := healthChecks.Sync(hc) if err != nil { diff --git a/pkg/instances/instances.go b/pkg/instances/instances.go index f515cfc65f..aaf7e30e0e 100644 --- a/pkg/instances/instances.go +++ b/pkg/instances/instances.go @@ -38,13 +38,13 @@ const ( type Instances struct { cloud InstanceGroups ZoneLister - namer *namer.Namer + namer namer.BackendNamer } // NewNodePool creates a new node pool. // - cloud: implements InstanceGroups, used to sync Kubernetes nodes with // members of the cloud InstanceGroup. -func NewNodePool(cloud InstanceGroups, namer *namer.Namer) NodePool { +func NewNodePool(cloud InstanceGroups, namer namer.BackendNamer) NodePool { return &Instances{ cloud: cloud, namer: namer, diff --git a/pkg/loadbalancers/loadbalancers_test.go b/pkg/loadbalancers/loadbalancers_test.go index 6de6f294a6..600e135ee9 100644 --- a/pkg/loadbalancers/loadbalancers_test.go +++ b/pkg/loadbalancers/loadbalancers_test.go @@ -52,10 +52,6 @@ const ( defaultScope = meta.Global ) -var ( - testDefaultBeNodePort = utils.ServicePort{NodePort: 30000, Protocol: annotations.ProtocolHTTP} -) - type testJig struct { pool LoadBalancerPool fakeGCE *gce.Cloud @@ -165,8 +161,8 @@ func TestCreateHTTPLoadBalancer(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) lbInfo := &L7RuntimeInfo{ Name: j.namer.LoadBalancer(ingressName), AllowHTTP: true, @@ -187,8 +183,8 @@ func TestCreateHTTPILBLoadBalancer(t *testing.T) 
{ j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) lbInfo := &L7RuntimeInfo{ Name: j.namer.LoadBalancer(ingressName), AllowHTTP: true, @@ -209,8 +205,8 @@ func TestCreateHTTPSILBLoadBalancer(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) lbInfo := &L7RuntimeInfo{ Name: j.namer.LoadBalancer(ingressName), AllowHTTP: false, @@ -232,8 +228,8 @@ func TestCreateHTTPSLoadBalancer(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) lbInfo := &L7RuntimeInfo{ Name: j.namer.LoadBalancer(ingressName), AllowHTTP: false, @@ -319,8 +315,8 @@ func TestCertUpdate(t 
*testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) lbName := j.namer.LoadBalancer(ingressName) certName1 := j.namer.SSLCertName(lbName, GetCertHash("cert")) certName2 := j.namer.SSLCertName(lbName, GetCertHash("cert2")) @@ -357,8 +353,8 @@ func TestMultipleSecretsWithSameCert(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) lbName := j.namer.LoadBalancer(ingressName) lbInfo := &L7RuntimeInfo{ @@ -386,8 +382,8 @@ func TestCertCreationWithCollision(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) lbName := j.namer.LoadBalancer(ingressName) 
certName1 := j.namer.SSLCertName(lbName, GetCertHash("cert")) certName2 := j.namer.SSLCertName(lbName, GetCertHash("cert2")) @@ -444,8 +440,8 @@ func TestMultipleCertRetentionAfterRestart(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) cert1 := createCert("key", "cert", "name") cert2 := createCert("key2", "cert2", "name2") cert3 := createCert("key3", "cert3", "name3") @@ -495,8 +491,8 @@ func TestUpgradeToNewCertNames(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) lbName := j.namer.LoadBalancer(ingressName) lbInfo := &L7RuntimeInfo{ Name: lbName, @@ -555,8 +551,8 @@ func TestMaxCertsUpload(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", 
[]utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) var tlsCerts []*TLSCerts expectCerts := make(map[string]string) expectCertsExtra := make(map[string]string) @@ -619,8 +615,8 @@ func TestIdenticalHostnameCerts(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) var tlsCerts []*TLSCerts expectCerts := make(map[string]string) lbName := j.namer.LoadBalancer(ingressName) @@ -657,8 +653,8 @@ func TestIdenticalHostnameCertsPreShared(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) lbInfo := &L7RuntimeInfo{ Name: j.namer.LoadBalancer(ingressName), AllowHTTP: false, @@ -715,8 +711,8 @@ func TestPreSharedToSecretBasedCertUpdate(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 
31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) lbName := j.namer.LoadBalancer(ingressName) certName1 := j.namer.SSLCertName(lbName, GetCertHash("cert")) certName2 := j.namer.SSLCertName(lbName, GetCertHash("cert2")) @@ -893,8 +889,8 @@ func TestCreateHTTPSLoadBalancerAnnotationCert(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) tlsName := "external-cert-name" namer := namer_util.NewNamer(clusterName, "fw1") lbInfo := &L7RuntimeInfo{ @@ -929,8 +925,8 @@ func TestCreateBothLoadBalancers(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) lbInfo := &L7RuntimeInfo{ Name: j.namer.LoadBalancer(ingressName), AllowHTTP: true, @@ -994,15 +990,15 @@ func TestUrlMapChange(t *testing.T) { um1 := utils.NewGCEURLMap() um2 := utils.NewGCEURLMap() - um1.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar2", Backend: utils.ServicePort{NodePort: 30000}}}) - 
um1.DefaultBackend = &utils.ServicePort{NodePort: 31234} + um1.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar2", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) + um1.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} um2.PutPathRulesForHost("foo.example.com", []utils.PathRule{ - utils.PathRule{Path: "/foo1", Backend: utils.ServicePort{NodePort: 30001}}, - utils.PathRule{Path: "/foo2", Backend: utils.ServicePort{NodePort: 30002}}, + utils.PathRule{Path: "/foo1", Backend: utils.ServicePort{NodePort: 30001, BackendNamer: j.namer}}, + utils.PathRule{Path: "/foo2", Backend: utils.ServicePort{NodePort: 30002, BackendNamer: j.namer}}, }) - um2.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar1", Backend: utils.ServicePort{NodePort: 30003}}}) - um2.DefaultBackend = &utils.ServicePort{NodePort: 30004} + um2.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar1", Backend: utils.ServicePort{NodePort: 30003, BackendNamer: j.namer}}}) + um2.DefaultBackend = &utils.ServicePort{NodePort: 30004, BackendNamer: j.namer} namer := namer_util.NewNamer(clusterName, "fw1") lbInfo := &L7RuntimeInfo{Name: namer.LoadBalancer(ingressName), AllowHTTP: true, UrlMap: um1, Ingress: newIngress()} @@ -1046,18 +1042,18 @@ func TestPoolSyncNoChanges(t *testing.T) { um2 := utils.NewGCEURLMap() um1.PutPathRulesForHost("foo.example.com", []utils.PathRule{ - utils.PathRule{Path: "/foo1", Backend: utils.ServicePort{NodePort: 30000}}, - utils.PathRule{Path: "/foo2", Backend: utils.ServicePort{NodePort: 30001}}, + utils.PathRule{Path: "/foo1", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}, + utils.PathRule{Path: "/foo2", Backend: utils.ServicePort{NodePort: 30001, BackendNamer: j.namer}}, }) - um1.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar1", Backend: utils.ServicePort{NodePort: 30002}}}) - 
um1.DefaultBackend = &utils.ServicePort{NodePort: 30003} + um1.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar1", Backend: utils.ServicePort{NodePort: 30002, BackendNamer: j.namer}}}) + um1.DefaultBackend = &utils.ServicePort{NodePort: 30003, BackendNamer: j.namer} um2.PutPathRulesForHost("foo.example.com", []utils.PathRule{ - utils.PathRule{Path: "/foo1", Backend: utils.ServicePort{NodePort: 30000}}, - utils.PathRule{Path: "/foo2", Backend: utils.ServicePort{NodePort: 30001}}, + utils.PathRule{Path: "/foo1", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}, + utils.PathRule{Path: "/foo2", Backend: utils.ServicePort{NodePort: 30001, BackendNamer: j.namer}}, }) - um2.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar1", Backend: utils.ServicePort{NodePort: 30002}}}) - um2.DefaultBackend = &utils.ServicePort{NodePort: 30003} + um2.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar1", Backend: utils.ServicePort{NodePort: 30002, BackendNamer: j.namer}}}) + um2.DefaultBackend = &utils.ServicePort{NodePort: 30003, BackendNamer: j.namer} namer := namer_util.NewNamer(clusterName, "fw1") lbInfo := &L7RuntimeInfo{Name: namer.LoadBalancer(ingressName), AllowHTTP: true, UrlMap: um1, Ingress: newIngress()} @@ -1098,8 +1094,8 @@ func TestClusterNameChange(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) lbInfo := &L7RuntimeInfo{ Name: j.namer.LoadBalancer(ingressName), AllowHTTP: true, @@ 
-1167,8 +1163,8 @@ func TestList(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) lbInfo := &L7RuntimeInfo{ Name: j.namer.LoadBalancer(ingressName), AllowHTTP: true, @@ -1221,8 +1217,8 @@ func TestSecretBasedAndPreSharedCerts(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) namer := namer_util.NewNamer(clusterName, "fw1") lbName := namer.LoadBalancer(ingressName) certName1 := namer.SSLCertName(lbName, GetCertHash("cert")) @@ -1284,8 +1280,8 @@ func TestMaxSecretBasedAndPreSharedCerts(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) var 
tlsCerts []*TLSCerts expectCerts := make(map[string]string) @@ -1379,8 +1375,8 @@ func TestSecretBasedToPreSharedCertUpdate(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) lbName := j.namer.LoadBalancer(ingressName) certName1 := j.namer.SSLCertName(lbName, GetCertHash("cert")) @@ -1435,8 +1431,8 @@ func TestSecretBasedToPreSharedCertUpdateWithErrors(t *testing.T) { j := newTestJig(t) gceUrlMap := utils.NewGCEURLMap() - gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234} - gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000}}}) + gceUrlMap.DefaultBackend = &utils.ServicePort{NodePort: 31234, BackendNamer: j.namer} + gceUrlMap.PutPathRulesForHost("bar.example.com", []utils.PathRule{utils.PathRule{Path: "/bar", Backend: utils.ServicePort{NodePort: 30000, BackendNamer: j.namer}}}) lbName := j.namer.LoadBalancer(ingressName) certName1 := j.namer.SSLCertName(lbName, GetCertHash("cert")) diff --git a/pkg/loadbalancers/url_maps.go b/pkg/loadbalancers/url_maps.go index c5cce0441d..c8952c1eeb 100644 --- a/pkg/loadbalancers/url_maps.go +++ b/pkg/loadbalancers/url_maps.go @@ -219,7 +219,7 @@ func mapsEqual(a, b *composite.UrlMap) bool { // more frequently than service deletion) we just need to lookup the 1 // pathmatcher of the host. 
func toCompositeURLMap(lbName string, g *utils.GCEURLMap, namer *namer.Namer, key *meta.Key) *composite.UrlMap { - defaultBackendName := g.DefaultBackend.BackendName(namer) + defaultBackendName := g.DefaultBackend.BackendName() key.Name = defaultBackendName resourceID := cloud.ResourceID{ProjectID: "", Resource: "backendServices", Key: key} m := &composite.UrlMap{ @@ -245,7 +245,7 @@ func toCompositeURLMap(lbName string, g *utils.GCEURLMap, namer *namer.Namer, ke // GCE ensures that matched rule with longest prefix wins. for _, rule := range hostRule.Paths { - beName := rule.Backend.BackendName(namer) + beName := rule.Backend.BackendName() key.Name = beName resourceID := cloud.ResourceID{ProjectID: "", Resource: "backendServices", Key: key} beLink := resourceID.ResourcePath() diff --git a/pkg/loadbalancers/url_maps_test.go b/pkg/loadbalancers/url_maps_test.go index 02c1dd0107..b4c6f5f4af 100644 --- a/pkg/loadbalancers/url_maps_test.go +++ b/pkg/loadbalancers/url_maps_test.go @@ -48,19 +48,20 @@ func TestToComputeURLMap(t *testing.T) { t.Parallel() wantComputeMap := testCompositeURLMap() + namer := namer_util.NewNamer("uid1", "fw1") gceURLMap := &utils.GCEURLMap{ - DefaultBackend: &utils.ServicePort{NodePort: 30000}, + DefaultBackend: &utils.ServicePort{NodePort: 30000, BackendNamer: namer}, HostRules: []utils.HostRule{ { Hostname: "abc.com", Paths: []utils.PathRule{ { Path: "/web", - Backend: utils.ServicePort{NodePort: 32000}, + Backend: utils.ServicePort{NodePort: 32000, BackendNamer: namer}, }, { Path: "/other", - Backend: utils.ServicePort{NodePort: 32500}, + Backend: utils.ServicePort{NodePort: 32500, BackendNamer: namer}, }, }, }, @@ -69,18 +70,17 @@ func TestToComputeURLMap(t *testing.T) { Paths: []utils.PathRule{ { Path: "/", - Backend: utils.ServicePort{NodePort: 33000}, + Backend: utils.ServicePort{NodePort: 33000, BackendNamer: namer}, }, { Path: "/*", - Backend: utils.ServicePort{NodePort: 33500}, + Backend: utils.ServicePort{NodePort: 33500, 
BackendNamer: namer}, }, }, }, } - namer := namer_util.NewNamer("uid1", "fw1") gotComputeURLMap := toCompositeURLMap("lb-name", gceURLMap, namer, meta.GlobalKey("lb-name")) if !mapsEqual(gotComputeURLMap, wantComputeMap) { t.Errorf("toComputeURLMap() = \n%+v\n want\n%+v", gotComputeURLMap, wantComputeMap) diff --git a/pkg/utils/namer/interfaces.go b/pkg/utils/namer/interfaces.go new file mode 100644 index 0000000000..1bce003201 --- /dev/null +++ b/pkg/utils/namer/interfaces.go @@ -0,0 +1,31 @@ +/* +Copyright 2019 The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package namer + +// BackendNamer is an interface to name GCE backend resources. It wraps the backend +// naming policy of namer.Namer. +type BackendNamer interface { + // IGBackend constructs the name for a backend service targeting instance groups. + IGBackend(nodePort int64) string + // NEG returns the GCE NEG name based on the service namespace, name + // and target port. + NEG(namespace, name string, port int32) string + // InstanceGroup constructs the name for an Instance Group. + InstanceGroup() string + // NamedPort returns the name for a named port. + NamedPort(port int64) string + // NameBelongsToCluster checks if a given backend resource name is tagged with + // this cluster's UID.
+ NameBelongsToCluster(resourceName string) bool +} diff --git a/pkg/utils/serviceport.go b/pkg/utils/serviceport.go index a866628d2a..3810dc4321 100644 --- a/pkg/utils/serviceport.go +++ b/pkg/utils/serviceport.go @@ -50,6 +50,7 @@ type ServicePort struct { NEGEnabled bool L7ILBEnabled bool BackendConfig *backendconfigv1beta1.BackendConfig + BackendNamer namer.BackendNamer } // GetDescription returns a Description for this ServicePort. @@ -61,12 +62,16 @@ func (sp ServicePort) GetDescription() Description { } // BackendName returns the name of the backend which would be used for this ServicePort. -func (sp ServicePort) BackendName(namer *namer.Namer) string { - if !sp.NEGEnabled { - return namer.IGBackend(sp.NodePort) +func (sp ServicePort) BackendName() string { + if sp.NEGEnabled { + return sp.BackendNamer.NEG(sp.ID.Service.Namespace, sp.ID.Service.Name, sp.Port) } + return sp.BackendNamer.IGBackend(sp.NodePort) +} - return namer.NEG(sp.ID.Service.Namespace, sp.ID.Service.Name, sp.Port) +// IGName returns the name of the instance group which would be used for this ServicePort. +func (sp ServicePort) IGName() string { + return sp.BackendNamer.InstanceGroup() } // BackendToServicePortID creates a ServicePortID from a given IngressBackend and namespace.