diff --git a/pkg/agent/nodeportlocal/k8s/npl_controller.go b/pkg/agent/nodeportlocal/k8s/npl_controller.go
index 4c675a8d6ed..51c94278dfa 100644
--- a/pkg/agent/nodeportlocal/k8s/npl_controller.go
+++ b/pkg/agent/nodeportlocal/k8s/npl_controller.go
@@ -463,6 +463,9 @@ func (c *NPLController) handleAddUpdatePod(key string, obj interface{}) error {
 		}
 	}
 
+	// targetPortsInt contains the list of all ports that need to be exposed for the Pod, including container
+	// ports for named ports present in targetPortsStr. If it is empty, all existing rules and annotations for
+	// the Pod have to be cleaned up.
 	if len(targetPortsInt) == 0 {
 		if err := c.deleteAllPortRulesIfAny(podIP); err != nil {
 			return err
diff --git a/pkg/agent/nodeportlocal/npl_agent_test.go b/pkg/agent/nodeportlocal/npl_agent_test.go
index 2010fdaa141..dbdafbbeeb2 100644
--- a/pkg/agent/nodeportlocal/npl_agent_test.go
+++ b/pkg/agent/nodeportlocal/npl_agent_test.go
@@ -88,10 +88,29 @@ func getTestPod() *corev1.Pod {
 }
 
 func getTestSvc(targetPorts ...int32) *corev1.Service {
-	var targetPort int32
-	targetPort = defaultPort
-	if len(targetPorts) > 0 {
-		targetPort = targetPorts[0]
+	var ports []corev1.ServicePort
+	if len(targetPorts) == 0 {
+		port := corev1.ServicePort{
+			Port:     80,
+			Protocol: "TCP",
+			TargetPort: intstr.IntOrString{
+				Type:   intstr.Int,
+				IntVal: defaultPort,
+			},
+		}
+		ports = append(ports, port)
+	} else {
+		for i := range targetPorts {
+			port := corev1.ServicePort{
+				Port:     80,
+				Protocol: "TCP",
+				TargetPort: intstr.IntOrString{
+					Type:   intstr.Int,
+					IntVal: targetPorts[i],
+				},
+			}
+			ports = append(ports, port)
+		}
 	}
 	return &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{
@@ -102,14 +121,7 @@
 		Spec: corev1.ServiceSpec{
 			Type:     corev1.ServiceTypeClusterIP,
 			Selector: map[string]string{defaultAppSelectorKey: defaultAppSelectorVal},
-			Ports: []corev1.ServicePort{{
-				Port:     80,
-				Protocol: "TCP",
-				TargetPort: intstr.IntOrString{
-					Type:   intstr.Int,
-					IntVal: targetPort,
-				},
-			}},
+			Ports: ports,
 		},
 	}
 }
@@ -411,9 +423,35 @@ func TestPodDelete(t *testing.T) {
 	assert.NoError(t, err, "Error when polling for port table update")
}
 
-// TestPodAddMultiPort creates a Pod with multiple ports and verifies that the Pod's NPL annotation
-// and the local port table are updated with only one port used as target port of the Service.
-func TestPodAddMultiPort(t *testing.T) {
+// TestAddMultiPortPodSvc creates a Pod with multiple ports and a Service with the same ports as target ports.
+// It is verified that the Pod's NPL annotation and the local port table are updated correctly.
+func TestAddMultiPortPodSvc(t *testing.T) {
+	newPort := 90
+	testSvc := getTestSvc(defaultPort, int32(newPort))
+	testPod := getTestPod()
+	testPod.Spec.Containers[0].Ports = append(
+		testPod.Spec.Containers[0].Ports,
+		corev1.ContainerPort{ContainerPort: defaultPort},
+	)
+	testPod.Spec.Containers[0].Ports = append(
+		testPod.Spec.Containers[0].Ports,
+		corev1.ContainerPort{ContainerPort: int32(newPort)},
+	)
+	testData := setUp(t, testSvc, testPod)
+	defer testData.tearDown()
+
+	value, err := testData.pollForPodAnnotation(testPod.Name, true)
+	require.NoError(t, err, "Poll for annotation check failed")
+	nplData := testData.checkAnnotationValue(value, defaultPort, newPort)
+	assert.NotEqual(t, nplData[0].NodePort, nplData[1].NodePort)
+	assert.True(t, testData.portTable.RuleExists(defaultPodIP, defaultPort))
+	assert.True(t, testData.portTable.RuleExists(defaultPodIP, newPort))
+}
+
+// TestAddMultiPortPodSinglePortSvc creates a Pod with multiple ports and a Service with only one target port.
+// It is verified that the Pod's NPL annotation and the local port table are updated
+// with only one port used as the target port of the Service.
+func TestAddMultiPortPodSinglePortSvc(t *testing.T) {
 	testSvc := getTestSvc()
 	testPod := getTestPod()
 	newPort1 := defaultPort
@@ -458,6 +496,31 @@ func TestPodAddHostPort(t *testing.T) {
 	assert.False(t, testData.portTable.RuleExists(defaultPodIP, defaultPort))
 }
 
+// TestPodAddMultiProtocol creates a Pod with multiple host ports that have the same value but different protocols.
+// It is verified that the Pod's NPL annotation is updated with the host port, without allocating an extra port
+// from the NPL pool. No NPL rule is required in this case.
+func TestPodAddMultiProtocol(t *testing.T) {
+	testSvc := getTestSvc()
+	testPod := getTestPod()
+	hostPort := 4001
+	testPod.Spec.Containers[0].Ports = append(
+		testPod.Spec.Containers[0].Ports,
+		corev1.ContainerPort{ContainerPort: int32(defaultPort), HostPort: int32(hostPort), Protocol: corev1.ProtocolTCP},
+	)
+	testPod.Spec.Containers[0].Ports = append(
+		testPod.Spec.Containers[0].Ports,
+		corev1.ContainerPort{ContainerPort: int32(defaultPort), HostPort: int32(hostPort), Protocol: corev1.ProtocolUDP},
+	)
+	testData := setUp(t, testSvc, testPod)
+	defer testData.tearDown()
+
+	value, err := testData.pollForPodAnnotation(testPod.Name, true)
+	require.NoError(t, err, "Poll for annotation check failed")
+	nplData := testData.checkAnnotationValue(value, defaultPort)
+	assert.Equal(t, nplData[0].NodePort, hostPort)
+	assert.False(t, testData.portTable.RuleExists(defaultPodIP, defaultPort))
+}
+
 // TestPodAddHostPortWrongProtocol creates a Pod with a host port but with protocol UDP instead of TCP.
 // In this case, instead of using host port, a new port should be allocated from NPL port pool.
 func TestPodAddHostPortWrongProtocol(t *testing.T) {
@@ -495,6 +558,12 @@ func TestTargetPortWithName(t *testing.T) {
 	_, err := testData.pollForPodAnnotation(testPod.Name, true)
 	require.NoError(t, err, "Poll for annotation check failed")
 	assert.True(t, testData.portTable.RuleExists(testPod.Status.PodIP, defaultPort))
+
+	testSvc = getTestSvcWithPortName("wrongPort")
+	testData.updateServiceOrFail(testSvc)
+	_, err = testData.pollForPodAnnotation(testPod.Name, false)
+	require.NoError(t, err, "Poll for annotation check failed")
+	assert.False(t, testData.portTable.RuleExists(testPod.Status.PodIP, defaultPort))
 }
 
 // TestMultiplePods creates multiple Pods and verifies that NPL annotations for both Pods are
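Note on the last hunk: the extended TestTargetPortWithName relies on the getTestSvcWithPortName and updateServiceOrFail helpers, which are not part of this diff. As a minimal sketch of the shape getTestSvcWithPortName plausibly has, assuming it mirrors getTestSvc but swaps the numeric target port for a named one (the actual helper in the repository may differ):

// Hypothetical sketch, not taken from this diff: build the same single-port
// Service as getTestSvc, but set a string (named) target port so the
// controller must resolve it against the Pod's named container ports.
func getTestSvcWithPortName(portName string) *corev1.Service {
	svc := getTestSvc()
	svc.Spec.Ports[0].TargetPort = intstr.IntOrString{
		Type:   intstr.String,
		StrVal: portName,
	}
	return svc
}

With a Service shaped like this, updating it to target a port name that no container declares (the "wrongPort" case) leaves targetPortsInt empty for the Pod, which, per the new comment in handleAddUpdatePod, triggers cleanup of all existing rules and annotations; this is exactly what the added assertions verify.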