From 6caded77abf749ed0131e21d8d8a4d31e4bd9cd0 Mon Sep 17 00:00:00 2001
From: Gilberto Bertin
Date: Thu, 14 Dec 2023 15:21:45 +0100
Subject: [PATCH 1/2] check: add third client scheduled on a different node

for some upcoming tests we will need a new client scheduled on a
different node than the first 2 clients. This commit adds this new
client.

Signed-off-by: Gilberto Bertin
---
 connectivity/check/deployment.go | 50 +++++++++++++++++++++++++++++++-
 1 file changed, 49 insertions(+), 1 deletion(-)

diff --git a/connectivity/check/deployment.go b/connectivity/check/deployment.go
index 6ad955f54a..76f0f6e7d3 100644
--- a/connectivity/check/deployment.go
+++ b/connectivity/check/deployment.go
@@ -34,6 +34,7 @@ const (
 
 	clientDeploymentName  = "client"
 	client2DeploymentName = "client2"
+	client3DeploymentName = "client3"
 	clientCPDeployment    = "client-cp"
 
 	DNSTestServerContainerName = "dns-test-server"
@@ -854,7 +855,48 @@ func (ct *ConnectivityTest) deploy(ctx context.Context) error {
 		}
 	}
 
-	// 3rd client scheduled on the control plane
+	if ct.params.MultiCluster == "" && !ct.params.SingleNode {
+		// 3rd client scheduled on a different node than the first 2 clients
+		_, err = ct.clients.src.GetDeployment(ctx, ct.params.TestNamespace, client3DeploymentName, metav1.GetOptions{})
+		if err != nil {
+			ct.Logf("✨ [%s] Deploying %s deployment...", ct.clients.src.ClusterName(), client3DeploymentName)
+			clientDeployment := newDeployment(deploymentParameters{
+				Name:        client3DeploymentName,
+				Kind:        kindClientName,
+				NamedPort:   "http-8080",
+				Port:        8080,
+				Image:       ct.params.CurlImage,
+				Command:     []string{"/bin/ash", "-c", "sleep 10000000"},
+				Labels:      map[string]string{"other": "client-other-node"},
+				Annotations: ct.params.DeploymentAnnotations.Match(client3DeploymentName),
+				Affinity: &corev1.Affinity{
+					PodAntiAffinity: &corev1.PodAntiAffinity{
+						RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+							{
+								LabelSelector: &metav1.LabelSelector{
+									MatchExpressions: []metav1.LabelSelectorRequirement{
+										{Key: "name", Operator: metav1.LabelSelectorOpIn, Values: []string{clientDeploymentName}},
+									},
+								},
+								TopologyKey: corev1.LabelHostname,
+							},
+						},
+					},
+				},
+				NodeSelector: ct.params.NodeSelector,
+			})
+			_, err = ct.clients.src.CreateServiceAccount(ctx, ct.params.TestNamespace, k8s.NewServiceAccount(client3DeploymentName), metav1.CreateOptions{})
+			if err != nil {
+				return fmt.Errorf("unable to create service account %s: %s", client3DeploymentName, err)
+			}
+			_, err = ct.clients.src.CreateDeployment(ctx, ct.params.TestNamespace, clientDeployment, metav1.CreateOptions{})
+			if err != nil {
+				return fmt.Errorf("unable to create deployment %s: %s", client3DeploymentName, err)
+			}
+		}
+	}
+
+	// 4th client scheduled on the control plane
 	if ct.params.K8sLocalHostTest {
 		ct.Logf("✨ [%s] Deploying %s deployment...", ct.clients.src.ClusterName(), clientCPDeployment)
 		clientDeployment := newDeployment(deploymentParameters{
@@ -885,6 +927,7 @@ func (ct *ConnectivityTest) deploy(ctx context.Context) error {
 	}
 
 	if !ct.params.SingleNode || ct.params.MultiCluster != "" {
+
 		_, err = ct.clients.dst.GetService(ctx, ct.params.TestNamespace, echoOtherNodeDeploymentName, metav1.GetOptions{})
 		if err != nil {
 			ct.Logf("✨ [%s] Deploying echo-other-node service...", ct.clients.dst.ClusterName())
@@ -1063,6 +1106,9 @@ func (ct *ConnectivityTest) deploy(ctx context.Context) error {
 func (ct *ConnectivityTest) deploymentList() (srcList []string, dstList []string) {
 	if !ct.params.Perf {
 		srcList = []string{clientDeploymentName, client2DeploymentName, echoSameNodeDeploymentName}
+		if ct.params.MultiCluster == "" && !ct.params.SingleNode {
+			srcList = append(srcList, client3DeploymentName)
+		}
 	} else {
 		perfNm := newPerfDeploymentNameManager(&ct.params)
 		srcList = []string{perfNm.ClientName(), perfNm.ServerName()}
@@ -1099,10 +1145,12 @@ func (ct *ConnectivityTest) deleteDeployments(ctx context.Context, client *k8s.C
 	_ = client.DeleteDeployment(ctx, ct.params.TestNamespace, echoOtherNodeDeploymentName, metav1.DeleteOptions{})
 	_ = client.DeleteDeployment(ctx, ct.params.TestNamespace, clientDeploymentName, metav1.DeleteOptions{})
 	_ = client.DeleteDeployment(ctx, ct.params.TestNamespace, client2DeploymentName, metav1.DeleteOptions{})
+	_ = client.DeleteDeployment(ctx, ct.params.TestNamespace, client3DeploymentName, metav1.DeleteOptions{})
 	_ = client.DeleteServiceAccount(ctx, ct.params.TestNamespace, echoSameNodeDeploymentName, metav1.DeleteOptions{})
 	_ = client.DeleteServiceAccount(ctx, ct.params.TestNamespace, echoOtherNodeDeploymentName, metav1.DeleteOptions{})
 	_ = client.DeleteServiceAccount(ctx, ct.params.TestNamespace, clientDeploymentName, metav1.DeleteOptions{})
 	_ = client.DeleteServiceAccount(ctx, ct.params.TestNamespace, client2DeploymentName, metav1.DeleteOptions{})
+	_ = client.DeleteServiceAccount(ctx, ct.params.TestNamespace, client3DeploymentName, metav1.DeleteOptions{})
 	_ = client.DeleteService(ctx, ct.params.TestNamespace, echoSameNodeDeploymentName, metav1.DeleteOptions{})
 	_ = client.DeleteService(ctx, ct.params.TestNamespace, echoOtherNodeDeploymentName, metav1.DeleteOptions{})
 	_ = client.DeleteConfigMap(ctx, ct.params.TestNamespace, corednsConfigMapName, metav1.DeleteOptions{})

From 99970b5a206156cd2d5e93b5a19554e5a944b22f Mon Sep 17 00:00:00 2001
From: Gilberto Bertin
Date: Fri, 15 Dec 2023 11:45:10 +0100
Subject: [PATCH 2/2] github: eks: bump timeout to 30m

Signed-off-by: Gilberto Bertin
---
 .github/workflows/eks-tunnel.yaml | 2 +-
 .github/workflows/eks.yaml        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/eks-tunnel.yaml b/.github/workflows/eks-tunnel.yaml
index 66d4ba48db..3c138d6373 100644
--- a/.github/workflows/eks-tunnel.yaml
+++ b/.github/workflows/eks-tunnel.yaml
@@ -143,7 +143,7 @@ jobs:
 
     - name: Wait for job
       env:
-        timeout: 20m
+        timeout: 30m
       run: |
         # Background wait for job to complete or timeout
         kubectl -n kube-system wait job/cilium-cli --for=condition=complete --timeout=${{ env.timeout }} &
diff --git a/.github/workflows/eks.yaml b/.github/workflows/eks.yaml
index ed33eb852f..0b771c8c65 100644
--- a/.github/workflows/eks.yaml
+++ b/.github/workflows/eks.yaml
@@ -143,7 +143,7 @@ jobs:
 
     - name: Wait for job
       env:
-        timeout: 20m
+        timeout: 30m
       run: |
        # Background wait for job to complete or timeout
        kubectl -n kube-system wait job/cilium-cli --for=condition=complete --timeout=${{ env.timeout }} &
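
Note on the Affinity block added in patch 1/2: on the client3 pods it should render to roughly the pod-spec fragment below, which keeps client3 off any node already running a pod labeled name=client (the first client deployment). This is an illustrative sketch only, not part of the patches; the exact manifest depends on how newDeployment assembles the pod template.

    affinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchExpressions:
            - key: name            # matches pods of the "client" deployment
              operator: In
              values:
              - client
          topologyKey: kubernetes.io/hostname   # corev1.LabelHostname, i.e. "different node" granularity

Because the rule is required rather than preferred, client3 could never be scheduled on a single-node cluster, which is why the new block is guarded by !ct.params.SingleNode.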