Netpol #27

Merged: 5 commits, May 11, 2023
40 changes: 30 additions & 10 deletions CHANGELOG.md
@@ -1,14 +1,34 @@
-## Version (0.0.0)
-### Releshed/Unreleased
-### Date
+# Changelog

-Changes:
-- List of changes
+All notable changes to this project will be documented in this file.

-Improvements:
-- List of improvements
+## Table of Contents

-Bug Fixes:
-- NA
+- [2.0.2](#202)
+- [2.0.1](#201)
+- [2.0.0](#200)
+- [0.1.0](#010)

----
+---
+
+## `2.0.2`
+
+- integrate e2e tests with network policies
+- fix a bug in udp testing
+
+## `2.0.1`
+
+- fix release naming
+
+## `2.0.0`
+
+- complete rewrite of the tool in Go, with unit and integration tests
+- leverages the ephemeral container support in Kubernetes > v1.25
+- test case(s) are written in YAML
+- support for Pods, StatefulSets, DaemonSets and Deployments which are directly referred through their names in the test suites
+- artifacts are available for download
+
+## `0.1.0`
+
+- initial release
+- no artifacts available
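The 2.0.0 entry above notes that the rewrite leverages ephemeral containers. For reference, the sketch below illustrates the underlying client-go call (`UpdateEphemeralContainers`) that makes such in-pod probes possible; it is an illustration of the Kubernetes API only, not the netassert implementation, and the namespace, pod name, image and container name are placeholder assumptions.

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// addProbeContainer injects an ephemeral container into a running Pod.
// This mirrors the Kubernetes mechanism the changelog refers to; it is not
// the netassert code itself.
func addProbeContainer(ctx context.Context, cs kubernetes.Interface, namespace, podName string) error {
	pod, err := cs.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
	if err != nil {
		return err
	}

	pod.Spec.EphemeralContainers = append(pod.Spec.EphemeralContainers, corev1.EphemeralContainer{
		EphemeralContainerCommon: corev1.EphemeralContainerCommon{
			Name:    "debug-probe",  // placeholder container name
			Image:   "busybox:1.36", // placeholder image
			Command: []string{"sh", "-c", "sleep 300"},
		},
	})

	// The update goes to the Pod's ephemeralcontainers subresource.
	_, err = cs.CoreV1().Pods(namespace).UpdateEphemeralContainers(ctx, podName, pod, metav1.UpdateOptions{})
	return err
}

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// "busybox"/"busybox-0" are placeholders for a namespace and Pod in a test cluster.
	if err := addProbeContainer(context.Background(), cs, "busybox", "busybox-0"); err != nil {
		panic(err)
	}
	fmt.Println("ephemeral container added")
}
```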
2 changes: 1 addition & 1 deletion e2e/clusters/gke-dataplanev2/main.tf
@@ -13,7 +13,7 @@ provider "google" {

 resource "google_container_cluster" "e2etest" {
   name = var.cluster_name
-  initial_node_count = 3
+  initial_node_count = 4
   datapath_provider = var.use_dataplanev2 ? "ADVANCED_DATAPATH" : null
   ip_allocation_policy {}
   node_config {
13 changes: 10 additions & 3 deletions e2e/clusters/gke-vpc/main.tf
@@ -13,11 +13,18 @@ provider "google" {

 resource "google_container_cluster" "e2etest" {
   name = var.cluster_name
-  initial_node_count = 3
-  datapath_provider = var.use_dataplanev2 ? "ADVANCED_DATAPATH" : null
+  initial_node_count = 4
+  addons_config {
+    network_policy_config {
+      disabled = false
+    }
+  }
+  network_policy {
+    enabled = true
+  }
   ip_allocation_policy {}
   node_config {
-    machine_type = "e2-medium"
+    machine_type = "e2-standard-2"
   }

   release_channel {
27 changes: 26 additions & 1 deletion e2e/e2e_test.go
@@ -177,8 +177,33 @@ func createTestDestroy(t *testing.T, gc helpers.GenericCluster) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	// run the tests

+	// run the tests without network policies
+	runTests(ctx, t, svc, netAssertTestCases)
+
+	if gc.SkipNetPolTests() {
+		return
+	}
+
+	// create the network policies
+	k8s.KubectlApply(t, options, "./manifests/networkpolicies.yaml")
+
+	// read the tests again for a fresh start
+	netAssertTestCases, err = data.ReadTestsFromFile(testCasesFile)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// set the exit to 1 since this time the network policies will block the traffic
+	for _, tc := range netAssertTestCases {
+		tc.ExitCode = 1
+	}
+
+	// run the tests with network policies
+	runTests(ctx, t, svc, netAssertTestCases)
+}
+
+func runTests(ctx context.Context, t *testing.T, svc *kubeops.Service, netAssertTestCases data.Tests) {
 	lg := logger.NewHCLogger("INFO", "netassertv2-e2e", os.Stdout)
 	testRunner := engine.New(svc, lg)

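The hunk above re-reads the test cases after the network policies are applied and flips each expected exit code to 1, since every probe should now be blocked. A minimal sketch of that pattern follows; the `Test`/`Tests` types here are stand-ins for the package's `data` types, and the slice is assumed to hold pointers, which is what makes the in-place mutation in the range loop take effect.

```go
package main

import "fmt"

// Test is a stand-in for the data.Test type referenced in the diff above.
type Test struct {
	Name     string
	ExitCode int // expected exit code of the probe
}

// Tests mirrors data.Tests; assumed here to be a slice of pointers.
type Tests []*Test

func main() {
	cases := Tests{
		{Name: "busybox-to-echoserver", ExitCode: 0},
		{Name: "pod1-to-pod2", ExitCode: 0},
	}

	// With deny-all network policies in place, every connection attempt is
	// expected to fail, so each test now expects a non-zero exit code.
	for _, tc := range cases {
		tc.ExitCode = 1
	}

	for _, tc := range cases {
		fmt.Printf("%s: expected exit code %d\n", tc.Name, tc.ExitCode)
	}
}
```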
1 change: 1 addition & 0 deletions e2e/helpers/common.go
@@ -14,4 +14,5 @@ type GenericCluster interface {
 	Create(t *testing.T)
 	Destroy(t *testing.T)
 	KubeConfigGet() string
+	SkipNetPolTests() bool
 }
8 changes: 8 additions & 0 deletions e2e/helpers/eks.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"os"
 	"testing"
+	"time"

 	"github.com/controlplaneio/netassert/v2/internal/kubeops"
 	"github.com/controlplaneio/netassert/v2/internal/logger"
@@ -110,6 +111,9 @@ func (g *EKSCluster) installCalico(t *testing.T) {
 	if _, err := terraform.InitAndApplyE(t, newTFOptions); err != nil {
 		t.Fatalf("failed to run terraform init and apply: %s", err)
 	}
+
+	svc.Log.Info("Sleeping 20 minutes so connectivity from the cluster to the Internet is restored")
+	time.Sleep(20 * time.Minute)
 }

 func (g *EKSCluster) Destroy(t *testing.T) {
@@ -121,3 +125,7 @@
 func (g *EKSCluster) KubeConfigGet() string {
 	return g.kubeConfigPath
 }
+
+func (g *EKSCluster) SkipNetPolTests() bool {
+	return g.networkMode != Calico
+}
4 changes: 4 additions & 0 deletions e2e/helpers/gke.go
@@ -60,3 +60,7 @@ func (g *GKECluster) Destroy(t *testing.T) {
 func (g *GKECluster) KubeConfigGet() string {
 	return g.kubeConfigPath
 }
+
+func (g *GKECluster) SkipNetPolTests() bool {
+	return false // network policies are supported by all gke cluster configurations
+}
70 changes: 70 additions & 0 deletions e2e/manifests/networkpolicies.yaml
@@ -0,0 +1,70 @@
+---
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  namespace: fluentd
+  name: fluentd-elasticsearch
+spec:
+  podSelector:
+    matchLabels:
+      name: fluentd-elasticsearch
+  ingress:
+---
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  namespace: echoserver
+  name: echoserver
+spec:
+  podSelector:
+    matchLabels:
+      app: echoserver
+  ingress:
+---
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  namespace: busybox
+  name: busybox
+spec:
+  podSelector:
+    matchLabels:
+      app: busybox
+  policyTypes:
+  - Ingress
+  - Egress
+  ingress:
+  egress:
+---
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  namespace: pod1
+  name: pod1
+spec:
+  podSelector:
+    matchLabels:
+      name: pod1
+  ingress:
+---
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  namespace: pod2
+  name: pod2
+spec:
+  podSelector:
+    matchLabels:
+      name: pod2
+  ingress:
+---
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+  namespace: web
+  name: nginx
+spec:
+  podSelector:
+    matchLabels:
+      app: nginx
+  ingress:
4 changes: 4 additions & 0 deletions e2e/manifests/pod1-pod2.yaml
@@ -15,6 +15,8 @@ kind: Pod
 metadata:
   name: pod2
   namespace: pod2
+  labels:
+    name: pod2
 spec:
   containers:
   - name: webserver
@@ -27,6 +29,8 @@
 metadata:
   name: pod1
   namespace: pod1
+  labels:
+    name: pod1
 spec:
   containers:
   - name: busybox
14 changes: 8 additions & 6 deletions internal/engine/run_udp.go
@@ -12,8 +12,10 @@ import (
 )

 const (
-	defaultNetInt  = `eth0` // default network interface
-	defaultSnapLen = 1024   // default size of the packet snap length
+	defaultNetInt                   = `eth0` // default network interface
+	defaultSnapLen                  = 1024   // default size of the packet snap length
+	ephemeralContainersExtraSeconds = 23     // fixed extra time given for the ephemeral containers to come online
+	attemptsMultiplier              = 3      // increase the attempts to ensure that we send three times the packets
 )

 // RunUDPTest - runs a UDP test
@@ -114,7 +116,7 @@ func (e *Engine) RunUDPTest(
 		string(te.Protocol),
 		te.Attempts,
 		networkInterface,
-		te.TimeoutSeconds+5, // add 5 seconds for the Container to come online
+		te.TimeoutSeconds,
 	)
 	if err != nil {
 		return fmt.Errorf("failed to build sniffer ephemeral container for test %s: %w", te.Name, err)
@@ -128,7 +130,7 @@
 		strconv.Itoa(te.TargetPort),
 		string(te.Protocol),
 		msg,
-		te.Attempts*3, // increase the attempts to ensure that we send three times the packets
+		te.Attempts*attemptsMultiplier,
 	)
 	if err != nil {
 		return fmt.Errorf("unable to build ephemeral scanner container for test %s: %w", te.Name, err)
@@ -152,7 +154,7 @@
 	exitCodeSnifferCtr, err := e.Service.GetExitStatusOfEphemeralContainer(
 		ctx,
 		snifferContainerName,
-		time.Duration(te.TimeoutSeconds)*time.Second,
+		time.Duration(te.TimeoutSeconds+ephemeralContainersExtraSeconds)*time.Second,
 		dstPod.Name,
 		dstPod.Namespace,
 	)
@@ -174,7 +176,7 @@
 	// get the exit status of the scanner container
 	exitCodeScanner, err := e.Service.GetExitStatusOfEphemeralContainer(
 		ctx, scannerContainerName,
-		time.Duration(te.TimeoutSeconds+10)*time.Second,
+		time.Duration(te.TimeoutSeconds+ephemeralContainersExtraSeconds)*time.Second,
 		srcPod.Name,
 		srcPod.Namespace,
 	)
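Taken together, the constants introduced in this file mean the scanner sends `Attempts * attemptsMultiplier` packets and each ephemeral container's exit status is awaited for the test timeout plus a fixed 23-second grace period. A small, self-contained sketch of that arithmetic, using made-up test parameters:

```go
package main

import (
	"fmt"
	"time"
)

// Mirrors the constants added in run_udp.go.
const (
	ephemeralContainersExtraSeconds = 23 // grace period for the ephemeral containers to come online
	attemptsMultiplier              = 3  // the scanner sends three times the configured packets
)

func main() {
	// Hypothetical test-case parameters; real values come from the YAML test suite.
	timeoutSeconds := 10
	attempts := 5

	wait := time.Duration(timeoutSeconds+ephemeralContainersExtraSeconds) * time.Second
	packets := attempts * attemptsMultiplier

	fmt.Printf("poll each ephemeral container for up to %s; send %d UDP packets\n", wait, packets)
}
```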