Skip to content

Commit

Permalink
Convert legacy docker tests from bash to golang (#11357)
Browse files Browse the repository at this point in the history
* Convert the following Docker test from Bash to Go
    - basics
    - bootstraptoken
    - cacerts
    - compat -> skew
    - etcd
    - lazypull
    - upgrade

Signed-off-by: Derek Nola <[email protected]>

* Add Docker go tests to GHA
* Prebuild K3s Go Tests
* Strip go test binaries to reduce size
* Handle complex branch options

Signed-off-by: Derek Nola <[email protected]>

* Implement basic golang tests on arm and arm64 pipelines

Signed-off-by: Derek Nola <[email protected]>
dereknola committed Jan 13, 2025

Verified

This commit was created on GitHub.com and signed with GitHub’s verified signature. The key has expired.
1 parent 2e419c7 commit 3c273b0
Showing 13 changed files with 1,430 additions and 1 deletion.
82 changes: 82 additions & 0 deletions .github/workflows/e2e.yaml
Original file line number Diff line number Diff line change
@@ -121,3 +121,85 @@ jobs:
. ./tests/docker/test-helpers
. ./tests/docker/test-run-${{ matrix.dtest }}
echo "Did test-run-${{ matrix.dtest }} pass $?"
build-go-tests:
name: "Build Go Tests"
runs-on: ubuntu-latest
outputs:
branch_name: ${{ steps.branch_step.outputs.BRANCH_NAME }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install Go
uses: ./.github/actions/setup-go
- name: Build Go Tests
run: |
mkdir -p ./dist/artifacts
go test -c -ldflags="-w -s" -o ./dist/artifacts ./tests/docker/...
- name: Upload Go Tests
uses: actions/upload-artifact@v4
with:
name: docker-go-tests
path: ./dist/artifacts/*.test
compression-level: 9
retention-days: 1
# For upgrade and skew tests, we need to know the branch name this run is based off.
# Since this is predetermined, we can run this step before the docker-go job, saving time.
# For PRs we can use the base_ref (ie the target branch of the PR).
# For pushes to k3s-io/k3s, the branch_name is a valid ref, master or release-x.y.
# For pushes to a fork, we need to determine the branch name by finding the parent branch from git show-branch history.
- name: Determine branch name
id: branch_step
run: |
if [ ${{ github.repository }} = "k3s-io/k3s" ]; then
BRANCH_NAME=$(echo ${{ github.base_ref || github.ref_name }})
elif [ -z "${{ github.base_ref }}" ]; then
# We are in a fork, and need some git history to determine the branch name
git fetch origin --depth=100 +refs/heads/*:refs/remotes/origin/*
BRANCH_NAME=$(git show-branch -a 2> /dev/null | grep '\*' | grep -v `git rev-parse --abbrev-ref HEAD` | head -n1 | sed 's/.*\[\(.*\/\)\(.*\)\].*/\2/' | sed 's/[\^~].*//')
else
BRANCH_NAME=${{ github.base_ref }}
fi
echo "Branch Name is $BRANCH_NAME"
echo "BRANCH_NAME=$BRANCH_NAME" >> $GITHUB_OUTPUT
docker-go:
needs: [build, build-go-tests]
name: Docker Tests In GO
runs-on: ubuntu-latest
timeout-minutes: 20
strategy:
fail-fast: false
matrix:
dtest: [basics, bootstraptoken, cacerts, etcd, lazypull, skew, upgrade]
env:
BRANCH_NAME: ${{ needs.build-go-tests.outputs.branch_name }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: "Download K3s image"
uses: actions/download-artifact@v4
with:
name: k3s
path: ./dist/artifacts
- name: Load and set K3s image
run: |
docker image load -i ./dist/artifacts/k3s-image.tar
IMAGE_TAG=$(docker image ls --format '{{.Repository}}:{{.Tag}}' | grep 'rancher/k3s')
echo "K3S_IMAGE=$IMAGE_TAG" >> $GITHUB_ENV
- name: Download Go Tests
uses: actions/download-artifact@v4
with:
name: docker-go-tests
path: ./dist/artifacts
- name: Run ${{ matrix.dtest }} Test
# Put the compiled test binary back in the same place as the test source
run: |
chmod +x ./dist/artifacts/${{ matrix.dtest }}.test
mv ./dist/artifacts/${{ matrix.dtest }}.test ./tests/docker/${{ matrix.dtest }}/
cd ./tests/docker/${{ matrix.dtest }}
if [ ${{ matrix.dtest }} = "upgrade" ] || [ ${{ matrix.dtest }} = "skew" ]; then
./${{ matrix.dtest }}.test -k3sImage=$K3S_IMAGE -branch=$BRANCH_NAME
else
./${{ matrix.dtest }}.test -k3sImage=$K3S_IMAGE
fi
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
@@ -146,6 +146,7 @@ require (
go.etcd.io/etcd/server/v3 v3.5.16
go.uber.org/zap v1.27.0
golang.org/x/crypto v0.27.0
golang.org/x/mod v0.20.0
golang.org/x/net v0.29.0
golang.org/x/sync v0.8.0
golang.org/x/sys v0.28.0
@@ -450,7 +451,6 @@ require (
go.uber.org/mock v0.4.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/mod v0.20.0 // indirect
golang.org/x/oauth2 v0.22.0 // indirect
golang.org/x/term v0.24.0 // indirect
golang.org/x/text v0.18.0 // indirect
4 changes: 4 additions & 0 deletions scripts/test
Original file line number Diff line number Diff line change
@@ -23,6 +23,10 @@ docker ps
# Only run basic tests on non amd64 archs, we use GitHub Actions for amd64
if [ "$ARCH" != 'amd64' ]; then

export K3S_IMAGE="rancher/k3s:${VERSION_TAG}${SUFFIX}"
go test ./tests/docker/basics/basics_test.go -k3sImage="$K3S_IMAGE"
echo "Did go test basics $?"

. ./tests/docker/test-run-basics
echo "Did test-run-basics $?"

121 changes: 121 additions & 0 deletions tests/docker/basics/basics_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,121 @@
package main

import (
"flag"
"fmt"
"os"
"strings"
"testing"

tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers")
var config *tester.TestConfig

// Test_DockerBasic is the go test entrypoint: it parses the -k3sImage flag,
// wires Gomega failures into Ginkgo, and runs the basic docker suite below.
func Test_DockerBasic(t *testing.T) {
	flag.Parse()
	RegisterFailHandler(Fail)
	RunSpecs(t, "Basic Docker Test Suite")
}

// Basic smoke tests: provision a 1 server / 1 agent cluster from the image
// under test, exercise local-path storage, and sanity-check bundled binaries
// and the airgap image list. Ordered: later Contexts reuse `config`.
var _ = Describe("Basic Tests", Ordered, func() {

	Context("Setup Cluster", func() {
		It("should provision servers and agents", func() {
			var err error
			config, err = tester.NewTestConfig(*k3sImage)
			Expect(err).NotTo(HaveOccurred())
			Expect(config.ProvisionServers(1)).To(Succeed())
			Expect(config.ProvisionAgents(1)).To(Succeed())
			// Wait for the packaged addons to roll out before asserting anything else.
			Eventually(func() error {
				return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
			}, "60s", "5s").Should(Succeed())
			Eventually(func() error {
				return tester.NodesReady(config.KubeconfigFile)
			}, "40s", "5s").Should(Succeed())
		})
	})

	Context("Use Local Storage Volume", func() {
		It("should apply local storage volume", func() {
			// PVC + pod manifest exercising the local-path provisioner.
			const volumeTestManifest = "../resources/volume-test.yaml"

			// Apply the manifest
			cmd := fmt.Sprintf("kubectl apply -f %s --kubeconfig=%s", volumeTestManifest, config.KubeconfigFile)
			_, err := tester.RunCommand(cmd)
			Expect(err).NotTo(HaveOccurred(), "failed to apply volume test manifest")
		})
		It("should validate local storage volume", func() {
			Eventually(func() (bool, error) {
				return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile)
			}, "20s", "5s").Should(BeTrue())
		})
	})

	Context("Verify Binaries and Images", func() {
		It("has valid bundled binaries", func() {
			for _, server := range config.Servers {
				Expect(tester.VerifyValidVersion(server.Name, "kubectl")).To(Succeed())
				Expect(tester.VerifyValidVersion(server.Name, "ctr")).To(Succeed())
				Expect(tester.VerifyValidVersion(server.Name, "crictl")).To(Succeed())
			}
		})
		It("has valid airgap images", func() {
			Expect(config).To(Not(BeNil()))
			err := VerifyAirgapImages(config)
			Expect(err).NotTo(HaveOccurred())
		})
	})
})

// failed records whether any spec in this suite failed, so that a broken
// cluster is left running for post-mortem debugging instead of being torn down.
var failed bool
var _ = AfterEach(func() {
	failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
	// Only clean up containers/volumes after a fully passing run.
	if config != nil && !failed {
		config.Cleanup()
	}
})

// VerifyAirgapImages checks for changes in the airgap image list
func VerifyAirgapImages(config *tester.TestConfig) error {
// This file is generated during the build packaging step
const airgapImageList = "../../../scripts/airgap/image-list.txt"

// Use a map to automatically handle duplicates
imageSet := make(map[string]struct{})

// Collect all images from nodes
for _, node := range config.GetNodeNames() {
cmd := fmt.Sprintf("docker exec %s crictl images -o json | jq -r '.images[].repoTags[0] | select(. != null)'", node)
output, err := tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to execute crictl and jq: %v", err)

for _, line := range strings.Split(strings.TrimSpace(string(output)), "\n") {
if line != "" {
imageSet[line] = struct{}{}
}
}
}

// Convert map keys to slice
uniqueImages := make([]string, 0, len(imageSet))
for image := range imageSet {
uniqueImages = append(uniqueImages, image)
}

existing, err := os.ReadFile(airgapImageList)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to read airgap list file: %v", err)
}

// Sorting doesn't matter with ConsistOf
existingImages := strings.Split(strings.TrimSpace(string(existing)), "\n")
Expect(existingImages).To(ConsistOf(uniqueImages))
return nil
}
67 changes: 67 additions & 0 deletions tests/docker/bootstraptoken/bootstraptoken_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
package main

import (
"flag"
"strings"
"testing"

tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers")
var config *tester.TestConfig

// Test_DockerBootstrapToken is the go test entrypoint: it parses the -k3sImage
// flag, wires Gomega failures into Ginkgo, and runs the bootstrap-token suite.
// Fixes the misspelled suite name ("BoostrapToken") shown in test reports.
func Test_DockerBootstrapToken(t *testing.T) {
	flag.Parse()
	RegisterFailHandler(Fail)
	RunSpecs(t, "BootstrapToken Docker Test Suite")
}

// Bootstrap token tests: stand up a single server, mint a short-lived join
// token with `k3s token create`, and verify an agent can join using it.
// Fixes the misspelled description ("Boostrap") shown in test reports.
var _ = Describe("Bootstrap Token Tests", Ordered, func() {

	Context("Setup Cluster", func() {
		It("should provision servers", func() {
			var err error
			config, err = tester.NewTestConfig(*k3sImage)
			Expect(err).NotTo(HaveOccurred())
			Expect(config.ProvisionServers(1)).To(Succeed())
			Eventually(func() error {
				return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
			}, "60s", "5s").Should(Succeed())
		})
	})

	Context("Add Agent with Bootstrap token", func() {
		var newSecret string
		It("creates a bootstrap token", func() {
			var err error
			newSecret, err = tester.RunCmdOnDocker(config.Servers[0].Name, "k3s token create --ttl=5m --description=Test")
			Expect(err).NotTo(HaveOccurred())
			Expect(newSecret).NotTo(BeEmpty())
		})
		It("joins the agent with the new tokens", func() {
			// The token arrives with a trailing newline from the shell; strip
			// it before handing it to the agent as the cluster secret.
			newSecret = strings.ReplaceAll(newSecret, "\n", "")
			config.Secret = newSecret
			Expect(config.ProvisionAgents(1)).To(Succeed())
			Eventually(func(g Gomega) {
				nodes, err := tester.ParseNodes(config.KubeconfigFile)
				g.Expect(err).NotTo(HaveOccurred())
				g.Expect(nodes).To(HaveLen(2))
				g.Expect(tester.NodesReady(config.KubeconfigFile)).To(Succeed())
			}, "40s", "5s").Should(Succeed())
		})
	})
})

var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
if config != nil && !failed {
config.Cleanup()
}
})
103 changes: 103 additions & 0 deletions tests/docker/cacerts/cacerts_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
package main

import (
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"testing"

tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers")
var config *tester.TestConfig
var testID string

func Test_DockerCACerts(t *testing.T) {
flag.Parse()
RegisterFailHandler(Fail)
RunSpecs(t, "CA Certs Docker Test Suite")
}

// CA certs tests: generate a custom CA hierarchy, share it with the server
// containers through a docker volume, and verify the cluster comes up using it.
var _ = Describe("CA Certs Tests", Ordered, func() {

	Context("Setup Cluster", func() {
		// TODO determine if the below is still true
		// This test runs in docker mounting the docker socket,
		// so we can't directly mount files into the test containers. Instead we have to
		// run a dummy container with a volume, copy files into that volume, and then
		// share it with the other containers that need the file.
		It("should configure CA certs", func() {
			var err error
			config, err = tester.NewTestConfig(*k3sImage)
			Expect(err).NotTo(HaveOccurred())
			Expect(os.MkdirAll(filepath.Join(config.TestDir, "pause"), 0755)).To(Succeed())

			// The test directory basename doubles as a unique ID for naming
			// the pause container and its shared TLS volume.
			testID = filepath.Base(config.TestDir)
			pauseName := fmt.Sprintf("k3s-pause-%s", strings.ToLower(testID))
			tlsMount := fmt.Sprintf("--mount type=volume,src=%s,dst=/var/lib/rancher/k3s/server/tls", pauseName)
			cmd := fmt.Sprintf("docker run -d --name %s --hostname %s %s rancher/mirrored-pause:3.6",
				pauseName, pauseName, tlsMount)
			_, err = tester.RunCommand(cmd)
			Expect(err).NotTo(HaveOccurred())

			// Generate the custom CA material on the host...
			dataDir := filepath.Join(config.TestDir, "pause/k3s")
			cmd = fmt.Sprintf("DATA_DIR=%s ../../../contrib/util/generate-custom-ca-certs.sh", dataDir)
			_, err = tester.RunCommand(cmd)
			Expect(err).NotTo(HaveOccurred())

			// ...then copy it into the pause container so it lands in the
			// shared volume that the servers will also mount.
			cmd = fmt.Sprintf("docker cp %s %s:/var/lib/rancher", dataDir, pauseName)
			_, err = tester.RunCommand(cmd)
			Expect(err).NotTo(HaveOccurred())

			// Mount the shared TLS volume into every server container via
			// the SERVER_DOCKER_ARGS environment variable.
			os.Setenv("SERVER_DOCKER_ARGS", tlsMount)
		})

		It("should provision servers and agents", func() {
			Expect(config.ProvisionServers(1)).To(Succeed())
			Expect(config.ProvisionAgents(1)).To(Succeed())
			Eventually(func() error {
				return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
			}, "60s", "5s").Should(Succeed())
		})
	})

	Context("Verify Custom CA Certs", func() {
		It("should have custom CA certs", func() {
			// Confirm the generated CA material is visible in each server's
			// TLS directory (mounted from the shared pause-container volume).
			for _, server := range config.Servers {
				cmd := fmt.Sprintf("docker exec %s ls /var/lib/rancher/k3s/server/tls", server.Name)
				output, err := tester.RunCommand(cmd)
				Expect(err).NotTo(HaveOccurred(), "failed to list custom CA certs: %v", err)
				Expect(output).To(ContainSubstring("ca.crt"))
			}
		})
	})
})

// failed records whether any spec failed, so the cluster and the pause
// container are preserved for debugging instead of being torn down.
var failed bool
var _ = AfterEach(func() {
	failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
	// On a passing run, tear down the cluster plus the extra resources this
	// suite created: the pause container and any test-scoped docker volumes.
	if config != nil && !failed {
		config.Cleanup()
		cmd := fmt.Sprintf("docker stop k3s-pause-%s", testID)
		_, err := tester.RunCommand(cmd)
		Expect(err).NotTo(HaveOccurred())
		cmd = fmt.Sprintf("docker rm k3s-pause-%s", testID)
		_, err = tester.RunCommand(cmd)
		Expect(err).NotTo(HaveOccurred())
		// -r makes xargs a no-op when grep finds no matching volumes.
		cmd = fmt.Sprintf("docker volume ls -q | grep -F %s | xargs -r docker volume rm -f", testID)
		_, err = tester.RunCommand(cmd)
		Expect(err).NotTo(HaveOccurred())
	}

})
79 changes: 79 additions & 0 deletions tests/docker/etcd/etcd_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
package main

import (
"flag"
"os"
"testing"

tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers")
var config *tester.TestConfig

func Test_DockerEtcd(t *testing.T) {
flag.Parse()
RegisterFailHandler(Fail)
RunSpecs(t, "Etcd Docker Test Suite")
}

// Etcd tests: first a plain 3-server embedded-etcd cluster, then a split-role
// topology (3 etcd-only, 2 control-plane-only, 1 agent). Ordered: the second
// Context reuses the package-level config after the first tears down.
var _ = Describe("Etcd Tests", Ordered, func() {

	Context("Test a 3 server cluster", func() {
		It("should setup the cluster configuration", func() {
			var err error
			config, err = tester.NewTestConfig(*k3sImage)
			Expect(err).NotTo(HaveOccurred())
		})
		It("should provision servers", func() {
			Expect(config.ProvisionServers(3)).To(Succeed())
			Eventually(func() error {
				return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
			}, "60s", "5s").Should(Succeed())
			Eventually(func(g Gomega) {
				g.Expect(tester.ParseNodes(config.KubeconfigFile)).To(HaveLen(3))
				g.Expect(tester.NodesReady(config.KubeconfigFile)).To(Succeed())
			}, "60s", "5s").Should(Succeed())
		})
		It("should destroy the cluster", func() {
			Expect(config.Cleanup()).To(Succeed())
		})
	})

	Context("Test a Split Role cluster with 3 etcd, 2 control-plane, 1 agents", func() {
		It("should setup the cluster configuration", func() {
			var err error
			config, err = tester.NewTestConfig(*k3sImage)
			Expect(err).NotTo(HaveOccurred())
			// Per-server role flags are passed through SERVER_<i>_ARGS env
			// vars: servers 0-2 run etcd only, servers 3-4 run the control
			// plane without etcd.
			Expect(os.Setenv("SERVER_0_ARGS", "--disable-apiserver --disable-controller-manager --disable-scheduler --cluster-init")).To(Succeed())
			Expect(os.Setenv("SERVER_1_ARGS", "--disable-apiserver --disable-controller-manager --disable-scheduler")).To(Succeed())
			Expect(os.Setenv("SERVER_2_ARGS", "--disable-apiserver --disable-controller-manager --disable-scheduler")).To(Succeed())
			Expect(os.Setenv("SERVER_3_ARGS", "--disable-etcd")).To(Succeed())
			Expect(os.Setenv("SERVER_4_ARGS", "--disable-etcd")).To(Succeed())
		})
		It("should provision servers and agents", func() {
			Expect(config.ProvisionServers(5)).To(Succeed())
			Expect(config.ProvisionAgents(1)).To(Succeed())
			Eventually(func() error {
				return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
			}, "90s", "5s").Should(Succeed())
			// 5 servers + 1 agent = 6 nodes expected.
			Eventually(func(g Gomega) {
				g.Expect(tester.ParseNodes(config.KubeconfigFile)).To(HaveLen(6))
				g.Expect(tester.NodesReady(config.KubeconfigFile)).To(Succeed())
			}, "60s", "5s").Should(Succeed())
		})
	})
})

var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
if config != nil && !failed {
config.Cleanup()
}
})
136 changes: 136 additions & 0 deletions tests/docker/lazypull/lazypull_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,136 @@
package main

import (
"flag"
"fmt"
"os"
"strings"
"testing"

tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

var k3sImage = flag.String("k3sImage", "", "The k3s image used to provision containers")
var config *tester.TestConfig

// Test_DockerLazyPull is the go test entrypoint: it parses the -k3sImage flag,
// wires Gomega failures into Ginkgo, and runs the stargz lazy-pull suite.
func Test_DockerLazyPull(t *testing.T) {
	flag.Parse()
	RegisterFailHandler(Fail)
	RunSpecs(t, "LazyPull Docker Test Suite")
}

var _ = Describe("LazyPull Tests", Ordered, func() {

Context("Setup Cluster", func() {
It("should provision servers", func() {
var err error
config, err = tester.NewTestConfig(*k3sImage)
Expect(err).NotTo(HaveOccurred())

Expect(os.Setenv("SERVER_ARGS", "--snapshotter=stargz")).To(Succeed())
Expect(config.ProvisionServers(1)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
Eventually(func() error {
return tester.NodesReady(config.KubeconfigFile)
}, "40s", "5s").Should(Succeed())
})
})

Context("Use Snapshot Container", func() {
It("should apply local storage volume", func() {
const snapshotTestManifest = "../resources/snapshot-test.yaml"

// Apply the manifest
cmd := fmt.Sprintf("kubectl apply -f %s --kubeconfig=%s", snapshotTestManifest, config.KubeconfigFile)
_, err := tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to apply volume test manifest")
})
It("should have the pod come up", func() {
Eventually(func() (bool, error) {
return tester.PodReady("stargz-snapshot-test", "default", config.KubeconfigFile)
}, "30s", "5s").Should(BeTrue())
})
var topLayer string
It("extracts the topmost layer of the container", func() {
Eventually(func() (string, error) {
var err error
topLayer, err = getTopmostLayer(config.Servers[0].Name, "stargz-snapshot-test")
topLayer = strings.TrimSpace(topLayer)
return topLayer, err
}, "30s", "5s").ShouldNot(BeEmpty())
fmt.Println("Topmost layer: ", topLayer)
})
It("checks all layers are remote snapshots", func() {
Expect(lookLayers(config.Servers[0].Name, topLayer)).To(Succeed())
})
})
})

var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
if config != nil && !failed {
config.Cleanup()
}
})

// lookLayers walks the stargz snapshot chain starting from the given layer,
// following each snapshot's Parent, and verifies every layer in the chain
// carries the containerd remote-snapshot label (i.e. was lazily pulled).
// Returns an error if any layer is local, the chain is empty, or it exceeds
// 100 layers.
func lookLayers(node, layer string) error {
	remoteSnapshotLabel := "containerd.io/snapshot/remote"
	layersNum := 0
	var err error
	for layersNum = 0; layersNum < 100; layersNum++ {
		// We use RunCommand instead of RunCmdOnDocker because we pipe the output to jq
		// NOTE(review): this uses `ctr ... snapshot info` while the label query
		// below uses `snapshots` — presumably both are aliases; confirm.
		cmd := fmt.Sprintf("docker exec -i %s ctr --namespace=k8s.io snapshot --snapshotter=stargz info %s | jq -r '.Parent'", node, layer)
		// `layer` is re-assigned each pass, so the loop climbs parent by parent.
		layer, err = tester.RunCommand(cmd)
		if err != nil {
			return fmt.Errorf("failed to get parent layer: %v", err)
		}
		layer = strings.TrimSpace(layer)
		// If the layer is null, we have reached the topmost layer
		if layer == "null" {
			break
		}
		cmd = fmt.Sprintf("docker exec -i %s ctr --namespace=k8s.io snapshots --snapshotter=stargz info %s | jq -r '.Labels.\"%s\"'", node, layer, remoteSnapshotLabel)
		label, err := tester.RunCommand(cmd)
		if err != nil {
			return fmt.Errorf("failed to get layer label: %v", err)
		}
		label = strings.TrimSpace(label)
		fmt.Printf("Checking layer %s : %s\n", layer, label)
		// jq prints "null" when the remote-snapshot label is absent.
		if label == "null" {
			return fmt.Errorf("layer %s isn't remote snapshot", layer)
		}
	}

	if layersNum == 0 {
		return fmt.Errorf("cannot get layers")
	} else if layersNum >= 100 {
		return fmt.Errorf("testing image contains too many layers > 100")
	}

	return nil
}

// getTopmostLayer returns the containerd snapshot key of the topmost layer of
// the named container running on the given docker node, by resolving the
// container ID via ctr and reading its SnapshotKey with jq.
func getTopmostLayer(node, container string) (string, error) {
	cmd := fmt.Sprintf("docker exec -i %s ctr --namespace=k8s.io c ls -q labels.\"io.kubernetes.container.name\"==\"%s\" | sed -n 1p", node, container)
	// Previously this error was discarded with `_`, which hid docker/ctr
	// failures behind the generic "failed to get target container" message.
	targetContainer, err := tester.RunCommand(cmd)
	if err != nil {
		return "", fmt.Errorf("failed to list containers: %v", err)
	}
	targetContainer = strings.TrimSpace(targetContainer)
	fmt.Println("targetContainer: ", targetContainer)
	if targetContainer == "" {
		return "", fmt.Errorf("failed to get target container")
	}
	cmd = fmt.Sprintf("docker exec -i %s ctr --namespace=k8s.io c info %s | jq -r '.SnapshotKey'", node, targetContainer)
	layer, err := tester.RunCommand(cmd)
	if err != nil {
		return "", fmt.Errorf("failed to get topmost layer: %v", err)
	}
	return strings.TrimSpace(layer), nil
}
10 changes: 10 additions & 0 deletions tests/docker/resources/snapshot-test.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
apiVersion: v1
kind: Pod
metadata:
name: stargz-snapshot-test
spec:
containers:
- name: stargz-snapshot-test
image: "ghcr.io/stargz-containers/k3s-test-ubuntu:20.04-esgz"
command: ["sleep"]
args: ["infinity"]
30 changes: 30 additions & 0 deletions tests/docker/resources/volume-test.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: local-path-pvc
namespace: kube-system
spec:
accessModes:
- ReadWriteOnce
storageClassName: local-path
resources:
requests:
storage: 2Gi
---
apiVersion: v1
kind: Pod
metadata:
name: volume-test
namespace: kube-system
spec:
containers:
- name: volume-test
image: rancher/mirrored-pause:3.6
imagePullPolicy: IfNotPresent
volumeMounts:
- name: volv
mountPath: /data
volumes:
- name: volv
persistentVolumeClaim:
claimName: local-path-pvc
146 changes: 146 additions & 0 deletions tests/docker/skew/skew_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,146 @@
package main

import (
"flag"
"fmt"
"strings"
"testing"

"github.com/blang/semver/v4"
tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

// Using these two flags, we upgrade from the latest release of <branch> to
// the current commit build of K3s defined by <k3sImage>
var k3sImage = flag.String("k3sImage", "", "The current commit build of K3s")
var branch = flag.String("branch", "master", "The release branch to test")
var config *tester.TestConfig

var numServers = 1
var numAgents = 1

func Test_DockerSkew(t *testing.T) {
flag.Parse()
RegisterFailHandler(Fail)
RunSpecs(t, "Skew Docker Test Suite")
}

// lastMinorVersion holds the latest released version of the previous minor
// branch, resolved once before the suite runs.
var lastMinorVersion string
var _ = BeforeSuite(func() {
	// If this test runs on v1.31 commit, we want the latest v1.30 release
	// For master and unreleased branches, we want the latest stable release
	var upgradeChannel string
	var err error
	if *branch == "master" {
		upgradeChannel = "stable"
	} else {
		upgradeChannel = strings.Replace(*branch, "release-", "v", 1)
		// now that it is in v1.1 format, we want to subtract one from the minor version
		// to get the previous release
		// NOTE(review): strict semver parsers reject a leading "v" and a
		// two-part "X.Y" version — confirm semver.Parse accepts "v1.31" here.
		sV, err := semver.Parse(upgradeChannel)
		Expect(err).NotTo(HaveOccurred())
		sV.Minor--
		upgradeChannel = sV.String()
	}

	lastMinorVersion, err = tester.GetVersionFromChannel(upgradeChannel)
	Expect(err).NotTo(HaveOccurred())
	Expect(lastMinorVersion).To(ContainSubstring("v1."))
	fmt.Println("Using last minor version: ", lastMinorVersion)
})

var _ = Describe("Skew Tests", Ordered, func() {

Context("Setup Cluster with Server newer than Agent", func() {
It("should provision new servers and old agents", func() {
var err error
config, err = tester.NewTestConfig(*k3sImage)
Expect(err).NotTo(HaveOccurred())
Expect(config.ProvisionServers(numServers)).To(Succeed())
config.K3sImage = "rancher/k3s:" + lastMinorVersion
Expect(config.ProvisionAgents(numAgents)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
})
It("should match respective versions", func() {
for _, server := range config.Servers {
out, err := tester.RunCmdOnDocker(server.Name, "k3s --version")
Expect(err).NotTo(HaveOccurred())
// The k3s image is in the format rancher/k3s:v1.20.0-k3s1
cVersion := strings.Split(*k3sImage, ":")[1]
cVersion = strings.Replace(cVersion, "-amd64", "", 1)
cVersion = strings.Replace(cVersion, "-", "+", 1)
Expect(out).To(ContainSubstring(cVersion))
}
for _, agent := range config.Agents {
Expect(tester.RunCmdOnDocker(agent.Name, "k3s --version")).
To(ContainSubstring(strings.Replace(lastMinorVersion, "-", "+", 1)))
}
})
It("should deploy a test pod", func() {
const volumeTestManifest = "../resources/volume-test.yaml"

// Apply the manifest
cmd := fmt.Sprintf("kubectl apply -f %s --kubeconfig=%s", volumeTestManifest, config.KubeconfigFile)
_, err := tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to apply volume test manifest")

Eventually(func() (bool, error) {
return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile)
}, "20s", "5s").Should(BeTrue())
})
It("should destroy the cluster", func() {
Expect(config.Cleanup()).To(Succeed())
})
})
Context("Test cluster with 1 Server older and 2 Servers newer", func() {
It("should setup the cluster configuration", func() {
var err error
config, err = tester.NewTestConfig("rancher/k3s:" + lastMinorVersion)
Expect(err).NotTo(HaveOccurred())
})
It("should provision servers", func() {
Expect(config.ProvisionServers(1)).To(Succeed())
config.K3sImage = *k3sImage
Expect(config.ProvisionServers(3)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
Eventually(func(g Gomega) {
g.Expect(tester.ParseNodes(config.KubeconfigFile)).To(HaveLen(3))
g.Expect(tester.NodesReady(config.KubeconfigFile)).To(Succeed())
}, "60s", "5s").Should(Succeed())
})
It("should match respective versions", func() {
out, err := tester.RunCmdOnDocker(config.Servers[0].Name, "k3s --version")
Expect(err).NotTo(HaveOccurred())
Expect(out).To(ContainSubstring(strings.Replace(lastMinorVersion, "-", "+", 1)))
for _, server := range config.Servers[1:] {
out, err := tester.RunCmdOnDocker(server.Name, "k3s --version")
Expect(err).NotTo(HaveOccurred())
// The k3s image is in the format rancher/k3s:v1.20.0-k3s1-amd64
cVersion := strings.Split(*k3sImage, ":")[1]
cVersion = strings.Replace(cVersion, "-amd64", "", 1)
cVersion = strings.Replace(cVersion, "-", "+", 1)
Expect(out).To(ContainSubstring(cVersion))
}
})
It("should destroy the cluster", func() {
Expect(config.Cleanup()).To(Succeed())
})
})
})

var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
if config != nil && !failed {
config.Cleanup()
}
})
491 changes: 491 additions & 0 deletions tests/docker/test-helpers.go

Large diffs are not rendered by default.

160 changes: 160 additions & 0 deletions tests/docker/upgrade/upgrade_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,160 @@
package main

import (
"flag"
"fmt"
"net/http"
"os"
"path/filepath"
"strings"
"testing"

tester "github.com/k3s-io/k3s/tests/docker"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)

// Using these two flags, we upgrade from the latest release of <branch> to
// the current commit build of K3s defined by <k3sImage>
var k3sImage = flag.String("k3sImage", "", "The current commit build of K3s")
var branch = flag.String("branch", "master", "The release branch to test")
var config *tester.TestConfig

var numServers = 1
var numAgents = 1

// Test_DockerUpgrade is the go test entrypoint: it parses the -k3sImage and
// -branch flags, wires Gomega failures into Ginkgo, and runs the upgrade suite.
func Test_DockerUpgrade(t *testing.T) {
	flag.Parse()
	RegisterFailHandler(Fail)
	RunSpecs(t, "Upgrade Docker Test Suite")
}

var _ = Describe("Upgrade Tests", Ordered, func() {

Context("Setup Cluster with Lastest Release", func() {
var latestVersion string
It("should determine latest branch version", func() {
var upgradeChannel string
var err error
if *branch == "master" {
upgradeChannel = "latest"
} else {
upgradeChannel = strings.Replace(*branch, "release-", "v", 1)
url := fmt.Sprintf("https://update.k3s.io/v1-release/channels/%s", upgradeChannel)
resp, err := http.Head(url)
// Cover the case where the branch does not exist yet,
// such as a new unreleased minor version
if err != nil || resp.StatusCode != http.StatusOK {
upgradeChannel = "latest"
}
}

latestVersion, err = tester.GetVersionFromChannel(upgradeChannel)
Expect(err).NotTo(HaveOccurred())
Expect(latestVersion).To(ContainSubstring("v1."))
fmt.Println("Using latest version: ", latestVersion)
})
It("should setup environment", func() {
var err error
config, err = tester.NewTestConfig("rancher/k3s:" + latestVersion)
testID := filepath.Base(config.TestDir)
Expect(err).NotTo(HaveOccurred())
for i := 0; i < numServers; i++ {
m1 := fmt.Sprintf("--mount type=volume,src=k3s-server-%d-%s-rancher,dst=/var/lib/rancher/k3s", i, testID)
m2 := fmt.Sprintf("--mount type=volume,src=k3s-server-%d-%s-log,dst=/var/log", i, testID)
m3 := fmt.Sprintf("--mount type=volume,src=k3s-server-%d-%s-etc,dst=/etc/rancher", i, testID)
Expect(os.Setenv(fmt.Sprintf("SERVER_%d_DOCKER_ARGS", i), fmt.Sprintf("%s %s %s", m1, m2, m3))).To(Succeed())
}
for i := 0; i < numAgents; i++ {
m1 := fmt.Sprintf("--mount type=volume,src=k3s-agent-%d-%s-rancher,dst=/var/lib/rancher/k3s", i, testID)
m2 := fmt.Sprintf("--mount type=volume,src=k3s-agent-%d-%s-log,dst=/var/log", i, testID)
m3 := fmt.Sprintf("--mount type=volume,src=k3s-agent-%d-%s-etc,dst=/etc/rancher", i, testID)
Expect(os.Setenv(fmt.Sprintf("AGENT_%d_DOCKER_ARGS", i), fmt.Sprintf("%s %s %s", m1, m2, m3))).To(Succeed())
}
})
It("should provision servers and agents", func() {
Expect(config.ProvisionServers(numServers)).To(Succeed())
Expect(config.ProvisionAgents(numAgents)).To(Succeed())
Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
})
It("should confirm latest version", func() {
for _, server := range config.Servers {
out, err := tester.RunCmdOnDocker(server.Name, "k3s --version")
Expect(err).NotTo(HaveOccurred())
Expect(out).To(ContainSubstring(strings.Replace(latestVersion, "-", "+", 1)))
}
})
It("should deploy a test pod", func() {
const volumeTestManifest = "../resources/volume-test.yaml"

// Apply the manifest
cmd := fmt.Sprintf("kubectl apply -f %s --kubeconfig=%s", volumeTestManifest, config.KubeconfigFile)
_, err := tester.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed to apply volume test manifest")

Eventually(func() (bool, error) {
return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile)
}, "20s", "5s").Should(BeTrue())
})
It("should upgrade to current commit build", func() {
By("Remove old servers and agents")
for _, server := range config.Servers {
cmd := fmt.Sprintf("docker stop %s", server.Name)
Expect(tester.RunCommand(cmd)).Error().NotTo(HaveOccurred())
cmd = fmt.Sprintf("docker rm %s", server.Name)
Expect(tester.RunCommand(cmd)).Error().NotTo(HaveOccurred())
fmt.Printf("Stopped %s\n", server.Name)
}
config.Servers = nil

for _, agent := range config.Agents {
cmd := fmt.Sprintf("docker stop %s", agent.Name)
Expect(tester.RunCommand(cmd)).Error().NotTo(HaveOccurred())
cmd = fmt.Sprintf("docker rm %s", agent.Name)
Expect(tester.RunCommand(cmd)).Error().NotTo(HaveOccurred())
}
config.Agents = nil

config.K3sImage = *k3sImage
Expect(config.ProvisionServers(numServers)).To(Succeed())
Expect(config.ProvisionAgents(numAgents)).To(Succeed())

Eventually(func() error {
return tester.DeploymentsReady([]string{"coredns", "local-path-provisioner", "metrics-server", "traefik"}, config.KubeconfigFile)
}, "60s", "5s").Should(Succeed())
})
It("should confirm commit version", func() {
for _, server := range config.Servers {
Expect(tester.VerifyValidVersion(server.Name, "kubectl")).To(Succeed())
Expect(tester.VerifyValidVersion(server.Name, "ctr")).To(Succeed())
Expect(tester.VerifyValidVersion(server.Name, "crictl")).To(Succeed())

out, err := tester.RunCmdOnDocker(server.Name, "k3s --version")
Expect(err).NotTo(HaveOccurred())
cVersion := strings.Split(*k3sImage, ":")[1]
cVersion = strings.Replace(cVersion, "-amd64", "", 1)
cVersion = strings.Replace(cVersion, "-", "+", 1)
Expect(out).To(ContainSubstring(cVersion))
}
})
It("should confirm test pod is still Running", func() {
Eventually(func() (bool, error) {
return tester.PodReady("volume-test", "kube-system", config.KubeconfigFile)
}, "20s", "5s").Should(BeTrue())
})

})
})

var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
if config != nil && !failed {
config.Cleanup()
}
})

0 comments on commit 3c273b0

Please sign in to comment.