From eedc4ebce158be35953516838435e72de8f6f506 Mon Sep 17 00:00:00 2001
From: Andrei Kvapil
Date: Wed, 12 Jun 2024 19:19:14 +0200
Subject: [PATCH] Add e2e tests

Signed-off-by: Andrei Kvapil
---
 hack/e2e.sh | 305 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 305 insertions(+)
 create mode 100755 hack/e2e.sh

diff --git a/hack/e2e.sh b/hack/e2e.sh
new file mode 100755
index 00000000..90c8314d
--- /dev/null
+++ b/hack/e2e.sh
@@ -0,0 +1,305 @@
+#!/bin/bash
+if [ "$COZYSTACK_INSTALLER_YAML" = "" ]; then
+  echo 'COZYSTACK_INSTALLER_YAML variable is not set!' >&2
+  echo 'please set it with the following command:' >&2
+  echo >&2
+  echo 'export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)' >&2
+  echo >&2
+  exit 1
+fi
+
+set -x
+set -e
+
+kill `cat srv1/qemu.pid srv2/qemu.pid srv3/qemu.pid` || true
+
+ip link del cozy-br0 || true
+ip link add cozy-br0 type bridge
+ip link set cozy-br0 up
+ip addr add 192.168.123.1/24 dev cozy-br0
+
+rm -rf srv1 srv2 srv3
+mkdir -p srv1 srv2 srv3
+
+# Prepare cloud-init
+for i in 1 2 3; do
+  echo "local-hostname: srv$i" > "srv$i/meta-data"
+  echo '#cloud-config' > "srv$i/user-data"
+  cat > "srv$i/network-config" <
[... the network-config heredoc, the rest of the loop, and the VM disk/QEMU startup commands were lost in extraction ...]
+cat > patch.yaml <<\EOT
+machine:
+  kubelet:
+    nodeIP:
+      validSubnets:
+      - 192.168.123.0/24
+    extraConfig:
+      maxPods: 512
+  kernel:
+    modules:
+    - name: openvswitch
+    - name: drbd
+      parameters:
+      - usermode_helper=disabled
+    - name: zfs
+    - name: spl
+  install:
+    image: ghcr.io/aenix-io/cozystack/talos:v1.7.1
+  files:
+  - content: |
+      [plugins]
+        [plugins."io.containerd.grpc.v1.cri"]
+          device_ownership_from_security_context = true
+    path: /etc/cri/conf.d/20-customization.part
+    op: create
+
+cluster:
+  network:
+    cni:
+      name: none
+    dnsDomain: cozy.local
+    podSubnets:
+    - 10.244.0.0/16
+    serviceSubnets:
+    - 10.96.0.0/16
+EOT
+
+cat > patch-controlplane.yaml <<\EOT
+machine:
+  network:
+    interfaces:
+    - interface: eth0
+      vip:
+        ip: 192.168.123.10
+cluster:
+  allowSchedulingOnControlPlanes: true
+  controllerManager:
+    extraArgs:
+      bind-address: 0.0.0.0
+  scheduler:
+    extraArgs:
+      bind-address: 0.0.0.0
+  apiServer:
+    certSANs:
+    - 127.0.0.1
+  proxy:
+    disabled: true
+  discovery:
+    enabled: false
+  etcd:
+    advertisedSubnets:
+    - 192.168.123.0/24
+EOT
+
+# Gen configuration
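+# Note: an existing secrets.yaml is reused, so repeated runs regenerate the
+# machine configs with the same cluster secrets.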
+if [ ! -f secrets.yaml ]; then
+  talosctl gen secrets
+fi
+
+rm -f controlplane.yaml worker.yaml talosconfig kubeconfig
+talosctl gen config --with-secrets secrets.yaml cozystack https://192.168.123.10:6443 --config-patch=@patch.yaml --config-patch-control-plane @patch-controlplane.yaml
+export TALOSCONFIG=$PWD/talosconfig
+
+# Apply configuration
+talosctl apply -f controlplane.yaml -n 192.168.123.11 -e 192.168.123.11 -i
+talosctl apply -f controlplane.yaml -n 192.168.123.12 -e 192.168.123.12 -i
+talosctl apply -f controlplane.yaml -n 192.168.123.13 -e 192.168.123.13 -i
+
+# Wait for the VMs to be configured
+timeout 60 sh -c 'until nc -nzv 192.168.123.11 50000 && nc -nzv 192.168.123.12 50000 && nc -nzv 192.168.123.13 50000; do sleep 1; done'
+
+# Bootstrap
+talosctl bootstrap -n 192.168.123.11 -e 192.168.123.11
+
+# Wait for etcd
+timeout 120 sh -c 'while talosctl etcd members -n 192.168.123.11,192.168.123.12,192.168.123.13 -e 192.168.123.10 2>&1 | grep "rpc error"; do sleep 1; done'
+
+rm -f kubeconfig
+talosctl kubeconfig kubeconfig -e 192.168.123.10 -n 192.168.123.10
+export KUBECONFIG=$PWD/kubeconfig
+
+# Wait for the Kubernetes nodes to appear
+timeout 60 sh -c 'until [ $(kubectl get node -o name | wc -l) = 3 ]; do sleep 1; done'
+kubectl create ns cozy-system
+kubectl create -f - <<\EOT
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: cozystack
+  namespace: cozy-system
+data:
+  bundle-name: "paas-full"
+  ipv4-pod-cidr: "10.244.0.0/16"
+  ipv4-pod-gateway: "10.244.0.1"
+  ipv4-svc-cidr: "10.96.0.0/16"
+  ipv4-join-cidr: "100.64.0.0/16"
+EOT
+
+# Run the Cozystack installer
+echo "$COZYSTACK_INSTALLER_YAML" | kubectl apply -f -
+
+# Wait for the cozystack deployment to become available
+kubectl wait deploy --timeout=1m --for=condition=available -n cozy-system cozystack
+
+# Wait for the HelmReleases to appear
+timeout 60 sh -c 'until kubectl get hr -A | grep cozy; do sleep 1; done'
+
+sleep 5
+
+kubectl get hr -A | awk 'NR>1 {print "kubectl wait --timeout=15m --for=condition=ready -n " $1 " hr/" $2 " &"} END{print "wait"}' | sh -x
+# Wait for the LINSTOR controller
+kubectl wait deploy --timeout=5m --for=condition=available -n cozy-linstor linstor-controller
+
+# Wait for all LINSTOR nodes to become Online
+timeout 60 sh -c 'until [ $(kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor node list | grep -c Online) = 3 ]; do sleep 1; done'
+
+kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv1 /dev/vdc --pool-name data --storage-pool data
+kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv2 /dev/vdc --pool-name data --storage-pool data
+kubectl exec -n cozy-linstor deploy/linstor-controller -- linstor ps cdp zfs srv3 /dev/vdc --pool-name data --storage-pool data
+
+kubectl create -f- <
[... the remainder of the script was lost in extraction ...]
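A minimal sketch of how this script might be run locally once the patch is
applied (assumptions: a Linux host with KVM, helm, talosctl, kubectl, and nc
installed; the Cozystack repository root as the working directory; root
privileges, since the script creates a bridge and manages QEMU VMs):

    export COZYSTACK_INSTALLER_YAML=$(helm template -n cozy-system installer packages/core/installer)
    sudo -E hack/e2e.sh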