diff --git a/.dockerignore b/.dockerignore
index dfd10064..c8a818f2 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -2,4 +2,3 @@ Cargo.toml
 Cargo.lock
 rust-toolchain
 validator/
-target/
diff --git a/.github/workflows/cni-plugin-integration.yml b/.github/workflows/integration-cni-plugin.yml
similarity index 77%
rename from .github/workflows/cni-plugin-integration.yml
rename to .github/workflows/integration-cni-plugin.yml
index b0e6e6a2..87200306 100644
--- a/.github/workflows/cni-plugin-integration.yml
+++ b/.github/workflows/integration-cni-plugin.yml
@@ -1,13 +1,13 @@
-name: cni-plugin-integration
+name: integration-cni-plugin

 on:
   workflow_dispatch:
   pull_request:
     paths:
+      - .github/workflows/integration-cni-plugin.yml
       - Dockerfile-cni-plugin
-      - cni-plugin/integration/flannel/Dockerfile-tester
-      - cni-plugin/integration/run.sh
       - cni-plugin/**
+      - reinitialize-pods/**

 jobs:
   cni-flannel-test:
@@ -46,3 +46,11 @@ jobs:
       - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Run CNI ordering tests
         run: just cni-plugin-test-ordering
+  reinitialize-pods:
+    timeout-minutes: 15
+    runs-on: ubuntu-latest
+    steps:
+      - uses: linkerd/dev/actions/setup-tools@v42
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - name: Run reinitialize-pods tests
+        run: just reinitialize-pods-integration
diff --git a/.github/workflows/release-reinitialize-pods.yml b/.github/workflows/release-reinitialize-pods.yml
index 321b8aff..d633fb89 100644
--- a/.github/workflows/release-reinitialize-pods.yml
+++ b/.github/workflows/release-reinitialize-pods.yml
@@ -37,7 +37,9 @@ jobs:
     container: ghcr.io/linkerd/dev:v42-rust-musl
     steps:
       - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-      - run: just reinitialize-pods arch=${{ matrix.arch }} profile=release version=${{ needs.meta.outputs.version }} package
+      - run: just --justfile=justfile-rust arch=${{ matrix.arch }} profile=release version=${{ needs.meta.outputs.version }} package
+        env:
+          TARGETCRATE: reinitialize-pods
       - uses: actions/upload-artifact@v3
         with:
           name: ${{ matrix.arch }}-artifacts
diff --git a/Dockerfile-cni-plugin b/Dockerfile-cni-plugin
index b35623a7..18e2c0d5 100644
--- a/Dockerfile-cni-plugin
+++ b/Dockerfile-cni-plugin
@@ -16,12 +16,17 @@ FROM --platform=$BUILDPLATFORM curlimages/curl:7.86.0 as fetch
 WORKDIR /build
 ARG TARGETARCH
 ARG LINKERD_REINITIALIZE_PODS_VERSION=v0.1.0
-# TODO: replace org with linkerd, once we make a first release
-RUN curl -LO https://github.com/alpeb/linkerd2-proxy-init/releases/download/reinitialize-pods%2F${LINKERD_REINITIALIZE_PODS_VERSION}/linkerd-reinitialize-pods-${LINKERD_REINITIALIZE_PODS_VERSION}-${TARGETARCH}.tgz
+RUN curl -LO https://github.com/linkerd/linkerd2-proxy-init/releases/download/reinitialize-pods%2F${LINKERD_REINITIALIZE_PODS_VERSION}/linkerd-reinitialize-pods-${LINKERD_REINITIALIZE_PODS_VERSION}-${TARGETARCH}.tgz
 RUN tar -zxvf linkerd-reinitialize-pods-${LINKERD_REINITIALIZE_PODS_VERSION}-${TARGETARCH}.tgz && \
     mv linkerd-reinitialize-pods-${LINKERD_REINITIALIZE_PODS_VERSION}-${TARGETARCH}/linkerd-reinitialize-pods .

-FROM --platform=$TARGETPLATFORM alpine:3.18.5 as runtime
+FROM --platform=$BUILDPLATFORM golang:1.21-alpine as copy-test
+WORKDIR /build
+COPY ./target/package/linkerd-reinitialize-pods-test-amd64.tgz .
+RUN tar -zxvf linkerd-reinitialize-pods-test-amd64.tgz && \
+    mv ./linkerd-reinitialize-pods-test-amd64/linkerd-reinitialize-pods .
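+
+# Note (editorial): unlike fetch, the copy-test stage above takes the
+# locally-built test package from ./target/package, so integration tests can
+# exercise an unreleased binary; runtime-base below collects everything
+# shared by the two final images, which differ only in which stage the
+# linkerd-reinitialize-pods binary is copied from.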
+
+FROM --platform=$TARGETPLATFORM alpine:3.18.5 as runtime-base
 WORKDIR /linkerd
 RUN apk add \
     # For inotifywait
@@ -32,10 +37,15 @@ RUN apk add \
     jq

 COPY --from=go /go/bin/linkerd-cni /opt/cni/bin/
-COPY --from=fetch /build/linkerd-reinitialize-pods /usr/lib/linkerd/
 COPY LICENSE .
 COPY cni-plugin/deployment/scripts/install-cni.sh .
 COPY cni-plugin/deployment/linkerd-cni.conf.default .
 COPY cni-plugin/deployment/scripts/filter.jq .
 ENV PATH=/linkerd:/opt/cni/bin:$PATH
 CMD ["install-cni.sh"]
+
+FROM --platform=$TARGETPLATFORM runtime-base as runtime-test
+COPY --from=copy-test /build/linkerd-reinitialize-pods /usr/lib/linkerd/
+
+FROM --platform=$TARGETPLATFORM runtime-base as runtime
+COPY --from=fetch /build/linkerd-reinitialize-pods /usr/lib/linkerd/
diff --git a/cni-plugin/integration/manifests/calico/k3s-images.json b/cni-plugin/integration/manifests/calico/k3s-images.json
new file mode 100644
index 00000000..4288dd9b
--- /dev/null
+++ b/cni-plugin/integration/manifests/calico/k3s-images.json
@@ -0,0 +1,11 @@
+{
+  "name": "docker.io/rancher/k3s",
+  "channels": {
+    "stable": "v1.27.6-k3s1",
+    "latest": "v1.27.6-k3s1",
+    "v1.27": "v1.27.6-k3s1"
+  },
+  "digests": {
+    "v1.27.6-k3s1": "sha256:9486bbb9ca9b81c098ecd07f1c45441e143dab12577e22cf062586edcfd9d952"
+  }
+}
diff --git a/justfile b/justfile
index 66558e43..b72c5282 100644
--- a/justfile
+++ b/justfile
@@ -17,7 +17,7 @@ lint: sh-lint md-lint rs-clippy action-lint action-dev-check

 go-lint *flags: (proxy-init-lint flags) (cni-plugin-lint flags)

-test: rs-test proxy-init-test-unit proxy-init-test-integration
+test: rs-test proxy-init-test-unit proxy-init-test-integration reinitialize-pods-integration

 # Check whether the Go code is formatted.
 go-fmt-check:
@@ -78,9 +78,18 @@ validator *args:
 ##
 ## reinitialize-pods
 ##
-reinitialize-pods *args:
+reinitialize-pods version *args:
     TARGETCRATE=linkerd-reinitialize-pods \
-        {{ just_executable() }} --justfile=justfile-rust {{ args }}
+        {{ just_executable() }} --justfile=justfile-rust version={{version}} {{ args }}
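+
+# Example usage (illustrative, version chosen arbitrarily):
+#   just reinitialize-pods v0.1.0 package
+# builds and packages the crate at the given version via justfile-rust.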
+# See https://github.com/k3d-io/k3d/issues/1375 +reinitialize-pods-integration $K3S_IMAGES_JSON='./cni-plugin/integration/manifests/calico/k3s-images.json': (reinitialize-pods "test" "package") (build-cni-plugin-image "--target=runtime-test" "--load") + @{{ just_executable() }} K3D_CREATE_FLAGS='{{ _K3D_CREATE_FLAGS_NO_CNI }}' _k3d-cni-create + @just-k3d use + @just-k3d import {{ cni-plugin-image }} + ./reinitialize-pods/integration/run.sh {{ cni-plugin-image }} ## ## cni-plugin diff --git a/reinitialize-pods/integration/linkerd-cni-config.yml b/reinitialize-pods/integration/linkerd-cni-config.yml new file mode 100644 index 00000000..8f20ea58 --- /dev/null +++ b/reinitialize-pods/integration/linkerd-cni-config.yml @@ -0,0 +1,4 @@ +extraInitContainers: +- name: sleep + image: busybox + command: ["/bin/sh", "-c", "sleep 15"] diff --git a/reinitialize-pods/integration/pause-ds.yml b/reinitialize-pods/integration/pause-ds.yml new file mode 100644 index 00000000..1b8e5d7c --- /dev/null +++ b/reinitialize-pods/integration/pause-ds.yml @@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: pause +spec: + selector: + matchLabels: + app: pause-app + template: + metadata: + annotations: + linkerd.io/inject: enabled + labels: + app: pause-app + spec: + priorityClassName: system-node-critical + containers: + - name: pause-container + image: k8s.gcr.io/pause diff --git a/reinitialize-pods/integration/run.sh b/reinitialize-pods/integration/run.sh new file mode 100755 index 00000000..05e19071 --- /dev/null +++ b/reinitialize-pods/integration/run.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# shellcheck disable=SC2086 +function step() { + repeat=$(seq 1 ${#1}) + printf "%0.s#" $repeat + printf "#####\n# %s...\n" "$1" + printf "%0.s#" $repeat + printf "#####\n" +} + +if [[ ! 
"$1" =~ (.*):(.*) ]]; then + echo 'Usage: run.sh name:tag' + exit 1 +fi +cni_plugin_image=${BASH_REMATCH[1]} +cni_image_version=${BASH_REMATCH[2]} + +cd "${BASH_SOURCE[0]%/*}" + +step 'Installing Calico' +kubectl apply -f https://k3d.io/v5.1.0/usage/advanced/calico.yaml +kubectl --namespace=kube-system wait --for=condition=available --timeout=120s \ + deploy/calico-kube-controllers + +step 'Installing latest linkerd edge' +scurl https://run.linkerd.io/install-edge | sh +export PATH=$PATH:$HOME/.linkerd2/bin +linkerd install --crds | kubectl apply -f - +# The linkerd-cni-config.yml config adds an extra initContainer that will make +# linkerd-cni to delay its start for 15s, so to allow time for the pause +# DaemonSet to start before the full CNI config is ready and enter a failure +# mode +linkerd install-cni \ + --use-wait-flag \ + --cni-image "$cni_plugin_image" \ + --cni-image-version "$cni_image_version" \ + --set reinitializePods.image.name="$cni_plugin_image" \ + --set reinitializePods.image.version="$cni_image_version" \ + -f linkerd-cni-config.yml \ + | kubectl apply -f - +linkerd check --pre --linkerd-cni-enabled +linkerd install --linkerd-cni-enabled | kubectl apply -f - +linkerd check + +step 'Installing pause DaemonSet' +kubectl apply -f pause-ds.yml +kubectl wait --for=condition=ready --timeout=120s -l app=pause-app po + +step 'Adding a node' +cluster=$(just-k3d --evaluate K3D_CLUSTER_NAME) +image=$(just --evaluate cni-plugin-image) +k3d node create node2 --cluster "$cluster" +k3d image import --cluster "$cluster" "$image" + +step 'Checking new DS replica fails with code 95' +sleep 10 +kubectl wait \ + --for=jsonpath='{.status.initContainerStatuses[0].lastState.terminated.exitCode}'=95 \ + --field-selector=spec.nodeName=k3d-node2-0 \ + pod + +step 'Checking new DS replica gets replaced' +for _ in {1..5}; do + if kubectl wait --for=condition=ready --timeout=10s -l app=pause-app po; then + break + fi +done +kubectl wait --for=condition=ready --timeout=10s -l app=pause-app po;