# Workflow: Cilium IPsec upgrade (ci-ipsec-upgrade)
# (Removed web-page scrape artifacts — "Skip to content" and repeated run
# titles from the GitHub Actions UI — which are not part of the workflow file.)
name: Cilium IPsec upgrade (ci-ipsec-upgrade)

# Any change in triggers needs to be reflected in the concurrency group.
on:
  workflow_dispatch:
    inputs:
      PR-number:
        description: "Pull request number."
        required: true
      context-ref:
        description: "Context in which the workflow runs. If PR is from a fork, will be the PR target branch (general case). If PR is NOT from a fork, will be the PR branch itself (this allows committers to test changes to workflows directly from PRs)."
        required: true
      SHA:
        description: "SHA under test (head of the PR branch)."
        required: true
      extra-args:
        description: "[JSON object] Arbitrary arguments passed from the trigger comment via regex capture group. Parse with 'fromJson(inputs.extra-args).argName' in workflow."
        required: false
        default: '{}'
  # Run every 6 hours
  schedule:
    - cron: '0 5/6 * * *'
# By specifying the access of one of the scopes, all of those that are not
# specified are set to 'none'.
permissions:
  # To be able to access the repository with actions/checkout
  contents: read
  # To allow retrieving information from the PR API
  pull-requests: read
  # To be able to set commit status
  statuses: write
concurrency:
  # Structure:
  # - Workflow name
  # - Event type
  # - A unique identifier depending on event type:
  #   - schedule: SHA
  #   - workflow_dispatch: PR number
  #
  # This structure ensures a unique concurrency group name is generated for each
  # type of testing, such that re-runs will cancel the previous run.
  group: |
    ${{ github.workflow }}
    ${{ github.event_name }}
    ${{
      (github.event_name == 'schedule' && github.sha) ||
      (github.event_name == 'workflow_dispatch' && github.event.inputs.PR-number)
    }}
  cancel-in-progress: true
env:
  # Intentionally empty by default; explicit "" avoids a bare (null) YAML value.
  # NOTE(review): presumably set to a CI build tag to test an unreleased
  # cilium-cli — confirm against the cilium-cli action's inputs.
  cilium_cli_ci_version: ""
  check_url: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
  # renovate: datasource=docker depName=quay.io/cilium/kindest-node
  k8s_version: v1.29.0-rc.1
jobs:
  # Marks the commit as "pending" while the test matrix runs; the final status
  # is set by the commit-status-final job below.
  commit-status-start:
    name: Commit Status Start
    runs-on: ubuntu-latest
    steps:
      - name: Set initial commit status
        uses: myrotvorets/set-commit-status-action@38f3f27c7d52fb381273e95542f07f0fba301307 # v2.0.0
        with:
          sha: ${{ inputs.SHA || github.sha }}
setup-and-test:
runs-on: ubuntu-latest-4cores-16gb
name: 'Setup & Test'
env:
job_name: 'Setup & Test'
strategy:
fail-fast: false
max-parallel: 16
matrix:
config: ['5.4', '5.10', '6.1', 'bpf-next']
mode: ['minor', 'patch']
include:
# Define three config sets
- config: '5.4'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: '5.4-20231128.100518'
kube-proxy: 'iptables'
kpr: 'disabled'
tunnel: 'disabled'
encryption: 'ipsec'
- config: '5.10'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: '5.10-20231128.100518'
kube-proxy: 'iptables'
kpr: 'disabled'
tunnel: 'disabled'
encryption: 'ipsec'
endpoint-routes: 'true'
- config: '6.1'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: '6.1-20231128.100518'
kube-proxy: 'iptables'
kpr: 'disabled'
tunnel: 'vxlan'
encryption: 'ipsec'
endpoint-routes: 'false'
- config: 'bpf-next'
# renovate: datasource=docker depName=quay.io/lvh-images/kind
kernel: 'bpf-next-20231211.012942'
kube-proxy: 'iptables'
kpr: 'disabled'
tunnel: 'vxlan'
encryption: 'ipsec'
endpoint-routes: 'true'
# Add names to matrix combinations of {config, mode}
- config: '5.4'
mode: 'minor'
name: '1'
- config: '5.10'
mode: 'minor'
name: '2'
- config: '6.1'
mode: 'minor'
name: '3'
- config: 'bpf-next'
mode: 'minor'
name: '4'
- config: '5.4'
mode: 'patch'
name: '5'
- config: '5.10'
mode: 'patch'
name: '6'
- config: '6.1'
mode: 'patch'
name: '7'
- config: 'bpf-next'
mode: 'patch'
name: '8'
timeout-minutes: 60
steps:
- name: Checkout context ref (trusted)
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
ref: ${{ inputs.context-ref || github.sha }}
persist-credentials: false
- name: Set Environment Variables
uses: ./.github/actions/set-env-variables
- name: Set up job variables
id: vars
run: |
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
SHA="${{ inputs.SHA }}"
else
SHA="${{ github.sha }}"
fi
echo sha=${SHA} >> $GITHUB_OUTPUT
if [ "${{ matrix.mode }}" = "minor" ]; then
CILIUM_DOWNGRADE_VERSION=$(contrib/scripts/print-downgrade-version.sh)
IMAGE_TAG=${CILIUM_DOWNGRADE_VERSION}
else
# Upgrade from / downgrade to patch release.
# In some cases we expect to fail to get the version number, do not
# fail the workflow in such case. This includes:
# - on main branch where we don't have preceeding patch releases
# - on stable branches on top of release preparation commits, where
# we bump the patch version number to 90 and we can't easily
# derive the number of the previous patch release version from
# that.
CILIUM_DOWNGRADE_VERSION=$(contrib/scripts/print-downgrade-version.sh patch || true)
# Pass an empty tag to the cilium-config action to fall back to the
# default release image, without crafting an image path with the
# "-ci" suffix
IMAGE_TAG=''
fi
echo downgrade_version=${CILIUM_DOWNGRADE_VERSION} >> $GITHUB_OUTPUT
echo image_tag=${IMAGE_TAG} >> $GITHUB_OUTPUT
- name: Derive stable Cilium installation config
id: cilium-stable-config
if: ${{ steps.vars.outputs.downgrade_version != '' }}
uses: ./.github/actions/cilium-config
with:
image-tag: ${{ steps.vars.outputs.image_tag }}
chart-dir: './cilium-${{ steps.vars.outputs.downgrade_version }}/install/kubernetes/cilium/'
tunnel: ${{ matrix.tunnel }}
endpoint-routes: ${{ matrix.endpoint-routes }}
ipv6: ${{ matrix.ipv6 }}
kpr: ${{ matrix.kpr }}
lb-mode: ${{ matrix.lb-mode }}
lb-acceleration: ${{ matrix.lb-acceleration }}
encryption: ${{ matrix.encryption }}
encryption-node: ${{ matrix.encryption-node }}
egress-gateway: ${{ matrix.egress-gateway }}
host-fw: ${{ matrix.host-fw }}
mutual-auth: false
misc: 'bpfClockProbe=false,cni.uninstall=false'
- name: Derive newest Cilium installation config
id: cilium-newest-config
if: ${{ steps.vars.outputs.downgrade_version != '' }}
uses: ./.github/actions/cilium-config
with:
image-tag: ${{ steps.vars.outputs.sha }}
chart-dir: './install/kubernetes/cilium'
tunnel: ${{ matrix.tunnel }}
endpoint-routes: ${{ matrix.endpoint-routes }}
ipv6: ${{ matrix.ipv6 }}
kpr: ${{ matrix.kpr }}
lb-mode: ${{ matrix.lb-mode }}
lb-acceleration: ${{ matrix.lb-acceleration }}
encryption: ${{ matrix.encryption }}
encryption-node: ${{ matrix.encryption-node }}
egress-gateway: ${{ matrix.egress-gateway }}
host-fw: ${{ matrix.host-fw }}
mutual-auth: false
misc: 'bpfClockProbe=false,cni.uninstall=false'
# Warning: since this is a privileged workflow, subsequent workflow job
# steps must take care not to execute untrusted code.
- name: Checkout pull request branch (NOT TRUSTED)
if: ${{ steps.vars.outputs.downgrade_version != '' }}
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
ref: ${{ steps.vars.outputs.sha }}
persist-credentials: false
- name: Checkout ${{ steps.vars.outputs.downgrade_version }} branch to get the Helm chart
if: ${{ steps.vars.outputs.downgrade_version != '' }}
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
path: cilium-${{ steps.vars.outputs.downgrade_version }}
ref: ${{ steps.vars.outputs.downgrade_version }}
persist-credentials: false
- name: Install Cilium CLI-cli
if: ${{ steps.vars.outputs.downgrade_version != '' }}
uses: cilium/cilium-cli@c1fa8afda634f0457e9905d4f638e5164c4e0fc8 # v0.15.17
with:
repository: ${{ env.CILIUM_CLI_RELEASE_REPO }}
release-version: ${{ env.CILIUM_CLI_VERSION }}
ci-version: ${{ env.cilium_cli_ci_version }}
binary-name: cilium-cli
binary-dir: ./
- name: Set Kind params
if: ${{ steps.vars.outputs.downgrade_version != '' }}
id: kind-params
shell: bash
run: |
IP_FAM="dual"
if [ "${{ matrix.ipv6 }}" == "false" ]; then
IP_FAM="ipv4"
fi
echo params="\"\" 3 \"\" \"\" ${{ matrix.kube-proxy }} $IP_FAM" >> $GITHUB_OUTPUT
- name: Provision K8s on LVH VM
if: ${{ steps.vars.outputs.downgrade_version != '' }}
uses: cilium/cilium/.github/actions/lvh-kind@main
with:
test-name: e2e-conformance
kernel: ${{ matrix.kernel }}
kind-params: "${{ steps.kind-params.outputs.params }}"
kind-image-vsn: ${k8s_version}
- name: Wait for images to be available
if: ${{ steps.vars.outputs.downgrade_version != '' }}
timeout-minutes: 30
shell: bash
run: |
for image in cilium-ci operator-generic-ci hubble-relay-ci ; do
until docker manifest inspect quay.io/${{ env.QUAY_ORGANIZATION_DEV }}/$image:${{ steps.vars.outputs.sha }} &> /dev/null; do sleep 45s; done
done
- name: Install Cilium ${{ steps.vars.outputs.downgrade_version }} (${{ matrix.name }})
if: ${{ steps.vars.outputs.downgrade_version != '' }}
shell: bash
run: |
kubectl patch node kind-worker3 --type=json -p='[{"op":"add","path":"/metadata/labels/cilium.io~1no-schedule","value":"true"}]'
kubectl create -n kube-system secret generic cilium-ipsec-keys \
--from-literal=keys="3 rfc4106(gcm(aes)) $(echo $(dd if=/dev/urandom count=20 bs=1 2> /dev/null | xxd -p -c 64)) 128"
mkdir -p cilium-junits
CILIUM_CLI_MODE=helm ./cilium-cli install \
${{ steps.cilium-stable-config.outputs.config }}
./cilium-cli status --wait
kubectl get pods --all-namespaces -o wide
# TODO: After Cilium 1.15 release, update to cilium-dbg
kubectl -n kube-system exec daemonset/cilium -- cilium status
- name: Start conn-disrupt-test
if: ${{ steps.vars.outputs.downgrade_version != '' }}
shell: bash
run: |
# Create pods which establish long lived connections. It will be used by
# subsequent connectivity tests with --include-conn-disrupt-test to catch any
# interruption in such flows.
./cilium-cli connectivity test --include-conn-disrupt-test --conn-disrupt-test-setup \
--conn-disrupt-dispatch-interval 0ms
- name: Upgrade Cilium & Test (${{ matrix.name }})
if: ${{ steps.vars.outputs.downgrade_version != '' }}
uses: cilium/cilium/.github/actions/conn-disrupt-test@main
with:
job-name: ipsec-upgrade-${{ matrix.name }}
operation-cmd: |
CILIUM_CLI_MODE=helm ./cilium-cli upgrade \
${{ steps.cilium-newest-config.outputs.config }}
./cilium-cli status --wait
kubectl get pods --all-namespaces -o wide
kubectl -n kube-system exec daemonset/cilium -- cilium-dbg status
- name: Downgrade Cilium to ${{ steps.vars.outputs.downgrade_version }} & Test (${{ matrix.name }})
if: ${{ steps.vars.outputs.downgrade_version != '' }}
uses: cilium/cilium/.github/actions/conn-disrupt-test@main
with:
job-name: ipsec-downgrade-${{ matrix.name }}
operation-cmd: |
CILIUM_CLI_MODE=helm ./cilium-cli upgrade \
${{ steps.cilium-stable-config.outputs.config }}
./cilium-cli status --wait
kubectl get pods --all-namespaces -o wide
# TODO: After Cilium 1.15 release, update to cilium-dbg
kubectl -n kube-system exec daemonset/cilium -- cilium status
- name: Fetch artifacts
if: ${{ steps.vars.outputs.downgrade_version != '' && !success() }}
shell: bash
run: |
kubectl get pods --all-namespaces -o wide
./cilium-cli status
mkdir -p cilium-sysdumps
./cilium-cli sysdump --output-filename cilium-sysdump-${{ matrix.name }}-final
- name: Upload artifacts
if: ${{ steps.vars.outputs.downgrade_version != '' && !success() }}
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
with:
name: cilium-sysdumps
path: cilium-sysdump-*.zip
retention-days: 5
- name: Upload JUnits [junit]
if: ${{ steps.vars.outputs.downgrade_version != '' && always() }}
uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3
with:
name: cilium-junits
path: cilium-junits/*.xml
retention-days: 2
- name: Publish Test Results As GitHub Summary
if: ${{ steps.vars.outputs.downgrade_version != '' && always() }}
uses: aanm/junit2md@332ebf0fddd34e91b03a832cfafaa826306558f9 # v0.0.3
with:
junit-directory: "cilium-junits"
commit-status-final:
if: ${{ always() }}
name: Commit Status Final
needs: setup-and-test
runs-on: ubuntu-latest
steps:
- name: Set final commit status
uses: myrotvorets/set-commit-status-action@38f3f27c7d52fb381273e95542f07f0fba301307 # v2.0.0
with:
sha: ${{ inputs.SHA || github.sha }}
status: ${{ needs.setup-and-test.result }}