Prepare for v1.16.4 release #1435

name: Run integration tests

# Any change in triggers needs to be reflected in the concurrency group.
on:
  push:
    branches:
      - main
      - v*
  pull_request:
    branches:
      - main
      - v*

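# Runs in the same concurrency group cancel each other: for pull requests the
# group is keyed on the PR number, for pushes on the commit SHA.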
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.event.after }}
  cancel-in-progress: true

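# Tool versions are pinned below; the "# renovate:" markers let Renovate
# discover and bump them automatically.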
env:
  # renovate: datasource=github-releases depName=helm/helm
  HELM_VERSION: v3.16.3
  # renovate: datasource=github-releases depName=kubernetes-sigs/kind
  KIND_VERSION: v0.25.0
  KIND_CONFIG: .github/kind-config.yaml
  # renovate: datasource=github-releases depName=cilium/cilium
  CILIUM_VERSION: v1.16.3
  CILIUM_VALUES: .github/cilium-values.yaml

jobs:
  integration-test:
    runs-on: ubuntu-24.04
    timeout-minutes: 20
    steps:
      - name: Checkout the repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
      - name: Setup go
        uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0
        with:
          go-version-file: 'go.mod'
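      # `make` builds the hubble binary at the repository root; the test steps
      # below invoke it directly as ./hubble.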
      - name: Build hubble CLI
        run: make
      - name: Set up Helm
        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # renovate: tag=v3.5
        with:
          version: ${{ env.HELM_VERSION }}
      - name: Create kind cluster
        uses: helm/kind-action@0025e74a8c7512023d06dc019c617aa3cf561fde # v1.10.0
        with:
          version: ${{ env.KIND_VERSION }}
          config: ${{ env.KIND_CONFIG }}
          cluster_name: kind
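      # The chart values in .github/cilium-values.yaml are expected to enable
      # Hubble and Hubble Relay, which the remaining steps depend on.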
      - name: Deploy Cilium
        run: |
          # Deploy Cilium with kube-proxy replacement (KPR), pointing the agent
          # at the kind control-plane container's IP so it can reach the API
          # server without kube-proxy.
          MASTER_IP="$(docker inspect kind-control-plane | jq '.[0].NetworkSettings.Networks.kind.IPAddress' -r)"
          helm repo add cilium https://helm.cilium.io
          helm repo update
          helm install cilium cilium/cilium \
            --wait \
            --namespace kube-system \
            --version ${{ env.CILIUM_VERSION }} \
            --values ${{ env.CILIUM_VALUES }} \
            --set kubeProxyReplacement=true \
            --set k8sServiceHost="${MASTER_IP}" \
            --set k8sServicePort=6443
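      # The relay deployment must be rolled out before the test step
      # port-forwards to it.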
      - name: Wait for hubble-relay to be running
        run: |
          kubectl -n kube-system rollout status deployment/hubble-relay
      - name: Run integration tests
        timeout-minutes: 5
        # Workaround for the hubble CLI thinking we're piping something in via
        # stdin, even though we aren't: run under `script` to allocate a TTY.
        shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"'
        run: |
          set -ex
          ./hubble --version
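          # The CLI connects to localhost:4245 by default, so forward the
          # relay service's port 80 to that local port.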
          kubectl -n kube-system port-forward service/hubble-relay 4245:80 &
          # wait until the port-forward is running
          until [ $(pgrep --count --full "kubectl.*port-forward.*service\/hubble-relay.*4245:80") -eq 1 ]; do
            sleep 1
          done
          # give relay a little more time to actually connect to the agent before running commands.
          sleep 5
          ./hubble status
          # query hubble until we receive flows, or timeout
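          # `-o jsonpb` emits newline-delimited JSON, so `jq --slurp` can
          # collect the flows into an array and count them.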
          flowCount=0
          until [ $flowCount -gt 0 ]; do
            ./hubble observe -n kube-system -o jsonpb | tee flows.json
            flowCount=$(jq -r --slurp 'length' flows.json)
            sleep 5
          done
          # verify we got some flows
          test $(jq -r --slurp 'length' flows.json) -gt 0
          # test piping flows into the CLI
          test $(./hubble observe < flows.json -o json | jq -r --slurp 'length') -eq $(jq -r --slurp 'length' flows.json)
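      # Collect diagnostics only when one of the steps above did not succeed.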
      - name: Post-test information gathering
        if: ${{ !success() }}
        run: |
          echo "Gathering information about KIND cluster"
          # Try the current container logs first, then fall back to the
          # previous instance (-p) if that fails.
          function get_logs() {
            CMD=(kubectl logs --timestamps --since 30m $@)
            "${CMD[@]}" || "${CMD[@]}" -p || echo "Unable to get logs for $@"
          }
echo "==================== CURRENT TIME ===================="
date -u
echo "==================== ALL NODES ===================="
kubectl get nodes --output wide --show-labels
echo "==================== ALL PODS ===================="
kubectl get pods --all-namespaces --output wide
echo "==================== CILIUM AGENT LOGS ===================="
get_logs -l "k8s-app=cilium" -n kube-system -c cilium-agent
echo "==================== HUBBLE RELAY LOGS ===================="
get_logs -l "k8s-app=hubble-relay" -n kube-system -c hubble-relay
echo "==================== CILIUM STATUS ===================="
kubectl -n kube-system exec ds/cilium -c cilium-agent -- cilium status --verbose
shell: bash {0} # Disable default fail-fast behaviour so that all commands run independently