Add support for expanding volumes #48
name: "Build, Test, and Publish"
on:
workflow_dispatch:
push:
branches:
- "master"
tags:
- "v*"
pull_request:
branches:
- "master"
paths-ignore:
- "**/*.md"
- "**/*.txt"
env:
# Equivalent of BUILD_PLATFORMS in the Makefile and release-tools build.make. We cannot just set
# this as a default inside the project Makefile because it will be overridden by the release-tools
# build.make. We can't update release-tools because there is a check to prevent modifying
# release-tools. Note release-tools specifies the buildx_platform without the os (e.g., arm64
# instead of linux/arm64).
RELEASE_TOOLS_BUILD_PLATFORMS: "linux amd64 amd64 amd64;linux arm64 arm64 arm64"
# Used as the list of platforms for Docker buildx when building and pushing multiarch images.
DOCKER_BUILDX_BUILD_PLATFORMS: "linux/amd64,linux/arm64"
# Container image registry to publish images to:
REGISTRY: ghcr.io
# Where to push an image of the CSI driver that will be retained (for master builds or releases) without a specific tag:
IMAGE_NAME: ghcr.io/thinkparq/beegfs-csi-driver
# Where to push an image of the CSI driver for testing (including the operator) without a specific tag:
TEST_IMAGE_NAME: ghcr.io/thinkparq/test-beegfs-csi-driver
# Where to push an image of the operator that will be retained (for master builds or releases) without a specific tag:
OPERATOR_IMAGE_NAME: ghcr.io/thinkparq/beegfs-csi-driver-operator
# Where to push an image of the operator for testing without a specific tag:
OPERATOR_TEST_IMAGE_NAME: ghcr.io/thinkparq/test-beegfs-csi-driver-operator
# Where to push an image of the bundle for testing without a specific tag:
OPERATOR_TEST_BUNDLE_NAME: ghcr.io/thinkparq/test-beegfs-csi-driver-operator-bundle
# Note all images are tagged with the GitHub sha to ensure consistency when testing images.
# Additional tags are applied depending on what event caused the image to be built.
jobs:
build-test-and-push-images:
runs-on: ubuntu-20.04
timeout-minutes: 10
permissions:
packages: write
contents: read
steps:
- uses: actions/[email protected]
with:
# Workaround for how release-tools verify-subtree.sh verifies release-tools has not been modified.
fetch-depth: "0"
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.22.0
# Dependencies are cached by default: https://github.com/actions/setup-go#v4
# This can be explicitly disabled if it ever causes problems.
- name: Build the BeeGFS CSI driver binaries and assemble chwrap tar files for each architecture
run: |
export SHELL=/bin/bash
make BUILD_PLATFORMS="${{ env.RELEASE_TOOLS_BUILD_PLATFORMS }}" all
echo -n "built artifacts:"
ls -alh bin/
- name: Install test dependencies
run: |
go install github.com/onsi/ginkgo/v2/[email protected]
go install github.com/google/[email protected]
timeout-minutes: 5
- name: Verify license compliance and the NOTICE file is updated
run: |
make test-licenses
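# ACK_GINKGO_DEPRECATIONS in the next step silences deprecation warnings from the pinned
# Ginkgo 1.16.5 release so they do not clutter the unit test output.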
- name: Run unit tests
run: |
ACK_GINKGO_DEPRECATIONS=1.16.5 TESTARGS="-v -ginkgo.v" make test
# TODO: Consider if we should write the results to a file and keep it as an artifact.
# For example using: https://github.com/marketplace/actions/junit-report-action
# TODO: Can we cache anything here? test-vendor downloads a lot of stuff.
# https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go#caching-dependencies
- name: Set up Docker Buildx
uses: docker/[email protected]
- name: Log into the GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# We only retain a limited number of test images created by PRs (non-master/release builds).
- name: Determine image names depending on whether they should be automatically cleaned up or retained
id: determine_image_name
run: |
if [ "${{ github.event_name }}" = "pull_request" ]; then
echo "PR triggered the workflow (only publish test images)"
driver_image=${{ env.TEST_IMAGE_NAME }}
operator_image=${{ env.OPERATOR_TEST_IMAGE_NAME }}
else
echo "Non-PR event triggered the workflow"
driver_image=${{ env.IMAGE_NAME }}
operator_image=${{ env.OPERATOR_IMAGE_NAME }}
fi
echo "DRIVER_IMAGE=$driver_image" >> $GITHUB_OUTPUT
echo "OPERATOR_IMAGE=$operator_image" >> $GITHUB_OUTPUT
# Release images are tagged on a push tag event (https://github.com/docker/metadata-action#semver).
# Otherwise the image will be tagged with the branch or PR. Images created for a PR are also
# tagged with the commit ID to ensure
# subsequent jobs in this workflow run use the correct image for testing:
# https://github.com/docker/metadata-action?tab=readme-ov-file#typesha
- name: Determine metadata for CSI driver image
id: meta_driver
uses: docker/metadata-action@v4
with:
images: |
${{ steps.determine_image_name.outputs.DRIVER_IMAGE }}
tags: |
type=ref,event=branch
type=ref,event=pr
type=semver,pattern={{version}},prefix=v
type=semver,pattern={{major}}.{{minor}},prefix=v
type=sha,prefix=,suffix=,format=long
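# As a rough illustration of the rules above: pushing a release tag such as v1.7.0 would produce
# the tags v1.7.0 and v1.7 plus the full commit sha, while a pull request would produce pr-<number>
# plus the full commit sha (names per docker/metadata-action's documented behavior).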
- name: Build and push driver container images for each supported platform
uses: docker/[email protected]
id: build_and_push_driver
with:
context: .
platforms: "${{ env.DOCKER_BUILDX_BUILD_PLATFORMS }}"
push: true
tags: ${{ steps.meta_driver.outputs.tags }}
labels: ${{ steps.meta_driver.outputs.labels }}
# If provenance is not set to false then the manifest list will contain unknown platform
# entries that are also displayed in GitHub. Some detail on why this is needed can be found in:
# https://github.com/docker/buildx/issues/1509 and
# https://github.com/docker/build-push-action/issues/755#issuecomment-1607792956.
provenance: false
# Reference: https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry#adding-a-description-to-multi-arch-images
outputs: type=image,name=target,annotation-index.org.opencontainers.image.description=The BeeGFS Container Storage Interface (CSI) driver provides high performing and scalable storage for workloads running in Kubernetes,org.opencontainers.image.source=https://github.com/ThinkParQ/beegfs-csi-driver,org.opencontainers.image.licenses=Apache-2.0
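# After the push, the resulting index annotations can be checked with something like (sketch):
#   docker buildx imagetools inspect <image>:<tag> --raw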
- name: Install Cosign
uses: sigstore/[email protected]
with:
cosign-release: "v2.1.1"
# Adapted from:
# https://github.blog/2021-12-06-safeguard-container-signing-capability-actions/
# https://github.com/sigstore/cosign-installer#usage
# Note we only sign the multi-platform image manifest, not the individual platform-specific images.
- name: Sign CSI driver image with Cosign
run: |
images=""
for tag in ${TAGS}; do
images+="${tag}@${DIGEST} "
done
cosign sign --yes --key env://COSIGN_PRIVATE_KEY \
-a "repo=${{ github.repository }}" \
-a "run=${{ github.run_id }}" \
-a "ref=${{ github.sha }}" \
${images}
env:
TAGS: ${{ steps.meta_driver.outputs.tags }}
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
DIGEST: ${{ steps.build_and_push_driver.outputs.digest }}
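# The signature can later be verified against the matching public key, e.g. (sketch, assuming
# the public key is available locally as cosign.pub):
#   cosign verify --key cosign.pub <image>@<digest>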
# TODO: Cache this dependency for reuse here and in e2e tests.
# https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go#caching-dependencies
# Adapted from https://sdk.operatorframework.io/docs/installation/#install-from-github-release
- name: Install the Operator SDK
run: |
export ARCH=$(case $(uname -m) in x86_64) echo -n amd64 ;; aarch64) echo -n arm64 ;; *) echo -n $(uname -m) ;; esac)
export OS=$(uname | awk '{print tolower($0)}')
export OPERATOR_SDK_DL_URL=https://github.com/operator-framework/operator-sdk/releases/download/v1.25.0
curl -LO ${OPERATOR_SDK_DL_URL}/operator-sdk_${OS}_${ARCH}
chmod +x operator-sdk_${OS}_${ARCH} && sudo mv operator-sdk_${OS}_${ARCH} /usr/local/bin/operator-sdk
- name: Build and test operator
run: |
cd operator
make BUILD_PLATFORMS="${{ env.RELEASE_TOOLS_BUILD_PLATFORMS }}" build test
# Build bundle without modification to verify that generated code and manifests are up to date.
make bundle
if ! git diff --exit-code > /dev/null; then
# The above make steps have run all generators. The developer making changes should also
# have run all generators and committed the result. Do not proceed if the generators run
# here produce different output than the developer committed.
echo "ERROR: Generated code and/or manifests are not up to date"
git diff
exit 1
fi
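# Developers can run the same generators locally before committing (sketch):
#   cd operator && make build test bundle && git diff --exit-code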
# Release images are tagged on a push tag event (https://github.com/docker/metadata-action#semver).
# Otherwise the image will be tagged with the branch or PR. Images created for a PR are also
# tagged with the commit ID to ensure
# subsequent jobs in this workflow run use the correct image for testing:
# https://github.com/docker/metadata-action?tab=readme-ov-file#typesha
- name: Determine metadata for operator image
id: meta_operator
uses: docker/metadata-action@v4
with:
images: |
${{ steps.determine_image_name.outputs.OPERATOR_IMAGE }}
tags: |
type=ref,event=branch
type=ref,event=pr
type=semver,pattern={{version}},prefix=v
type=semver,pattern={{major}}.{{minor}},prefix=v
type=sha,prefix=,suffix=,format=long,enable=true
- name: Build and push operator container images for each supported platform
uses: docker/[email protected]
id: build_and_push_operator
with:
context: operator/
platforms: "${{ env.DOCKER_BUILDX_BUILD_PLATFORMS }}"
push: true
tags: ${{ steps.meta_operator.outputs.tags }}
labels: ${{ steps.meta_operator.outputs.labels }}
# If provenance is not set to false then the manifest list will contain unknown platform
# entries that are also displayed in GitHub. Some detail on why this is needed can be found in:
# https://github.com/docker/buildx/issues/1509 and
# https://github.com/docker/build-push-action/issues/755#issuecomment-1607792956.
provenance: false
# Reference: https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry#adding-a-description-to-multi-arch-images
outputs: type=image,name=target,annotation-index.org.opencontainers.image.description=The BeeGFS CSI Driver Operator is used to deploy the driver to Operator Lifecycle Manager enabled clusters,org.opencontainers.image.source=https://github.com/ThinkParQ/beegfs-csi-driver-operator,org.opencontainers.image.licenses=Apache-2.0
# Adapted from:
# https://github.blog/2021-12-06-safeguard-container-signing-capability-actions/
# https://github.com/sigstore/cosign-installer#usage
# Note we only sign the multi-platform image manifest, not the individual platform-specific images.
- name: Sign the operator image with Cosign
run: |
images=""
for tag in ${TAGS}; do
images+="${tag}@${DIGEST} "
done
cosign sign --yes --key env://COSIGN_PRIVATE_KEY \
-a "repo=${{ github.repository }}" \
-a "run=${{ github.run_id }}" \
-a "ref=${{ github.sha }}" \
${images}
env:
TAGS: ${{ steps.meta_operator.outputs.tags }}
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
DIGEST: ${{ steps.build_and_push_operator.outputs.digest }}
# The bundle container built in this step can only be used for testing. This is because it
# references an operator image tag that will be cleaned up after this workflow completes. This
# is fine because a bundle container is not actually used to release the operator (the
# pristine bundle directory is used instead). We always push a bundle regardless of what
# triggered the workflow run as this is often useful for manual testing.
- name: Build and push the operator bundle as a test package
run: |
cd operator
make -e IMG=${{ steps.determine_image_name.outputs.OPERATOR_IMAGE }}:${{ github.sha }} -e BUNDLE_IMG=${{ env.OPERATOR_TEST_BUNDLE_NAME }}:${{ github.sha }} bundle bundle-build bundle-push
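# For reference, the -e overrides expand to something like (illustrative):
#   IMG=<operator image determined above>:<commit sha>
#   BUNDLE_IMG=ghcr.io/thinkparq/test-beegfs-csi-driver-operator-bundle:<commit sha>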
e2e-tests:
runs-on: ubuntu-20.04
timeout-minutes: 10
needs: build-test-and-push-images
if: github.event_name == 'pull_request'
strategy:
fail-fast: true
matrix:
k8s-version: [1.25.16, 1.26.14, 1.27.11, 1.28.7]
beegfs-version: [7.3.4, 7.4.2]
permissions:
packages: read
contents: read
steps:
- uses: actions/[email protected]
- name: Deploy Kubernetes ${{ matrix.k8s-version }} using Minikube
uses: medyagh/setup-minikube@latest
with:
driver: none
kubernetes-version: ${{ matrix.k8s-version }}
# Starting with BeeGFS 7.4.1, the container registry used to host BeeGFS images switched from
# Docker Hub to ghcr.io. This can be removed once we no longer test with BeeGFS 7.3.4.
- name: Determine BeeGFS image registry
id: determine_registry
run: |
if [ "${{ matrix.beegfs-version }}" = "7.3.4" ]; then
echo "BEEGFS_REGISTRY=beegfs/" >> $GITHUB_OUTPUT
else
echo "BEEGFS_REGISTRY=ghcr.io/thinkparq/" >> $GITHUB_OUTPUT
fi
- name: Deploy BeeGFS ${{ matrix.beegfs-version }} for testing
run: |
export BEEGFS_VERSION=$(echo ${{ matrix.beegfs-version }})
export BEEGFS_SECRET=$(echo ${{ secrets.CONN_AUTH_SECRET }})
export BEEGFS_REGISTRY=$(echo ${{ steps.determine_registry.outputs.BEEGFS_REGISTRY }})
envsubst < test/env/beegfs-ubuntu/beegfs-fs-1.yaml | kubectl apply -f -
kubectl get pods -A
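# envsubst fills the ${BEEGFS_VERSION}, ${BEEGFS_SECRET}, and ${BEEGFS_REGISTRY} placeholders in
# beegfs-fs-1.yaml before it is applied. For example (illustrative, assuming the manifest uses this
# pattern), an image reference of ${BEEGFS_REGISTRY}beegfs-mgmtd:${BEEGFS_VERSION} would become
# ghcr.io/thinkparq/beegfs-mgmtd:7.4.2.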
# TODO: Cache BeeGFS packages https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows
# https://stackoverflow.com/questions/59269850/caching-apt-packages-in-github-actions-workflow
- name: Install the BeeGFS ${{ matrix.beegfs-version }} DKMS client
run: |
sudo wget -P /etc/apt/sources.list.d/. https://www.beegfs.io/release/beegfs_${{ matrix.beegfs-version }}/dists/beegfs-focal.list
sudo wget -q https://www.beegfs.io/release/beegfs_${{ matrix.beegfs-version }}/gpg/GPG-KEY-beegfs -O- | sudo apt-key add -
sudo apt-get update && sudo apt-get install beegfs-client-dkms beegfs-helperd beegfs-utils -y
sudo sed -i 's/connDisableAuthentication = false/connDisableAuthentication = true/' /etc/beegfs/beegfs-helperd.conf
sudo systemctl start beegfs-helperd && sudo systemctl enable beegfs-helperd
- name: Deploy the BeeGFS CSI driver
run: |
export BEEGFS_SECRET=$(echo ${{ secrets.CONN_AUTH_SECRET }})
envsubst < test/env/beegfs-ubuntu/csi-beegfs-connauth.yaml > deploy/k8s/overlays/default/csi-beegfs-connauth.yaml
# TODO: Enable once the K8s versions in the matrix are added to versions/
# sed -i 's?/versions/latest?/versions/v${{ matrix.k8s-version }}?g' deploy/k8s/overlays/default/kustomization.yaml
echo -e "\nimages:\n - name: ${{ env.IMAGE_NAME }}\n newName: ${{ env.TEST_IMAGE_NAME }}\n newTag: ${{ github.sha }}" >> deploy/k8s/overlays/default/kustomization.yaml
kubectl apply -k deploy/k8s/overlays/default
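# The echo above appends a kustomize images override to the overlay, roughly:
#   images:
#     - name: ghcr.io/thinkparq/beegfs-csi-driver
#       newName: ghcr.io/thinkparq/test-beegfs-csi-driver
#       newTag: <commit sha>
# so the deployed driver uses the test image pushed earlier in this workflow.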
# TODO (https://github.com/ThinkParQ/beegfs-csi-driver/issues/21): Actually run e2e tests using Ginkgo with an appropriate timeout.
- name: Deploy all examples to verify the driver is available
run: |
echo "${{ secrets.CONN_AUTH_SECRET }}" | sudo tee /etc/beegfs/connAuth > /dev/null
sudo sed -i '0,/connAuthFile[[:space:]]*=[[:space:]]*/s//connAuthFile = \/etc\/beegfs\/connAuth/' /etc/beegfs/beegfs-client.conf
sudo sed -i '0,/sysMgmtdHost[[:space:]]*=[[:space:]]*/s//sysMgmtdHost = localhost/' /etc/beegfs/beegfs-client.conf
sudo beegfs-ctl --cfgFile=/etc/beegfs/beegfs-client.conf --unmounted --createdir /k8s
sudo beegfs-ctl --cfgFile=/etc/beegfs/beegfs-client.conf --unmounted --createdir /k8s/all
sudo beegfs-ctl --cfgFile=/etc/beegfs/beegfs-client.conf --unmounted --createdir /k8s/all/static
sudo beegfs-ctl --cfgFile=/etc/beegfs/beegfs-client.conf --unmounted --createdir /k8s/all/static-ro
kubectl apply -f examples/k8s/all
# If the controller or node service failed to start, our test pod would still be in the Pending phase.
# We check periodically whether the pod has started and, if we reach the maximum number of attempts, fail with debug output.
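# With 36 attempts and a 5 second sleep this gives the pod roughly 3 minutes to reach Running.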
- name: Wait and verify the test pod is running
run: |
MAX_ATTEMPTS=36
SLEEP_TIME=5
COUNTER=0
while [ $COUNTER -lt $MAX_ATTEMPTS ]; do
POD_STATUS=$(kubectl get pods csi-beegfs-all-app -o jsonpath='{.status.phase}')
echo "Pod status: ${POD_STATUS}"
if [ "${POD_STATUS}" == "Running" ]; then
echo "Verified test pod is running."
break
else
echo "Pod is not running, waiting for ${SLEEP_TIME} seconds..."
sleep ${SLEEP_TIME}
COUNTER=$((COUNTER+1))
fi
done
if [ $COUNTER -eq $MAX_ATTEMPTS ]; then
echo "Test pod did not reach 'Running' status within the maximum allowed time. Outputting debug information and exiting with error..."
kubectl get pods -A
kubectl describe pod -n beegfs-csi csi-beegfs-controller-0
POD_NAME=$(kubectl get pods -n beegfs-csi -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep 'csi-beegfs-node-')
kubectl describe pod -n beegfs-csi $POD_NAME
kubectl describe pod csi-beegfs-all-app
docker images
exit 1
fi
operator-e2e-tests:
runs-on: ubuntu-20.04
timeout-minutes: 10
needs: build-test-and-push-images
if: github.event_name == 'pull_request'
strategy:
fail-fast: true
matrix:
k8s-version: [1.25.16, 1.26.14, 1.27.11, 1.28.7]
beegfs-version: [7.3.4, 7.4.2]
permissions:
packages: read
contents: read
steps:
- uses: actions/[email protected]
- name: Deploy Kubernetes ${{ matrix.k8s-version }} using Minikube
uses: medyagh/setup-minikube@latest
with:
# Cannot use the "none" driver with OLM.
# driver: none
kubernetes-version: ${{ matrix.k8s-version }}
mount-path: "/etc/beegfs:/etc/beegfs"
# TODO: Cache this dependency for reuse here and above.
# https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go#caching-dependencies
# Adapted from https://sdk.operatorframework.io/docs/installation/#install-from-github-release
- name: Install the Operator SDK
run: |
export ARCH=$(case $(uname -m) in x86_64) echo -n amd64 ;; aarch64) echo -n arm64 ;; *) echo -n $(uname -m) ;; esac)
export OS=$(uname | awk '{print tolower($0)}')
export OPERATOR_SDK_DL_URL=https://github.com/operator-framework/operator-sdk/releases/download/v1.25.0
curl -LO ${OPERATOR_SDK_DL_URL}/operator-sdk_${OS}_${ARCH}
chmod +x operator-sdk_${OS}_${ARCH} && sudo mv operator-sdk_${OS}_${ARCH} /usr/local/bin/operator-sdk
- name: Run Operator Scorecard
run: |
operator-sdk scorecard ./operator/bundle -w 180s > /tmp/scorecard.txt 2>&1 || (echo "SCORECARD FAILURE!" && exit 1)
- name: Save the Operator Scorecard results as an artifact
uses: actions/[email protected]
if: ${{ always() }}
with:
name: operator-scorecard-k8s${{ matrix.k8s-version }}-beegfs${{ matrix.beegfs-version }}
path: /tmp/scorecard.txt
- name: Install Operator Lifecycle Manager (OLM)
run: |
curl -L https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.25.0/install.sh -o install.sh
chmod +x install.sh
./install.sh v0.25.0
# Starting with BeeGFS 7.4.1, the container registry used to host BeeGFS images switched from
# Docker Hub to ghcr.io. This can be removed once we no longer test with BeeGFS 7.3.4.
- name: Determine BeeGFS image registry
id: determine_registry
run: |
if [ "${{ matrix.beegfs-version }}" = "7.3.4" ]; then
echo "BEEGFS_REGISTRY=beegfs/" >> $GITHUB_OUTPUT
else
echo "BEEGFS_REGISTRY=ghcr.io/thinkparq/" >> $GITHUB_OUTPUT
fi
# Test using a base64-encoded secret for the operator-based deployment to
# ensure the broadest coverage for how we handle base64-encoded secrets.
- name: Deploy BeeGFS ${{ matrix.beegfs-version }} for testing and expose as a service to the host OS
run: |
export BEEGFS_VERSION=$(echo ${{ matrix.beegfs-version }})
export BEEGFS_SECRET=$(echo ${{ secrets.CONN_AUTH_BASE64_SECRET }})
export BEEGFS_REGISTRY=$(echo ${{ steps.determine_registry.outputs.BEEGFS_REGISTRY }})
envsubst < test/env/beegfs-ubuntu/beegfs-fs-2.yaml | kubectl apply -f -
MAX_ATTEMPTS=36
SLEEP_TIME=5
COUNTER=0
# If we try to expose the service to the host OS before the pod is ready, we'll get an error.
# Make sure the BeeGFS FS has started before we continue.
while [ $COUNTER -lt $MAX_ATTEMPTS ]; do
POD_STATUS=$(kubectl get pods beegfs-fs-2-0 -o jsonpath='{.status.phase}')
echo "Pod status: ${POD_STATUS}"
if [ "${POD_STATUS}" == "Running" ]; then
echo "Verified BeeGFS FS pod is running."
break
else
echo "Pod is not running, waiting for ${SLEEP_TIME} seconds..."
sleep ${SLEEP_TIME}
COUNTER=$((COUNTER+1))
fi
done
if [ $COUNTER -eq $MAX_ATTEMPTS ]; then
echo "BeeGFS FS pod did not reach 'Running' status within the maximum allowed time. Outputting debug information and exiting with error..."
kubectl get pods -A
kubectl describe pod beegfs-fs-2-0
docker images
exit 1
fi
# Adapted from https://minikube.sigs.k8s.io/docs/handbook/accessing/
# Exposes the service directly to the host operating system.
# This is required to mount BeeGFS since the kernel module is outside the container.
# For some reason we don't need to override the ephemeral port and can use the actual 800* ports.
minikube service beegfs-fs-2-svc
# TODO: Cache BeeGFS packages https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows
# https://stackoverflow.com/questions/59269850/caching-apt-packages-in-github-actions-workflow
- name: Install the BeeGFS ${{ matrix.beegfs-version }} DKMS client
run: |
sudo wget -P /etc/apt/sources.list.d/. https://www.beegfs.io/release/beegfs_${{ matrix.beegfs-version }}/dists/beegfs-focal.list
sudo wget -q https://www.beegfs.io/release/beegfs_${{ matrix.beegfs-version }}/gpg/GPG-KEY-beegfs -O- | sudo apt-key add -
sudo apt-get update && sudo apt-get install beegfs-client-dkms beegfs-helperd beegfs-utils -y
sudo sed -i 's/connDisableAuthentication = false/connDisableAuthentication = true/' /etc/beegfs/beegfs-helperd.conf
sudo systemctl start beegfs-helperd && sudo systemctl enable beegfs-helperd
- name: Install BeeGFS ${{ matrix.beegfs-version }} beegfs-ctl tool into the Minikube container
run: |
minikube ssh "sudo apt-get update"
minikube ssh "sudo apt-get install wget -y"
minikube ssh "sudo wget -q https://www.beegfs.io/release/beegfs_${{ matrix.beegfs-version }}/gpg/GPG-KEY-beegfs -O- | sudo apt-key add -"
minikube ssh "sudo wget -P /etc/apt/sources.list.d/ https://www.beegfs.io/release/beegfs_${{ matrix.beegfs-version }}/dists/beegfs-focal.list"
minikube ssh "sudo apt-get update"
minikube ssh "sudo apt-get install beegfs-utils -y"
- name: Use operator-sdk to create a pod to serve the bundle to OLM via subscription
run: |
operator-sdk run bundle ${{ env.OPERATOR_TEST_BUNDLE_NAME }}:${{ github.sha }}
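# 'operator-sdk run bundle' serves the bundle image from a registry pod inside the cluster and
# creates the CatalogSource, OperatorGroup, and Subscription that OLM needs to install the operator.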
# TODO (https://github.com/ThinkParQ/beegfs-csi-driver/issues/21): Actually run e2e tests using Ginkgo with an appropriate timeout.
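# BEEGFS_MGMTD below resolves to the first node's InternalIP and is substituted into the
# csi-beegfs-cr.yaml template as the BeeGFS management address.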
- name: Deploy a BeeGFSDriver object
run: |
export CSI_IMAGE_NAME=$(echo ${{ env.TEST_IMAGE_NAME }})
export CSI_IMAGE_TAG=$(echo ${{ github.sha }})
export BEEGFS_SECRET=$(echo ${{ secrets.CONN_AUTH_BASE64_SECRET }})
export BEEGFS_MGMTD=$(kubectl get nodes -o=jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
envsubst < test/env/beegfs-ubuntu/csi-beegfs-cr.yaml | kubectl apply -f -
- name: Deploy all examples to verify the driver is available
run: |
minikube ssh "sudo echo ${{ secrets.CONN_AUTH_BASE64_SECRET }} | base64 --decode | sudo tee /etc/beegfs/connAuth > /dev/null"
minikube ssh "sudo sed -i '0,/connAuthFile[[:space:]]*=[[:space:]]*/s//connAuthFile = \/etc\/beegfs\/connAuth/' /etc/beegfs/beegfs-client.conf"
minikube ssh "sudo sed -i '0,/sysMgmtdHost[[:space:]]*=[[:space:]]*/s//sysMgmtdHost = localhost/' /etc/beegfs/beegfs-client.conf"
minikube ssh "sudo beegfs-ctl --cfgFile=/etc/beegfs/beegfs-client.conf --unmounted --createdir /k8s"
minikube ssh "sudo beegfs-ctl --cfgFile=/etc/beegfs/beegfs-client.conf --unmounted --createdir /k8s/all"
minikube ssh "sudo beegfs-ctl --cfgFile=/etc/beegfs/beegfs-client.conf --unmounted --createdir /k8s/all/static"
minikube ssh "sudo beegfs-ctl --cfgFile=/etc/beegfs/beegfs-client.conf --unmounted --createdir /k8s/all/static-ro"
export BEEGFS_MGMTD=$(kubectl get nodes -o=jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
for file in examples/k8s/all/*; do sed -i 's/localhost/'"${BEEGFS_MGMTD}"'/g' "$file"; done
kubectl apply -f examples/k8s/all
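# The sed loop above rewrites every localhost reference in the example manifests to the management
# address determined from the node's InternalIP, since in this job the file system runs inside
# Minikube rather than on the host.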
# If the controller or node service failed to start, our test pod would still be in the Pending phase.
# We check periodically whether the pod has started and, if we reach the maximum number of attempts, fail with debug output.
- name: Wait and verify the test pod is running
run: |
MAX_ATTEMPTS=36
SLEEP_TIME=5
COUNTER=0
while [ $COUNTER -lt $MAX_ATTEMPTS ]; do
POD_STATUS=$(kubectl get pods csi-beegfs-all-app -o jsonpath='{.status.phase}')
echo "Pod status: ${POD_STATUS}"
if [ "${POD_STATUS}" == "Running" ]; then
echo "Verified test pod is running."
break
else
echo "Pod is not running, waiting for ${SLEEP_TIME} seconds..."
sleep ${SLEEP_TIME}
COUNTER=$((COUNTER+1))
fi
done
if [ $COUNTER -eq $MAX_ATTEMPTS ]; then
echo "Test pod did not reach 'Running' status within the maximum allowed time. Outputting debug information and exiting with error..."
kubectl get pods -A
kubectl describe pod csi-beegfs-controller-0
POD_NAME=$(kubectl get pods -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep 'csi-beegfs-node-')
kubectl describe pod $POD_NAME
kubectl describe pod csi-beegfs-all-app
docker images
exit 1
fi
# We keep around a few old test packages because (a) this avoids deleting images used by workflows
# running in parallel, and (b) it may be useful to pull a package to troubleshoot workflow failures.
cleanup-test-images:
runs-on: ubuntu-20.04
timeout-minutes: 3
needs: [build-test-and-push-images, e2e-tests, operator-e2e-tests]
if: always()
steps:
- name: Extract CSI driver test package name
id: extract_driver
run: |
test_image_name="${{ env.TEST_IMAGE_NAME }}"
test_image_pkg=${test_image_name##*/}
echo "TEST_IMAGE_PKG=$test_image_pkg" >> $GITHUB_OUTPUT
- name: Cleanup old ${{ steps.extract_driver.outputs.TEST_IMAGE_PKG }} packages
uses: actions/delete-package-versions@v5
with:
package-name: "${{ steps.extract_driver.outputs.TEST_IMAGE_PKG }}"
package-type: "container"
min-versions-to-keep: 10
- name: Extract operator test package names
id: extract_operator
run: |
operator_test_image_name="${{ env.OPERATOR_TEST_IMAGE_NAME }}"
operator_test_image_pkg=${operator_test_image_name##*/}
echo "OPERATOR_TEST_IMAGE_PKG=$operator_test_image_pkg" >> $GITHUB_OUTPUT
operator_test_bundle_name="${{ env.OPERATOR_TEST_BUNDLE_NAME }}"
operator_test_bundle_pkg=${operator_test_bundle_name##*/}
echo "OPERATOR_TEST_BUNDLE_PKG=$operator_test_bundle_pkg" >> $GITHUB_OUTPUT
- name: Cleanup old ${{ steps.extract_operator.outputs.OPERATOR_TEST_IMAGE_PKG }} packages
uses: actions/delete-package-versions@v5
with:
package-name: "${{ steps.extract_operator.outputs.OPERATOR_TEST_IMAGE_PKG }}"
package-type: "container"
min-versions-to-keep: 10
- name: Cleanup old ${{ steps.extract_operator.outputs.OPERATOR_TEST_BUNDLE_PKG }} packages
uses: actions/delete-package-versions@v5
with:
package-name: "${{ steps.extract_operator.outputs.OPERATOR_TEST_BUNDLE_PKG }}"
package-type: "container"
min-versions-to-keep: 10