From 1f70f548d54b0464eaef67264136ccb78023ee8f Mon Sep 17 00:00:00 2001 From: Matthew Wong Date: Tue, 13 Jul 2021 14:29:25 -0700 Subject: [PATCH] Fast-forward to latest ebs hack/e2e scripts with eksctl support, k8s 1.20, etc. --- hack/e2e/ebs.sh | 6 +- hack/e2e/ecr.sh | 2 +- hack/e2e/eksctl.sh | 109 ++++++++++++++++++++++++++++++ hack/e2e/kops.sh | 148 +++++++++++++++++++++++++++++++--------- hack/e2e/run.sh | 163 +++++++++++++++++++++++++++++++++------------ hack/e2e/util.sh | 10 +++ 6 files changed, 359 insertions(+), 79 deletions(-) create mode 100644 hack/e2e/eksctl.sh diff --git a/hack/e2e/ebs.sh b/hack/e2e/ebs.sh index 0ca3646bc..9a80611d2 100644 --- a/hack/e2e/ebs.sh +++ b/hack/e2e/ebs.sh @@ -6,11 +6,13 @@ BASE_DIR=$(dirname "$(realpath "${BASH_SOURCE[0]}")") source "${BASE_DIR}"/util.sh function ebs_check_migration() { + KUBECONFIG=${1} + loudecho "Checking migration" # There should have been no calls to the in-tree driver kubernetes.io/aws-ebs but many calls to ebs.csi.aws.com # Find the controller-manager log and read its metrics to verify - NODE=$(kubectl get node -l kubernetes.io/role=master -o json | jq -r ".items[].metadata.name") - kubectl port-forward kube-controller-manager-"${NODE}" 10252:10252 -n kube-system & + NODE=$(kubectl get node -l kubernetes.io/role=master -o json --kubeconfig "${KUBECONFIG}" | jq -r ".items[].metadata.name") + kubectl port-forward kube-controller-manager-"${NODE}" 10252:10252 -n kube-system --kubeconfig "${KUBECONFIG}" & # Ensure port forwarding succeeded n=0 diff --git a/hack/e2e/ecr.sh b/hack/e2e/ecr.sh index 8f710d95b..df7b973b7 100644 --- a/hack/e2e/ecr.sh +++ b/hack/e2e/ecr.sh @@ -11,7 +11,7 @@ function ecr_build_and_push() { IMAGE_NAME=${3} IMAGE_TAG=${4} set +e - if docker images | grep "${IMAGE_NAME}" | grep "${IMAGE_TAG}"; then + if docker images --format "{{.Repository}}:{{.Tag}}" | grep "${IMAGE_NAME}:${IMAGE_TAG}"; then set -e loudecho "Assuming ${IMAGE_NAME}:${IMAGE_TAG} has been built and pushed" else diff 
--git a/hack/e2e/eksctl.sh b/hack/e2e/eksctl.sh new file mode 100644 index 000000000..a185f7f2e --- /dev/null +++ b/hack/e2e/eksctl.sh @@ -0,0 +1,109 @@ +#!/bin/bash + +set -euo pipefail + +function eksctl_install() { + INSTALL_PATH=${1} + EKSCTL_VERSION=${2} + if [[ ! -e ${INSTALL_PATH}/eksctl ]]; then + EKSCTL_DOWNLOAD_URL="https://github.com/weaveworks/eksctl/releases/download/${EKSCTL_VERSION}/eksctl_$(uname -s)_amd64.tar.gz" + curl --silent --location "${EKSCTL_DOWNLOAD_URL}" | tar xz -C "${INSTALL_PATH}" + chmod +x "${INSTALL_PATH}"/eksctl + fi +} + +function eksctl_create_cluster() { + SSH_KEY_PATH=${1} + CLUSTER_NAME=${2} + BIN=${3} + ZONES=${4} + INSTANCE_TYPE=${5} + K8S_VERSION=${6} + CLUSTER_FILE=${7} + KUBECONFIG=${8} + EKSCTL_PATCH_FILE=${9} + EKSCTL_ADMIN_ROLE=${10} + + generate_ssh_key "${SSH_KEY_PATH}" + + CLUSTER_NAME="${CLUSTER_NAME//./-}" + + if eksctl_cluster_exists "${CLUSTER_NAME}" "${BIN}"; then + loudecho "Upgrading cluster $CLUSTER_NAME with $CLUSTER_FILE" + ${BIN} upgrade cluster -f "${CLUSTER_FILE}" + else + loudecho "Creating cluster $CLUSTER_NAME with $CLUSTER_FILE (dry run)" + ${BIN} create cluster \ + --managed \ + --ssh-access \ + --ssh-public-key "${SSH_KEY_PATH}".pub \ + --zones "${ZONES}" \ + --nodes=3 \ + --instance-types="${INSTANCE_TYPE}" \ + --version="${K8S_VERSION}" \ + --disable-pod-imds \ + --dry-run \ + "${CLUSTER_NAME}" > "${CLUSTER_FILE}" + + if test -f "$EKSCTL_PATCH_FILE"; then + eksctl_patch_cluster_file "$CLUSTER_FILE" "$EKSCTL_PATCH_FILE" + fi + + loudecho "Creating cluster $CLUSTER_NAME with $CLUSTER_FILE" + ${BIN} create cluster -f "${CLUSTER_FILE}" --kubeconfig "${KUBECONFIG}" + fi + + loudecho "Cluster ${CLUSTER_NAME} kubecfg written to ${KUBECONFIG}" + + loudecho "Getting cluster ${CLUSTER_NAME}" + ${BIN} get cluster "${CLUSTER_NAME}" + + if [ -n "$EKSCTL_ADMIN_ROLE" ]; then + AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) + 
ADMIN_ARN="arn:aws:iam::${AWS_ACCOUNT_ID}:role/${EKSCTL_ADMIN_ROLE}" + loudecho "Granting ${ADMIN_ARN} admin access to the cluster" + ${BIN} create iamidentitymapping --cluster "${CLUSTER_NAME}" --arn "${ADMIN_ARN}" --group system:masters --username admin + fi + + return $? +} + +function eksctl_cluster_exists() { + CLUSTER_NAME=${1} + BIN=${2} + set +e + if ${BIN} get cluster "${CLUSTER_NAME}"; then + set -e + return 0 + else + set -e + return 1 + fi +} + +function eksctl_delete_cluster() { + BIN=${1} + CLUSTER_NAME=${2} + loudecho "Deleting cluster ${CLUSTER_NAME}" + ${BIN} delete cluster "${CLUSTER_NAME}" +} + +function eksctl_patch_cluster_file() { + CLUSTER_FILE=${1} # input must be yaml + EKSCTL_PATCH_FILE=${2} # input must be yaml + + loudecho "Patching cluster $CLUSTER_NAME with $EKSCTL_PATCH_FILE" + + # Temporary intermediate files for patching + CLUSTER_FILE_0=$CLUSTER_FILE.0 + CLUSTER_FILE_1=$CLUSTER_FILE.1 + + cp "$CLUSTER_FILE" "$CLUSTER_FILE_0" + + # Patch only the Cluster + kubectl patch -f "$CLUSTER_FILE_0" --local --type merge --patch "$(cat "$EKSCTL_PATCH_FILE")" -o yaml > "$CLUSTER_FILE_1" + mv "$CLUSTER_FILE_1" "$CLUSTER_FILE_0" + + # Done patching, overwrite original CLUSTER_FILE + mv "$CLUSTER_FILE_0" "$CLUSTER_FILE" # output is yaml +} diff --git a/hack/e2e/kops.sh b/hack/e2e/kops.sh index 4dd75099f..ef12d5ba5 100644 --- a/hack/e2e/kops.sh +++ b/hack/e2e/kops.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -uo pipefail +set -euo pipefail OS_ARCH=$(go env GOOS)-amd64 @@ -19,53 +19,137 @@ function kops_install() { function kops_create_cluster() { SSH_KEY_PATH=${1} - KOPS_STATE_FILE=${2} - CLUSTER_NAME=${3} - KOPS_BIN=${4} - ZONES=${5} + CLUSTER_NAME=${2} + BIN=${3} + ZONES=${4} + NODE_COUNT=${5} INSTANCE_TYPE=${6} K8S_VERSION=${7} - TEST_DIR=${8} - KOPS_FEATURE_GATES_FILE=${10} - KOPS_ADDITIONAL_POLICIES_FILE=${11} + CLUSTER_FILE=${8} + KUBECONFIG=${9} + KOPS_PATCH_FILE=${10} + KOPS_PATCH_NODE_FILE=${11} + KOPS_STATE_FILE=${12} - loudecho "Generating SSH 
key $SSH_KEY_PATH" - if [[ ! -e ${SSH_KEY_PATH} ]]; then - ssh-keygen -P csi-e2e -f "${SSH_KEY_PATH}" - fi + generate_ssh_key "${SSH_KEY_PATH}" - set +e - if ${KOPS_BIN} get cluster --state "${KOPS_STATE_FILE}" "${CLUSTER_NAME}"; then - set -e - loudecho "Updating cluster $CLUSTER_NAME" + if kops_cluster_exists "${CLUSTER_NAME}" "${BIN}" "${KOPS_STATE_FILE}"; then + loudecho "Replacing cluster $CLUSTER_NAME with $CLUSTER_FILE" + ${BIN} replace --state "${KOPS_STATE_FILE}" -f "${CLUSTER_FILE}" else - set -e - loudecho "Creating cluster $CLUSTER_NAME" - ${KOPS_BIN} create cluster --state "${KOPS_STATE_FILE}" \ + loudecho "Creating cluster $CLUSTER_NAME with $CLUSTER_FILE (dry run)" + ${BIN} create cluster --state "${KOPS_STATE_FILE}" \ + --ssh-public-key="${SSH_KEY_PATH}".pub \ --zones "${ZONES}" \ - --node-count=3 \ + --node-count="${NODE_COUNT}" \ --node-size="${INSTANCE_TYPE}" \ --kubernetes-version="${K8S_VERSION}" \ - --ssh-public-key="${SSH_KEY_PATH}".pub \ - "${CLUSTER_NAME}" + --dry-run \ + -o yaml \ + "${CLUSTER_NAME}" > "${CLUSTER_FILE}" + + if test -f "$KOPS_PATCH_FILE"; then + kops_patch_cluster_file "$CLUSTER_FILE" "$KOPS_PATCH_FILE" "Cluster" "" + fi + if test -f "$KOPS_PATCH_NODE_FILE"; then + kops_patch_cluster_file "$CLUSTER_FILE" "$KOPS_PATCH_NODE_FILE" "InstanceGroup" "Node" + fi + + loudecho "Creating cluster $CLUSTER_NAME with $CLUSTER_FILE" + ${BIN} create --state "${KOPS_STATE_FILE}" -f "${CLUSTER_FILE}" + kops create secret --state "${KOPS_STATE_FILE}" --name "${CLUSTER_NAME}" sshpublickey admin -i "${SSH_KEY_PATH}".pub fi - CLUSTER_YAML_PATH=${TEST_DIR}/${CLUSTER_NAME}.yaml - ${KOPS_BIN} get cluster --state "${KOPS_STATE_FILE}" "${CLUSTER_NAME}" -o yaml > "${CLUSTER_YAML_PATH}" - [ -r "$KOPS_FEATURE_GATES_FILE" ] && cat "${KOPS_FEATURE_GATES_FILE}" >> "${CLUSTER_YAML_PATH}" - [ -r "$KOPS_ADDITIONAL_POLICIES_FILE" ] && cat "${KOPS_ADDITIONAL_POLICIES_FILE}" >> "${CLUSTER_YAML_PATH}" - ${KOPS_BIN} replace --state "${KOPS_STATE_FILE}" -f 
"${CLUSTER_YAML_PATH}" - ${KOPS_BIN} update cluster --state "${KOPS_STATE_FILE}" "${CLUSTER_NAME}" --yes + loudecho "Updating cluster $CLUSTER_NAME with $CLUSTER_FILE" + ${BIN} update cluster --state "${KOPS_STATE_FILE}" "${CLUSTER_NAME}" --yes - loudecho "Validating cluster $CLUSTER_NAME" - ${KOPS_BIN} validate cluster --state "${KOPS_STATE_FILE}" --wait 10m + loudecho "Exporting cluster ${CLUSTER_NAME} kubecfg to ${KUBECONFIG}" + ${BIN} export kubecfg --state "${KOPS_STATE_FILE}" "${CLUSTER_NAME}" --admin --kubeconfig "${KUBECONFIG}" + + loudecho "Validating cluster ${CLUSTER_NAME}" + ${BIN} validate cluster --state "${KOPS_STATE_FILE}" --wait 10m --kubeconfig "${KUBECONFIG}" return $? } +function kops_cluster_exists() { + CLUSTER_NAME=${1} + BIN=${2} + KOPS_STATE_FILE=${3} + set +e + if ${BIN} get cluster --state "${KOPS_STATE_FILE}" "${CLUSTER_NAME}"; then + set -e + return 0 + else + set -e + return 1 + fi +} + function kops_delete_cluster() { - KOPS_BIN=${1} + BIN=${1} CLUSTER_NAME=${2} KOPS_STATE_FILE=${3} loudecho "Deleting cluster ${CLUSTER_NAME}" - ${KOPS_BIN} delete cluster --name "${CLUSTER_NAME}" --state "${KOPS_STATE_FILE}" --yes + ${BIN} delete cluster --name "${CLUSTER_NAME}" --state "${KOPS_STATE_FILE}" --yes +} + +# TODO switch this to python, work exclusively with yaml, use kops toolbox +# template/kops set?, all this hacking with jq stinks! 
+function kops_patch_cluster_file() { + CLUSTER_FILE=${1} # input must be yaml + KOPS_PATCH_FILE=${2} # input must be yaml + KIND=${3} # must be either Cluster or InstanceGroup + ROLE=${4} # must be either Master or Node + + loudecho "Patching cluster $CLUSTER_NAME with $KOPS_PATCH_FILE" + + # Temporary intermediate files for patching, don't mutate CLUSTER_FILE until + # the end + CLUSTER_FILE_JSON=$CLUSTER_FILE.json + CLUSTER_FILE_0=$CLUSTER_FILE.0 + CLUSTER_FILE_1=$CLUSTER_FILE.1 + + # HACK convert the multiple yaml documents to an array of json objects + yaml_to_json "$CLUSTER_FILE" "$CLUSTER_FILE_JSON" + + # Find the json objects to patch + FILTER=".[] | select(.kind==\"$KIND\")" + if [ -n "$ROLE" ]; then + FILTER="$FILTER | select(.spec.role==\"$ROLE\")" + fi + jq "$FILTER" "$CLUSTER_FILE_JSON" > "$CLUSTER_FILE_0" + + # Patch only the json objects + kubectl patch -f "$CLUSTER_FILE_0" --local --type merge --patch "$(cat "$KOPS_PATCH_FILE")" -o json > "$CLUSTER_FILE_1" + mv "$CLUSTER_FILE_1" "$CLUSTER_FILE_0" + + # Delete the original json objects, add the patched + # TODO Cluster must always be first? + jq "del($FILTER)" "$CLUSTER_FILE_JSON" | jq ". + \$patched | sort" --slurpfile patched "$CLUSTER_FILE_0" > "$CLUSTER_FILE_1" + mv "$CLUSTER_FILE_1" "$CLUSTER_FILE_0" + + # HACK convert the array of json objects to multiple yaml documents + json_to_yaml "$CLUSTER_FILE_0" "$CLUSTER_FILE_1" + mv "$CLUSTER_FILE_1" "$CLUSTER_FILE_0" + + # Done patching, overwrite original yaml CLUSTER_FILE + mv "$CLUSTER_FILE_0" "$CLUSTER_FILE" # output is yaml + + # Clean up + rm "$CLUSTER_FILE_JSON" +} + +function yaml_to_json() { + IN=${1} + OUT=${2} + kubectl patch -f "$IN" --local -p "{}" --type merge -o json | jq '.' 
-s > "$OUT" +} + +function json_to_yaml() { + IN=${1} + OUT=${2} + for ((i = 0; i < $(jq length "$IN"); i++)); do + echo "---" >> "$OUT" + jq ".[$i]" "$IN" | kubectl patch -f - --local -p "{}" --type merge -o yaml >> "$OUT" + done } diff --git a/hack/e2e/run.sh b/hack/e2e/run.sh index bb26e4116..9de96cf4e 100755 --- a/hack/e2e/run.sh +++ b/hack/e2e/run.sh @@ -19,45 +19,60 @@ set -euo pipefail BASE_DIR=$(dirname "$(realpath "${BASH_SOURCE[0]}")") source "${BASE_DIR}"/ebs.sh source "${BASE_DIR}"/ecr.sh +source "${BASE_DIR}"/eksctl.sh source "${BASE_DIR}"/helm.sh source "${BASE_DIR}"/kops.sh source "${BASE_DIR}"/util.sh DRIVER_NAME=${DRIVER_NAME:-aws-ebs-csi-driver} CONTAINER_NAME=${CONTAINER_NAME:-ebs-plugin} +DRIVER_START_TIME_THRESHOLD_SECONDS=60 TEST_ID=${TEST_ID:-$RANDOM} CLUSTER_NAME=test-cluster-${TEST_ID}.k8s.local +CLUSTER_TYPE=${CLUSTER_TYPE:-kops} TEST_DIR=${BASE_DIR}/csi-test-artifacts BIN_DIR=${TEST_DIR}/bin SSH_KEY_PATH=${TEST_DIR}/id_rsa +CLUSTER_FILE=${TEST_DIR}/${CLUSTER_NAME}.${CLUSTER_TYPE}.yaml +KUBECONFIG=${KUBECONFIG:-"${TEST_DIR}/${CLUSTER_NAME}.${CLUSTER_TYPE}.kubeconfig"} REGION=${AWS_REGION:-us-west-2} ZONES=${AWS_AVAILABILITY_ZONES:-us-west-2a,us-west-2b,us-west-2c} +FIRST_ZONE=$(echo "${ZONES}" | cut -d, -f1) +NODE_COUNT=${NODE_COUNT:-3} INSTANCE_TYPE=${INSTANCE_TYPE:-c4.large} AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) IMAGE_NAME=${IMAGE_NAME:-${AWS_ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/${DRIVER_NAME}} IMAGE_TAG=${IMAGE_TAG:-${TEST_ID}} -K8S_VERSION=${K8S_VERSION:-1.18.16} -KOPS_VERSION=${KOPS_VERSION:-1.18.2} +# kops: must include patch version (e.g. 1.19.1) +# eksctl: mustn't include patch version (e.g. 
1.19) +K8S_VERSION=${K8S_VERSION:-1.20.8} + +KOPS_VERSION=${KOPS_VERSION:-1.21.0} KOPS_STATE_FILE=${KOPS_STATE_FILE:-s3://k8s-kops-csi-e2e} -KOPS_FEATURE_GATES_FILE=${KOPS_FEATURE_GATES_FILE:-./hack/feature-gates.yaml} -KOPS_ADDITIONAL_POLICIES_FILE=${KOPS_ADDITIONAL_POLICIES_FILE:-./hack/additional-policies.yaml} +KOPS_PATCH_FILE=${KOPS_PATCH_FILE:-./hack/kops-patch.yaml} +KOPS_PATCH_NODE_FILE=${KOPS_PATCH_NODE_FILE:-./hack/kops-patch-node.yaml} + +EKSCTL_VERSION=${EKSCTL_VERSION:-0.56.0-rc.1} +EKSCTL_PATCH_FILE=${EKSCTL_PATCH_FILE:-./hack/eksctl-patch.yaml} +EKSCTL_ADMIN_ROLE=${EKSCTL_ADMIN_ROLE:-} HELM_VALUES_FILE=${HELM_VALUES_FILE:-./hack/values.yaml} +HELM_EXTRA_FLAGS=${HELM_EXTRA_FLAGS:-} TEST_PATH=${TEST_PATH:-"./tests/e2e/..."} -KUBECONFIG=${KUBECONFIG:-"${HOME}/.kube/config"} ARTIFACTS=${ARTIFACTS:-"${TEST_DIR}/artifacts"} GINKGO_FOCUS=${GINKGO_FOCUS:-"\[ebs-csi-e2e\]"} GINKGO_SKIP=${GINKGO_SKIP:-"\[Disruptive\]"} GINKGO_NODES=${GINKGO_NODES:-4} TEST_EXTRA_FLAGS=${TEST_EXTRA_FLAGS:-} -EBS_SNAPSHOT_CRD=${EBS_SNAPSHOT_CRD:-"./deploy/kubernetes/cluster/crd_snapshotter.yaml"} +EBS_INSTALL_SNAPSHOT=${EBS_INSTALL_SNAPSHOT:-"false"} +EBS_INSTALL_SNAPSHOT_VERSION=${EBS_INSTALL_SNAPSHOT_VERSION:-"v4.1.1"} EBS_CHECK_MIGRATION=${EBS_CHECK_MIGRATION:-"false"} CLEAN=${CLEAN:-"true"} @@ -66,9 +81,18 @@ loudecho "Testing in region ${REGION} and zones ${ZONES}" mkdir -p "${BIN_DIR}" export PATH=${PATH}:${BIN_DIR} -loudecho "Installing kops ${KOPS_VERSION} to ${BIN_DIR}" -kops_install "${BIN_DIR}" "${KOPS_VERSION}" -KOPS_BIN=${BIN_DIR}/kops +if [[ "${CLUSTER_TYPE}" == "kops" ]]; then + loudecho "Installing kops ${KOPS_VERSION} to ${BIN_DIR}" + kops_install "${BIN_DIR}" "${KOPS_VERSION}" + KOPS_BIN=${BIN_DIR}/kops +elif [[ "${CLUSTER_TYPE}" == "eksctl" ]]; then + loudecho "Installing eksctl ${EKSCTL_VERSION} to ${BIN_DIR}" + eksctl_install "${BIN_DIR}" "${EKSCTL_VERSION}" + EKSCTL_BIN=${BIN_DIR}/eksctl +else + loudecho "${CLUSTER_TYPE} must be kops or eksctl!" 
+ exit 1 +fi loudecho "Installing helm to ${BIN_DIR}" helm_install "${BIN_DIR}" @@ -87,41 +111,84 @@ ecr_build_and_push "${REGION}" \ "${IMAGE_NAME}" \ "${IMAGE_TAG}" -kops_create_cluster \ - "$SSH_KEY_PATH" \ - "$KOPS_STATE_FILE" \ - "$CLUSTER_NAME" \ - "$KOPS_BIN" \ - "$ZONES" \ - "$INSTANCE_TYPE" \ - "$K8S_VERSION" \ - "$TEST_DIR" \ - "$BASE_DIR" \ - "$KOPS_FEATURE_GATES_FILE" \ - "$KOPS_ADDITIONAL_POLICIES_FILE" -if [[ $? -ne 0 ]]; then - exit 1 +if [[ "${CLUSTER_TYPE}" == "kops" ]]; then + kops_create_cluster \ + "$SSH_KEY_PATH" \ + "$CLUSTER_NAME" \ + "$KOPS_BIN" \ + "$ZONES" \ + "$NODE_COUNT" \ + "$INSTANCE_TYPE" \ + "$K8S_VERSION" \ + "$CLUSTER_FILE" \ + "$KUBECONFIG" \ + "$KOPS_PATCH_FILE" \ + "$KOPS_PATCH_NODE_FILE" \ + "$KOPS_STATE_FILE" + if [[ $? -ne 0 ]]; then + exit 1 + fi +elif [[ "${CLUSTER_TYPE}" == "eksctl" ]]; then + eksctl_create_cluster \ + "$SSH_KEY_PATH" \ + "$CLUSTER_NAME" \ + "$EKSCTL_BIN" \ + "$ZONES" \ + "$INSTANCE_TYPE" \ + "$K8S_VERSION" \ + "$CLUSTER_FILE" \ + "$KUBECONFIG" \ + "$EKSCTL_PATCH_FILE" \ + "$EKSCTL_ADMIN_ROLE" + if [[ $? 
-ne 0 ]]; then + exit 1 + fi +fi + +if [[ "${EBS_INSTALL_SNAPSHOT}" == true ]]; then + loudecho "Installing snapshot controller and CRDs" + kubectl apply --kubeconfig "${KUBECONFIG}" -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/"${EBS_INSTALL_SNAPSHOT_VERSION}"/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml + kubectl apply --kubeconfig "${KUBECONFIG}" -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/"${EBS_INSTALL_SNAPSHOT_VERSION}"/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml + kubectl apply --kubeconfig "${KUBECONFIG}" -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/"${EBS_INSTALL_SNAPSHOT_VERSION}"/client/config/crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml + kubectl apply --kubeconfig "${KUBECONFIG}" -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/"${EBS_INSTALL_SNAPSHOT_VERSION}"/client/config/crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml + kubectl apply --kubeconfig "${KUBECONFIG}" -f https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/"${EBS_INSTALL_SNAPSHOT_VERSION}"/client/config/crd/snapshot.storage.k8s.io_volumesnapshots.yaml fi loudecho "Deploying driver" -"${HELM_BIN}" upgrade --install "${DRIVER_NAME}" \ - --namespace kube-system \ - --set image.repository="${IMAGE_NAME}" \ - --set image.tag="${IMAGE_TAG}" \ - -f "${HELM_VALUES_FILE}" \ - ./charts/"${DRIVER_NAME}" - -if [[ -r "${EBS_SNAPSHOT_CRD}" ]]; then - loudecho "Deploying snapshot CRD" - kubectl apply -f "$EBS_SNAPSHOT_CRD" - # TODO deploy snapshot controller too instead of including in helm chart +startSec=$(date +'%s') + +HELM_ARGS=(upgrade --install "${DRIVER_NAME}" + --namespace kube-system + --set image.repository="${IMAGE_NAME}" + --set image.tag="${IMAGE_TAG}" + --wait + --kubeconfig "${KUBECONFIG}" + ./charts/"${DRIVER_NAME}") +if [[ -f "$HELM_VALUES_FILE" ]]; then + HELM_ARGS+=(-f "${HELM_VALUES_FILE}") 
+fi +eval "EXPANDED_HELM_EXTRA_FLAGS=$HELM_EXTRA_FLAGS" +if [[ -n "$EXPANDED_HELM_EXTRA_FLAGS" ]]; then + HELM_ARGS+=("${EXPANDED_HELM_EXTRA_FLAGS}") fi +set -x +"${HELM_BIN}" "${HELM_ARGS[@]}" +set +x + +endSec=$(date +'%s') +secondUsed=$(((endSec - startSec) / 1)) +# Fail if deployment takes longer than DRIVER_START_TIME_THRESHOLD_SECONDS (currently 60s); startup usually takes well under 10s +if [ $secondUsed -gt $DRIVER_START_TIME_THRESHOLD_SECONDS ]; then + loudecho "Driver start timeout, took $secondUsed but the threshold is $DRIVER_START_TIME_THRESHOLD_SECONDS. Fail the test." + exit 1 +fi +loudecho "Driver deployment complete, time used: $secondUsed seconds" loudecho "Testing focus ${GINKGO_FOCUS}" eval "EXPANDED_TEST_EXTRA_FLAGS=$TEST_EXTRA_FLAGS" set -x set +e +${GINKGO_BIN} -p -nodes="${GINKGO_NODES}" -v --focus="${GINKGO_FOCUS}" --skip="${GINKGO_SKIP}" "${TEST_PATH}" -- -kubeconfig="${KUBECONFIG}" -report-dir="${ARTIFACTS}" -gce-zone="${FIRST_ZONE}" "${EXPANDED_TEST_EXTRA_FLAGS}" TEST_PASSED=$? 
set -e set +x @@ -130,7 +197,7 @@ loudecho "TEST_PASSED: ${TEST_PASSED}" OVERALL_TEST_PASSED="${TEST_PASSED}" if [[ "${EBS_CHECK_MIGRATION}" == true ]]; then exec 5>&1 - OUTPUT=$(ebs_check_migration | tee /dev/fd/5) + OUTPUT=$(ebs_check_migration "${KUBECONFIG}" | tee /dev/fd/5) MIGRATION_PASSED=$(echo "${OUTPUT}" | tail -1) loudecho "MIGRATION_PASSED: ${MIGRATION_PASSED}" if [ "${TEST_PASSED}" == 0 ] && [ "${MIGRATION_PASSED}" == 0 ]; then @@ -142,12 +209,13 @@ if [[ "${EBS_CHECK_MIGRATION}" == true ]]; then fi fi -PODS=$(kubectl get pod -n kube-system -l "app.kubernetes.io/name=${DRIVER_NAME},app.kubernetes.io/instance=${DRIVER_NAME}" -o json | jq -r .items[].metadata.name) +PODS=$(kubectl get pod -n kube-system -l "app.kubernetes.io/name=${DRIVER_NAME},app.kubernetes.io/instance=${DRIVER_NAME}" -o json --kubeconfig "${KUBECONFIG}" | jq -r .items[].metadata.name) while IFS= read -r POD; do loudecho "Printing pod ${POD} ${CONTAINER_NAME} container logs" set +e - kubectl logs "${POD}" -n kube-system "${CONTAINER_NAME}" + kubectl logs "${POD}" -n kube-system "${CONTAINER_NAME}" \ + --kubeconfig "${KUBECONFIG}" set -e done <<< "${PODS}" @@ -156,12 +224,19 @@ if [[ "${CLEAN}" == true ]]; then loudecho "Removing driver" ${HELM_BIN} del "${DRIVER_NAME}" \ - --namespace kube-system - - kops_delete_cluster \ - "${KOPS_BIN}" \ - "${CLUSTER_NAME}" \ - "${KOPS_STATE_FILE}" + --namespace kube-system \ + --kubeconfig "${KUBECONFIG}" + + if [[ "${CLUSTER_TYPE}" == "kops" ]]; then + kops_delete_cluster \ + "${KOPS_BIN}" \ + "${CLUSTER_NAME}" \ + "${KOPS_STATE_FILE}" + elif [[ "${CLUSTER_TYPE}" == "eksctl" ]]; then + eksctl_delete_cluster \ + "${EKSCTL_BIN}" \ + "${CLUSTER_NAME}" + fi else loudecho "Not cleaning" fi diff --git a/hack/e2e/util.sh b/hack/e2e/util.sh index 46bb5b16e..56f23e3f8 100644 --- a/hack/e2e/util.sh +++ b/hack/e2e/util.sh @@ -7,3 +7,13 @@ function loudecho() { echo "## ${1}" echo "#" } + +function generate_ssh_key() { + SSH_KEY_PATH=${1} + if [[ ! 
-e ${SSH_KEY_PATH} ]]; then + loudecho "Generating SSH key $SSH_KEY_PATH" + ssh-keygen -P csi-e2e -f "${SSH_KEY_PATH}" + else + loudecho "Reusing SSH key $SSH_KEY_PATH" + fi +}