diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 38166e8e..00741ec5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -66,7 +66,9 @@ You can now deploy a local development environment for Kubernetes using [Docker 1. Navigate to Docker preferences - 1. Click on "Advanced" and slide the "Memory" bar to 6 + 1. Click on "Resources" and slide the "Memory" bar to 6 + +1. If you intend to deploy the Secrets Provider via Helm, you will need to install the Helm CLI. See [here](https://helm.sh/docs/intro/install/) for instructions on how to do so. #### Deploy @@ -80,9 +82,9 @@ Run ` kubectl config current-context` to verify which context you are currently Run `kubectl config use-context docker-desktop` to switch to a local context. This is the context you will need to run the development environment -1. Navigate to `bootstrap.env` and uncomment the `Local DEV Env` section, ensuring that `DEV=true` +1. Navigate to `bootstrap.env` and uncomment the `Local DEV Env` section, ensuring that `DEV=true`. Additionally, you can deploy the Secrets Provider locally using HELM. To do so, _also_ uncomment `DEV_HELM` -1. Run `./bin/start --dev --gke`, appending `--oss` or `--dap` according to the environment that needs to be deployed +1. Run `./bin/start --dev`, appending `--oss` or `--dap` according to the environment that needs to be deployed 1. To view the pod(s) that were deployed and the Secrets Provider logs, run `kubectl get pods` and `kubectl logs -c cyberark-secrets-provider` respectively. You can also view Conjur/DAP pod logs by running `kubectl get pods -n local-conjur` and `kubectl logs -n local-conjur` diff --git a/Jenkinsfile b/Jenkinsfile index e45f68a9..bd7efeb9 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -48,9 +48,9 @@ pipeline { tasks["Kubernetes GKE, oss"] = { sh "./bin/start --docker --oss --gke" } - tasks["Openshift v3.11, oss"] = { - sh "./bin/start --docker --oss --oc311" - } + // tasks["Openshift v3.11, oss"] = { + // sh "./bin/start --docker --oss --oc311" + // } // skip oc310 tests until the environment will be ready to use // tasks["Openshift v3.10, oss"] = { // sh "./bin/start --docker --oss --oc310" diff --git a/bin/build_utils b/bin/build_utils index d9529924..7a2e2f50 100644 --- a/bin/build_utils +++ b/bin/build_utils @@ -8,21 +8,21 @@ set -euo pipefail readonly VERSION_GO_FILE="pkg/secrets/version.go" -function short_version_tag() { +short_version_tag() { grep -v '^//' "${VERSION_GO_FILE}" | grep 'var Version =' | awk -F'= ' '{print $2}' | tr -d '"' } -function git_tag() { +git_tag() { git rev-parse --short HEAD } -function full_version_tag() { +full_version_tag() { echo "$(short_version_tag)-$(git_tag)" } # generate less specific versions, eg. given 1.2.3 will print 1.2 and 1 # (note: the argument itself is not printed, append it explicitly if needed) -function gen_versions() { +gen_versions() { local version=$1 while [[ $version = *.* ]]; do version=${version%.*} diff --git a/bin/start b/bin/start index dbecf974..5710101a 100755 --- a/bin/start +++ b/bin/start @@ -1,6 +1,6 @@ #!/bin/bash -ex -function print_help() { +print_help() { cat << EOF Test the secrets-provider-for-k8s image. This script sets up a Conjur cluster in k8s and deploys a k8s environment with an app container and a secrets-provider-for-k8s init container. 
Finally it tests that the outcome is as expected (for example, @@ -29,7 +29,7 @@ EOF exit } -function runScriptWithSummon() { +runScriptWithSummon() { summon --environment $SUMMON_ENV -f ./summon/secrets.yml $1 } diff --git a/bootstrap.env b/bootstrap.env index 15a2bdcf..c88e7a68 100644 --- a/bootstrap.env +++ b/bootstrap.env @@ -18,8 +18,10 @@ export APP_NAMESPACE_NAME=app-$UNIQUE_TEST_ID # export AUTHENTICATOR_ID=authn-dev-env # export APP_NAMESPACE_NAME=local-secrets-provider # export CONJUR_NAMESPACE_NAME=local-conjur -# export DEV=true # export STOP_RUNNING_ENV=true # export CONJUR_ACCOUNT=cucumber # export CONJUR_LOG_LEVEL=debug - +# export CONJUR_AUTHENTICATORS=authn-k8s/${AUTHENTICATOR_ID} +# export DEV=true +# Uncomment to deploy the Secrets Provider using HELM +# export DEV_HELM=true diff --git a/deploy/2_create_app_namespace.sh b/deploy/2_create_app_namespace.sh index c31e3273..dc249ef4 100755 --- a/deploy/2_create_app_namespace.sh +++ b/deploy/2_create_app_namespace.sh @@ -26,10 +26,7 @@ fi $cli_with_timeout delete --ignore-not-found rolebinding app-conjur-authenticator-role-binding-$CONJUR_NAMESPACE_NAME -CONFIG_DIR="config/k8s" -if [[ "$PLATFORM" = "openshift" ]]; then - CONFIG_DIR="config/openshift" -fi +set_config_directory_path wait_for_it 600 "./$CONFIG_DIR/app-conjur-authenticator-role-binding.sh.yml | $cli_without_timeout apply -f -" diff --git a/deploy/dev/5_load_environment.sh b/deploy/dev/5_load_environment.sh index 340ddb73..2b5da635 100755 --- a/deploy/dev/5_load_environment.sh +++ b/deploy/dev/5_load_environment.sh @@ -3,30 +3,40 @@ set -euxo pipefail . utils.sh -function main() { +main() { + export DEV_HELM=${DEV_HELM:-"false"} ./teardown_resources.sh set_namespace "$APP_NAMESPACE_NAME" - configure_secret + if [ "${DEV_HELM}" = "true" ]; then + setup_helm_environment - deploy_env -} + create_k8s_secret + export IMAGE_PULL_POLICY="Never" + export IMAGE="secrets-provider-for-k8s" + export TAG="dev" + deploy_chart + + deploy_helm_app + else + create_k8s_secret -function configure_secret() { - announce "Configuring K8s Secret and access." + create_secret_access_role - export CONFIG_DIR="$PWD/config/k8s" - if [[ "$PLATFORM" = "openshift" ]]; then - export CONFIG_DIR="$PWD/config/openshift" + create_secret_access_role_binding + + deploy_init_env fi +} - echo "Create secret k8s-secret" - $cli_with_timeout create -f $CONFIG_DIR/k8s-secret.yml +create_k8s_secret() { + announce "Creating K8s Secret." - create_secret_access_role + set_config_directory_path - create_secret_access_role_binding + echo "Create secret k8s-secret" + $cli_with_timeout create -f $CONFIG_DIR/k8s-secret.yml } main diff --git a/deploy/dev/config/k8s/secrets-provider-init-container.sh.yml b/deploy/dev/config/k8s/secrets-provider-init-container.sh.yml index 26e88d6a..1500b03b 100755 --- a/deploy/dev/config/k8s/secrets-provider-init-container.sh.yml +++ b/deploy/dev/config/k8s/secrets-provider-init-container.sh.yml @@ -7,17 +7,17 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - app: init-env - name: init-env + app: test-env + name: test-env spec: replicas: 1 selector: matchLabels: - app: init-env + app: test-env template: metadata: labels: - app: init-env + app: test-env spec: serviceAccountName: ${APP_NAMESPACE_NAME}-sa containers: diff --git a/deploy/dev/reload.sh b/deploy/dev/reload.sh index b6d80d75..04b15aa4 100755 --- a/deploy/dev/reload.sh +++ b/deploy/dev/reload.sh @@ -3,32 +3,49 @@ set -xeuo pipefail . 
utils.sh -# Script for making it easy to make a change locally and redeploy -pushd .. - ./bin/build -popd +main() { + export DEV_HELM=${DEV_HELM:-"false"} -set_namespace $APP_NAMESPACE_NAME + # Clean-up previous run + if [ "$(helm ls -aq | wc -l | tr -d ' ')" != 0 ]; then + helm delete $(helm ls -aq) + fi + $cli_with_timeout "delete deployment test-env --ignore-not-found=true" -docker tag "secrets-provider-for-k8s:dev" "${APP_NAMESPACE_NAME}/secrets-provider" + pushd .. + ./bin/build + popd -selector="role=follower" -cert_location="/opt/conjur/etc/ssl/conjur.pem" -if [ "$CONJUR_DEPLOYMENT" = "oss" ]; then - selector="app=conjur-cli" - cert_location="/root/conjur-${CONJUR_ACCOUNT}.pem" -fi + set_namespace $APP_NAMESPACE_NAME -conjur_pod_name=$($cli_with_timeout get pods --selector=$selector --namespace $CONJUR_NAMESPACE_NAME --no-headers | awk '{ print $1 }' | head -1) -ssl_cert=$($cli_with_timeout "exec ${conjur_pod_name} --namespace $CONJUR_NAMESPACE_NAME cat $cert_location") + if [ "${DEV_HELM}" = "true" ]; then + setup_helm_environment -export CONJUR_SSL_CERTIFICATE=$ssl_cert + export IMAGE_PULL_POLICY="Never" + export IMAGE="secrets-provider-for-k8s" + export TAG="dev" + deploy_chart -export CONFIG_DIR="$PWD/config/k8s" -if [[ "$PLATFORM" = "openshift" ]]; then - export CONFIG_DIR="$PWD/config/openshift" -fi + deploy_helm_app + else + selector="role=follower" + cert_location="/opt/conjur/etc/ssl/conjur.pem" + if [ "$CONJUR_DEPLOYMENT" = "oss" ]; then + selector="app=conjur-cli" + cert_location="/root/conjur-${CONJUR_ACCOUNT}.pem" + fi -$cli_with_timeout "delete deployment init-env --ignore-not-found=true" + conjur_pod_name=$($cli_with_timeout get pods --selector=$selector --namespace $CONJUR_NAMESPACE_NAME --no-headers | awk '{ print $1 }' | head -1) + ssl_cert=$($cli_with_timeout "exec ${conjur_pod_name} --namespace $CONJUR_NAMESPACE_NAME cat $cert_location") -deploy_env + export CONJUR_SSL_CERTIFICATE=$ssl_cert + + set_config_directory_path + + $cli_with_timeout "delete deployment init-env --ignore-not-found=true" + + deploy_init_env + fi +} + +main diff --git a/deploy/platform_login.sh b/deploy/platform_login.sh index 01b2d9d2..9ebf4309 100755 --- a/deploy/platform_login.sh +++ b/deploy/platform_login.sh @@ -3,7 +3,7 @@ set -euo pipefail IFS=$'\n\t' -function main() { +main() { # Log in to platform if [[ "$PLATFORM" = "kubernetes" ]]; then gcloud auth activate-service-account \ diff --git a/deploy/policy/templates/conjur-secrets.template.sh.yml b/deploy/policy/templates/conjur-secrets.template.sh.yml index 5375cb5c..ea1de1d4 100755 --- a/deploy/policy/templates/conjur-secrets.template.sh.yml +++ b/deploy/policy/templates/conjur-secrets.template.sh.yml @@ -8,6 +8,7 @@ cat << EOL body: - &variables - !variable test_secret + - !variable another_test_secret - !variable var with spaces - !variable var+with+pluses diff --git a/deploy/run.sh b/deploy/run.sh index 71f19559..43cfac9f 100755 --- a/deploy/run.sh +++ b/deploy/run.sh @@ -4,12 +4,12 @@ set -xeuo pipefail . utils.sh printenv > /tmp/printenv_local.debug -function main() { +main() { deployConjur ./run_with_summon.sh } -function deployConjur() { +deployConjur() { pushd .. git clone git@github.com:cyberark/kubernetes-conjur-deploy kubernetes-conjur-deploy-$UNIQUE_TEST_ID diff --git a/deploy/run_with_summon.sh b/deploy/run_with_summon.sh index 7ff48b4f..4cd51ed6 100755 --- a/deploy/run_with_summon.sh +++ b/deploy/run_with_summon.sh @@ -4,15 +4,18 @@ set -xeuo pipefail . 
utils.sh # Clean up when script completes and fails -function finish { +finish() { # There is a TRAP in test_in_docker.sh to account for Docker deployments so we do not need to add another one here # Stop the running processes if [[ $RUN_IN_DOCKER = false && $DEV = false ]]; then announce 'Wrapping up and removing environment' - ./stop - cd ./kubernetes-conjur-deploy-$UNIQUE_TEST_ID && ./stop + repo_root_path=$(git rev-parse --show-toplevel) + "$repo_root_path/deploy/stop" + pushd $repo_root_path/kubernetes-conjur-deploy-$UNIQUE_TEST_ID + ./stop + popd # Remove the deploy directory - rm -rf "../kubernetes-conjur-deploy-$UNIQUE_TEST_ID" + rm -rf "$repo_root_path/kubernetes-conjur-deploy-$UNIQUE_TEST_ID" fi } trap finish EXIT @@ -59,7 +62,7 @@ ssl_cert=$($cli_with_timeout "exec ${conjur_pod_name} --namespace $CONJUR_NAMESP export CONJUR_SSL_CERTIFICATE=$ssl_cert -if [ "${DEV}" = "false" ]; then +if [[ "${DEV}" = "false" ]]; then pushd ./test/test_cases > /dev/null ./run_tests.sh popd > /dev/null diff --git a/deploy/teardown_resources.sh b/deploy/teardown_resources.sh index 920235c2..b4cf45a4 100755 --- a/deploy/teardown_resources.sh +++ b/deploy/teardown_resources.sh @@ -7,31 +7,62 @@ set -euxo pipefail set_namespace $CONJUR_NAMESPACE_NAME configure_cli_pod -if [ "${DEV}" = "false" ]; then - $cli_with_timeout "exec $(get_conjur_cli_pod_name) -- conjur variable values add secrets/test_secret \"supersecret\"" + +## Helm Chart clean-up +#rm -f conjur.pem + +helm_ci_path="../helm/secrets-provider/ci" +if [[ "${DEV}" = "false" || "${RUN_IN_DOCKER}" = "true" ]]; then + helm_ci_path="../../../helm/secrets-provider/ci" +fi +pushd $helm_ci_path + find . -type f ! -name '*template.yaml' -delete +popd + +# Delete Helm Chart if already exists +set_namespace $APP_NAMESPACE_NAME +if [ "$(helm ls -aq | wc -l | tr -d ' ')" != 0 ]; then + helm delete $(helm ls -aq) fi +set_namespace $CONJUR_NAMESPACE_NAME + +$cli_with_timeout "exec $(get_conjur_cli_pod_name) -- conjur variable values add secrets/test_secret \"supersecret\"" + set_namespace $APP_NAMESPACE_NAME $cli_with_timeout "delete secret dockerpullsecret --ignore-not-found=true" $cli_with_timeout "delete clusterrole secrets-access-${UNIQUE_TEST_ID} --ignore-not-found=true" +$cli_with_timeout "delete role another-secrets-provider-role --ignore-not-found=true" + $cli_with_timeout "delete secret test-k8s-secret --ignore-not-found=true" +$cli_with_timeout "delete secret another-test-k8s-secret --ignore-not-found=true" + $cli_with_timeout "delete serviceaccount ${APP_NAMESPACE_NAME}-sa --ignore-not-found=true" +$cli_with_timeout "delete serviceaccount another-secrets-provider-service-account --ignore-not-found=true" + $cli_with_timeout "delete rolebinding secrets-access-role-binding --ignore-not-found=true" +$cli_with_timeout "delete rolebinding another-secrets-provider-role-binding --ignore-not-found=true" + if [ "${PLATFORM}" = "kubernetes" ]; then $cli_with_timeout "delete deployment test-env --ignore-not-found=true" + $cli_with_timeout "delete deployment another-test-env --ignore-not-found=true" elif [ "${PLATFORM}" = "openshift" ]; then $cli_with_timeout "delete deploymentconfig test-env --ignore-not-found=true" + $cli_with_timeout "delete deploymentconfig another-test-env --ignore-not-found=true" fi $cli_with_timeout "delete configmap conjur-master-ca-env --ignore-not-found=true" -if [ "${DEV}" = "false" ]; then - echo "Verifying there are no (terminating) pods of type test-env" - $cli_with_timeout "get pods --namespace=$APP_NAMESPACE_NAME --selector 
app=test-env --no-headers | wc -l | tr -d ' ' | grep '^0$'" -fi +echo "Verifying there are no (terminating) pods of type test-env" +$cli_with_timeout "get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-env --no-headers | wc -l | tr -d ' ' | grep '^0$'" + +$cli_with_timeout "get pods --namespace=$APP_NAMESPACE_NAME --selector app=another-test-env --no-headers | wc -l | tr -d ' ' | grep '^0$'" + +echo "Verifying there are no (terminating) pods for Secrets Provider deployed with Helm" +$cli_with_timeout "get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-helm --no-headers | wc -l | tr -d ' ' | grep '^0$'" diff --git a/deploy/test/Dockerfile b/deploy/test/Dockerfile index 30e0b13f..01c7146d 100644 --- a/deploy/test/Dockerfile +++ b/deploy/test/Dockerfile @@ -24,3 +24,13 @@ RUN mkdir -p ocbin && \ tar xvf oc.tar.gz --strip-components=1 -C ocbin && \ mv ocbin/oc /usr/local/bin/oc && \ rm -rf ocbin oc.tar.gz + +# Install Helm +RUN curl https://baltocdn.com/helm/signing.asc | apt-key add - && \ + apt-get install apt-transport-https --yes && \ + echo "deb https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list && \ + apt-get update && \ + apt-get install helm=3.2.* + +# Adds ability to perform mathematical operations with floats for testing +RUN apt-get install -y bc \ No newline at end of file diff --git a/deploy/test/config/k8s/helm-app.yaml b/deploy/test/config/k8s/helm-app.yaml new file mode 100644 index 00000000..30432639 --- /dev/null +++ b/deploy/test/config/k8s/helm-app.yaml @@ -0,0 +1,30 @@ +# This app is created to run end-to-end tests with the Secrets Provider Job to ensure the updated K8s Secret appear as +# environment variables +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: {{ ID }}test-env + name: {{ ID }}test-env +spec: + replicas: 1 + selector: + matchLabels: + app: {{ ID }}test-env + template: + metadata: + labels: + app: {{ ID }}test-env + spec: + serviceAccountName: {{ SERVICE_ACCOUNT }} + containers: + - image: centos:7 + name: {{ ID }}test-app + command: ["sleep"] + args: ["infinity"] + env: + - name: {{ ID }}TEST_SECRET + valueFrom: + secretKeyRef: + name: {{ K8S_SECRET }} + key: secret diff --git a/deploy/test/config/k8s/secrets-access-role.yaml b/deploy/test/config/k8s/secrets-access-role.yaml new file mode 100644 index 00000000..a4554b7e --- /dev/null +++ b/deploy/test/config/k8s/secrets-access-role.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ ID }}secrets-provider-service-account + namespace: {{ APP_NAMESPACE_NAME }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ ID }}secrets-provider-role + namespace: {{ APP_NAMESPACE_NAME }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: [ "get", "update" ] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ ID }}secrets-provider-role-binding + namespace: {{ APP_NAMESPACE_NAME }} +subjects: + - kind: ServiceAccount + name: {{ ID }}secrets-provider-service-account +roleRef: + kind: Role + apiGroup: rbac.authorization.k8s.io + name: {{ ID }}secrets-provider-role diff --git a/deploy/test/config/k8s_secret.yml b/deploy/test/config/k8s_secret.yml new file mode 100644 index 00000000..5956a3e1 --- /dev/null +++ b/deploy/test/config/k8s_secret.yml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: another-test-k8s-secret +type: Opaque +stringData: + conjur-map: |- + secret: secrets/another_test_secret diff --git 
a/deploy/test/config/openshift/helm-app.yaml b/deploy/test/config/openshift/helm-app.yaml new file mode 100644 index 00000000..322dc3cc --- /dev/null +++ b/deploy/test/config/openshift/helm-app.yaml @@ -0,0 +1,29 @@ +# This app is created to run end-to-end tests with the Secrets Provider Job to ensure the updated K8s Secret appear as +# environment variables +apiVersion: v1 +kind: DeploymentConfig +metadata: + labels: + app: {{ ID }}test-env + name: {{ ID }}test-env +spec: + replicas: 1 + selector: + app: {{ ID }}test-env + template: + metadata: + labels: + app: {{ ID }}test-env + spec: + serviceAccountName: {{ SERVICE_ACCOUNT }} + containers: + - image: centos:7 + name: {{ ID }}test-app + command: ["sleep"] + args: ["infinity"] + env: + - name: {{ ID }}TEST_SECRET + valueFrom: + secretKeyRef: + name: {{ K8S_SECRET }} + key: secret diff --git a/deploy/test/config/openshift/secrets-access-role.yaml b/deploy/test/config/openshift/secrets-access-role.yaml new file mode 100644 index 00000000..ddacf839 --- /dev/null +++ b/deploy/test/config/openshift/secrets-access-role.yaml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ ID }}secrets-provider-service-account + namespace: {{ APP_NAMESPACE_NAME }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ ID }}secrets-provider-role + namespace: {{ APP_NAMESPACE_NAME }} +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: [ "get", "update" ] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ ID }}secrets-provider-role-binding + namespace: {{ APP_NAMESPACE_NAME }} +subjects: + - kind: ServiceAccount + name: {{ ID }}secrets-provider-service-account + namespace: {{ APP_NAMESPACE_NAME }} +roleRef: + kind: Role + apiGroup: rbac.authorization.k8s.io + name: {{ ID }}secrets-provider-role diff --git a/deploy/test/test_cases/TEST_ID_10_SECRETS_DESTINATION_not_exist.sh b/deploy/test/test_cases/TEST_ID_10_SECRETS_DESTINATION_not_exist.sh index 140ef892..da8ccb34 100755 --- a/deploy/test/test_cases/TEST_ID_10_SECRETS_DESTINATION_not_exist.sh +++ b/deploy/test/test_cases/TEST_ID_10_SECRETS_DESTINATION_not_exist.sh @@ -7,7 +7,7 @@ create_secret_access_role_binding echo "Create test-env pod. 
SECRETS_DESTINATION is with invalid value 'incorrect_secrets'" export SECRETS_DESTINATION_KEY_VALUE=$KEY_VALUE_NOT_EXIST -deploy_env +deploy_init_env pod_name=$(cli_get_pods_test_env | awk '{print $1}') diff --git a/deploy/test/test_cases/TEST_ID_11_conjur_authn_failure.sh b/deploy/test/test_cases/TEST_ID_11_conjur_authn_failure.sh index 089a1151..97c536c6 100755 --- a/deploy/test/test_cases/TEST_ID_11_conjur_authn_failure.sh +++ b/deploy/test/test_cases/TEST_ID_11_conjur_authn_failure.sh @@ -7,7 +7,7 @@ create_secret_access_role_binding export CONJUR_AUTHN_LOGIN="host/some-policy/non-existing-namespace/*/*" -deploy_env +deploy_init_env echo "Expecting secrets provider to fail with error CAKC015E Login failed" pod_name=$(cli_get_pods_test_env | awk '{print $1}') diff --git a/deploy/test/test_cases/TEST_ID_12_no_conjur_secrets_permissions.sh b/deploy/test/test_cases/TEST_ID_12_no_conjur_secrets_permissions.sh index b0b9ffb0..eeb178a6 100755 --- a/deploy/test/test_cases/TEST_ID_12_no_conjur_secrets_permissions.sh +++ b/deploy/test/test_cases/TEST_ID_12_no_conjur_secrets_permissions.sh @@ -7,7 +7,7 @@ create_secret_access_role_binding export CONJUR_AUTHN_LOGIN="host/conjur/authn-k8s/${AUTHENTICATOR_ID}/apps/${APP_NAMESPACE_NAME}/service_account/${APP_NAMESPACE_NAME}-sa" -deploy_env +deploy_init_env echo "Expecting secrets provider to fail with error CSPFK034E Failed to retrieve Conjur secrets" pod_name=$(cli_get_pods_test_env | awk '{print $1}') diff --git a/deploy/test/test_cases/TEST_ID_13_host_not_in_apps.sh b/deploy/test/test_cases/TEST_ID_13_host_not_in_apps.sh index 82d487bb..52b3787a 100755 --- a/deploy/test/test_cases/TEST_ID_13_host_not_in_apps.sh +++ b/deploy/test/test_cases/TEST_ID_13_host_not_in_apps.sh @@ -7,7 +7,7 @@ create_secret_access_role_binding export CONJUR_AUTHN_LOGIN="host/some-apps/${APP_NAMESPACE_NAME}/*/*" -deploy_env +deploy_init_env echo "Verifying pod test_env has environment variable 'TEST_SECRET' with value 'supersecret'" pod_name=$(cli_get_pods_test_env | awk '{print $1}') diff --git a/deploy/test/test_cases/TEST_ID_14_host_in_root_policy.sh b/deploy/test/test_cases/TEST_ID_14_host_in_root_policy.sh index a048fcbc..bb32541a 100755 --- a/deploy/test/test_cases/TEST_ID_14_host_in_root_policy.sh +++ b/deploy/test/test_cases/TEST_ID_14_host_in_root_policy.sh @@ -7,7 +7,7 @@ create_secret_access_role_binding export CONJUR_AUTHN_LOGIN="host/${APP_NAMESPACE_NAME}/*/*" -deploy_env +deploy_init_env echo "Verifying pod test_env has environment variable 'TEST_SECRET' with value 'supersecret'" pod_name=$(cli_get_pods_test_env | awk '{print $1}') diff --git a/deploy/test/test_cases/TEST_ID_15_host_with_application_identity_in_annotations.sh b/deploy/test/test_cases/TEST_ID_15_host_with_application_identity_in_annotations.sh index 0acb3c3d..a393699e 100755 --- a/deploy/test/test_cases/TEST_ID_15_host_with_application_identity_in_annotations.sh +++ b/deploy/test/test_cases/TEST_ID_15_host_with_application_identity_in_annotations.sh @@ -7,7 +7,7 @@ create_secret_access_role_binding export CONJUR_AUTHN_LOGIN="host/some-apps/annotations-app" -deploy_env +deploy_init_env echo "Verifying pod test_env has environment variable 'TEST_SECRET' with value 'supersecret'" pod_name=$(cli_get_pods_test_env | awk '{print $1}') diff --git a/deploy/test/test_cases/TEST_ID_16_non_conjur_keys_stay_intact_in_k8s_secret.sh b/deploy/test/test_cases/TEST_ID_16_non_conjur_keys_stay_intact_in_k8s_secret.sh index ae7188c6..6188cf7a 100755 --- 
a/deploy/test/test_cases/TEST_ID_16_non_conjur_keys_stay_intact_in_k8s_secret.sh +++ b/deploy/test/test_cases/TEST_ID_16_non_conjur_keys_stay_intact_in_k8s_secret.sh @@ -8,7 +8,7 @@ create_secret_access_role create_secret_access_role_binding -deploy_env +deploy_init_env k8s_secret_key="NON_CONJUR_SECRET" secret_value="some-value" diff --git a/deploy/test/test_cases/TEST_ID_17_helm_job_deploys_successfully.sh b/deploy/test/test_cases/TEST_ID_17_helm_job_deploys_successfully.sh new file mode 100755 index 00000000..7ddb9188 --- /dev/null +++ b/deploy/test/test_cases/TEST_ID_17_helm_job_deploys_successfully.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -euxo pipefail + +# This test verifies that the Secrets Provider Job deploys successfully and Conjur secret appears in pod +setup_helm_environment + +pushd ../../ + fill_helm_chart + helm install -f "../helm/secrets-provider/ci/test-values.yaml" \ + secrets-provider ../helm/secrets-provider \ + --set-file environment.conjur.sslCertificate.value="test/test_cases/conjur.pem" +popd + +# Deploy app to test against +deploy_helm_app + +# Check for Job completion +helm_chart_name="secrets-provider" +$cli_with_timeout wait --for=condition=complete job/$helm_chart_name + +pod_name=$($cli_with_timeout get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-env --no-headers | awk '{print $1}' ) +verify_secret_value_in_pod $pod_name "TEST_SECRET" "supersecret" + +pod_name=$($cli_with_timeout get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-helm --no-headers | awk '{print $1}' ) +echo "Expecting the Secrets provider to succeed with proper success log 'CSPFK009I DAP/Conjur Secrets updated in Kubernetes successfully'" +$cli_with_timeout "logs $pod_name | grep CSPFK009I" diff --git a/deploy/test/test_cases/TEST_ID_18_helm_multiple_provider_multiple_secrets.sh b/deploy/test/test_cases/TEST_ID_18_helm_multiple_provider_multiple_secrets.sh new file mode 100755 index 00000000..6c6cf155 --- /dev/null +++ b/deploy/test/test_cases/TEST_ID_18_helm_multiple_provider_multiple_secrets.sh @@ -0,0 +1,51 @@ +#!/bin/bash +set -euxo pipefail + +# This test verifies that two Secrets Provider Jobs deploy successfully in the same namespace +setup_helm_environment + +echo "Create second secret" +create_k8s_secret_for_helm_deployment +set_conjur_secret secrets/another_test_secret another-some-secret-value + +# Deploy first Secrets Provider Job +pushd ../../ + fill_helm_chart + helm install -f "../helm/secrets-provider/ci/test-values.yaml" \ + secrets-provider ../helm/secrets-provider \ + --set-file environment.conjur.sslCertificate.value="test/test_cases/conjur.pem" +popd + +helm_chart_name="secrets-provider" +$cli_with_timeout wait --for=condition=complete job/$helm_chart_name + +deploy_helm_app + +# Deployed twice to ensure conjur.pem exists +setup_helm_environment +# Deploy second Secrets Provider Job +pushd ../../ + export SECRETS_PROVIDER_ROLE=another-secrets-provider-role + export SECRETS_PROVIDER_ROLE_BINDING=another-secrets-provider-role-binding + export SERVICE_ACCOUNT=another-secrets-provider-service-account + export K8S_SECRETS=another-test-k8s-secret + export SECRETS_PROVIDER_SSL_CONFIG_MAP=another-secrets-provider-ssl-config-map + fill_helm_chart "another-" + helm install -f "../helm/secrets-provider/ci/another-test-values.yaml" \ + another-secrets-provider ../helm/secrets-provider \ + --set-file environment.conjur.sslCertificate.value="test/test_cases/conjur.pem" +popd + +helm_chart_name="another-secrets-provider" +$cli_with_timeout wait 
--for=condition=complete job/$helm_chart_name + +export K8S_SECRET=another-test-k8s-secret +deploy_helm_app "another-" + +$cli_with_timeout "get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-env --no-headers | grep Running" +pod_name=$($cli_with_timeout get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-env --no-headers | awk '{print $1}' | head -1) +verify_secret_value_in_pod $pod_name "TEST_SECRET" "supersecret" + +$cli_with_timeout "get pods --namespace=$APP_NAMESPACE_NAME --selector app=another-test-env --no-headers | grep Running" +pod_name=$($cli_with_timeout get pods --namespace=$APP_NAMESPACE_NAME --selector app=another-test-env --no-headers | awk '{print $1}' | head -1) +verify_secret_value_in_pod $pod_name "another-TEST_SECRET" "another-some-secret-value" diff --git a/deploy/test/test_cases/TEST_ID_19_helm_multiple_provider_same_secret.sh b/deploy/test/test_cases/TEST_ID_19_helm_multiple_provider_same_secret.sh new file mode 100755 index 00000000..868170e1 --- /dev/null +++ b/deploy/test/test_cases/TEST_ID_19_helm_multiple_provider_same_secret.sh @@ -0,0 +1,38 @@ +#!/bin/bash +set -euxo pipefail + +# This test verifies that two Secrets Provider Jobs deploy successfully in the same namespace, can access the same Conjur secret, and update the same K8s Secret +setup_helm_environment + +pushd ../../ + fill_helm_chart + helm install -f "../helm/secrets-provider/ci/test-values.yaml" \ + secrets-provider ../helm/secrets-provider \ + --set-file environment.conjur.sslCertificate.value="test/test_cases/conjur.pem" +popd + +# Check for Job completion +helm_chart_name="secrets-provider" +$cli_with_timeout wait --for=condition=complete job/$helm_chart_name + +setup_helm_environment +pushd ../../ + export SECRETS_PROVIDER_ROLE=another-secrets-provider-role + export SECRETS_PROVIDER_ROLE_BINDING=another-secrets-provider-role-binding + export SERVICE_ACCOUNT=another-secrets-provider-service-account + export SECRETS_PROVIDER_SSL_CONFIG_MAP=another-secrets-provider-ssl-config-map + fill_helm_chart "another-" + helm install -f "../helm/secrets-provider/ci/another-test-values.yaml" \ + another-secrets-provider ../helm/secrets-provider \ + --set-file environment.conjur.sslCertificate.value="test/test_cases/conjur.pem" +popd + +helm_chart_name="another-secrets-provider" +$cli_with_timeout wait --for=condition=complete job/$helm_chart_name + +# Deploy app to test against +deploy_helm_app + +$cli_with_timeout "get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-env --no-headers | grep Running" +pod_name=$($cli_with_timeout get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-env --no-headers | awk '{print $1}' | head -1) +verify_secret_value_in_pod $pod_name "TEST_SECRET" "supersecret" diff --git a/deploy/test/test_cases/TEST_ID_20_helm_multiple_provider_same_serviceaccount.sh b/deploy/test/test_cases/TEST_ID_20_helm_multiple_provider_same_serviceaccount.sh new file mode 100755 index 00000000..1fc24bb9 --- /dev/null +++ b/deploy/test/test_cases/TEST_ID_20_helm_multiple_provider_same_serviceaccount.sh @@ -0,0 +1,45 @@ +#!/bin/bash +set -euxo pipefail + +# This test verifies that two Secrets Provider Jobs can run with same Service Account successfully in the same namespace +setup_helm_environment + +pushd ../../ + fill_helm_chart + helm install -f "../helm/secrets-provider/ci/test-values.yaml" \ + secrets-provider ../helm/secrets-provider \ + --set-file environment.conjur.sslCertificate.value="test/test_cases/conjur.pem" +popd + +# Check for Job completion 
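# Hedged aside (illustrative only, not part of this change set): the completion check
# below goes through the cli_with_timeout wrapper; with plain kubectl, an equivalent
# wait with an explicit timeout would look like this ("secrets-provider" is the
# release/Job name these tests install):
kubectl wait --for=condition=complete job/secrets-provider \
  --namespace "$APP_NAMESPACE_NAME" --timeout=300s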
+helm_chart_name="secrets-provider" +$cli_with_timeout wait --for=condition=complete job/$helm_chart_name + +setup_helm_environment + +pushd ../../ + export CREATE_SERVICE_ACCOUNT="false" + export LABELS="app: another-test-helm" + # Supply same Service Account resource that was created above + export SERVICE_ACCOUNT=secrets-provider-service-account + export SECRETS_PROVIDER_SSL_CONFIG_MAP=another-secrets-provider-ssl-config-map + fill_helm_chart "another-" + helm install -f "../helm/secrets-provider/ci/another-test-values.yaml" \ + another-secrets-provider ../helm/secrets-provider \ + --set-file environment.conjur.sslCertificate.value="test/test_cases/conjur.pem" +popd + +helm_chart_name="another-secrets-provider" +$cli_with_timeout wait --for=condition=complete job/$helm_chart_name + +# Verify that another-secrets-provider runs with the correct Service Account +$cli_with_timeout "get pods --namespace=$APP_NAMESPACE_NAME --selector app=another-test-helm --no-headers | grep another-secrets-provider" +pod_name=$($cli_with_timeout get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-helm --no-headers | awk '{print $1}' | head -1) +$cli_with_timeout get pods/$pod_name -o yaml | grep "serviceAccount: secrets-provider-service-account" + +# Deploy app to test against +deploy_helm_app + +$cli_with_timeout "get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-env --no-headers | grep Running" +pod_name=$($cli_with_timeout get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-env --no-headers | awk '{print $1}' | head -1) +verify_secret_value_in_pod $pod_name "TEST_SECRET" "supersecret" diff --git a/deploy/test/test_cases/TEST_ID_21_helm_service_account_does_not_exist.sh b/deploy/test/test_cases/TEST_ID_21_helm_service_account_does_not_exist.sh new file mode 100755 index 00000000..739666e3 --- /dev/null +++ b/deploy/test/test_cases/TEST_ID_21_helm_service_account_does_not_exist.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -euxo pipefail + +# This test verifies that when the user declares that they will provide their own Service Account but that Service Account +# does not exist in the namespace the Secrets Provider will fail +setup_helm_environment + +pushd ../../ + export CREATE_SERVICE_ACCOUNT="false" + + fill_helm_chart + helm install -f "../helm/secrets-provider/ci/test-values.yaml" \ + secrets-provider ../helm/secrets-provider \ + --set-file environment.conjur.sslCertificate.value="test/test_cases/conjur.pem" +popd + +# Job should fail and not be completed +helm_chart_name="secrets-provider" +$cli_with_timeout "describe job $helm_chart_name | grep 'error looking up service account'" diff --git a/deploy/test/test_cases/TEST_ID_22_helm_rbac_defaults_taken_successfully.sh b/deploy/test/test_cases/TEST_ID_22_helm_rbac_defaults_taken_successfully.sh new file mode 100755 index 00000000..daccdd4b --- /dev/null +++ b/deploy/test/test_cases/TEST_ID_22_helm_rbac_defaults_taken_successfully.sh @@ -0,0 +1,29 @@ +#!/bin/bash +set -euxo pipefail + +# This test verifies that if the user does not override Helm defaults, Helm will name K8s resources by their defaults and +# the Secrets Provider will deploy and complete successfully +setup_helm_environment + +pushd ../../ + export DEBUG="false" + export LABELS="app: test-helm" + export K8S_SECRETS="test-k8s-secret" + export CONJUR_ACCOUNT="cucumber" + export CONJUR_AUTHN_URL="https://conjur-follower.${CONJUR_NAMESPACE_NAME}.svc.cluster.local/api/authn-k8s/${AUTHENTICATOR_ID}" + export 
CONJUR_AUTHN_LOGIN="host/conjur/authn-k8s/${AUTHENTICATOR_ID}/apps/${APP_NAMESPACE_NAME}/*/*" + + fill_helm_chart_no_override_defaults + helm install -f "../helm/secrets-provider/ci/take-default-test-values.yaml" \ + secrets-provider ../helm/secrets-provider \ + --set-file environment.conjur.sslCertificate.value="test/test_cases/conjur.pem" +popd + +# Validate that known defaults were taken if not supplied +$cli_with_timeout "get ServiceAccount secrets-provider-service-account" +$cli_with_timeout "get Role secrets-provider-role" +$cli_with_timeout "get RoleBinding secrets-provider-role-binding" +$cli_with_timeout "get ConfigMap cert-config-map" + +# Validate that the Secrets Provider took the default image configurations if not supplied and was deployed successfully +$cli_with_timeout "describe job secrets-provider | grep 'cyberark/secrets-provider-for-k8s:1.0.0'" | awk '{print $2}' && $cli_with_timeout "get job secrets-provider -o jsonpath={.status.succeeded}" diff --git a/deploy/test/test_cases/TEST_ID_23_helm_service_account_exists.sh b/deploy/test/test_cases/TEST_ID_23_helm_service_account_exists.sh new file mode 100755 index 00000000..8df01221 --- /dev/null +++ b/deploy/test/test_cases/TEST_ID_23_helm_service_account_exists.sh @@ -0,0 +1,30 @@ +#!/bin/bash +set -euxo pipefail + +# This test verifies that when the user declares that they will provide their own Service Account and that Service Account exists +# the Secrets Provider will deploy and complete successfully +setup_helm_environment + +create_k8s_role "another-" + +pushd ../../ + # RBAC will not be created by the Secrets Provider Helm Chart + export CREATE_SERVICE_ACCOUNT="false" + + # These roles should not be created because of the above configuration + export SERVICE_ACCOUNT="another-secrets-provider-service-account" + + fill_helm_chart + helm install -f "../helm/secrets-provider/ci/test-values.yaml" \ + secrets-provider ../helm/secrets-provider \ + --set-file environment.conjur.sslCertificate.value="test/test_cases/conjur.pem" +popd + +## Validate that resources were not created +$cli_with_timeout "get serviceaccount secrets-provider-service-account --no-headers 2>/dev/null || true | wc -l | tr -d ' ' | grep '^0$'" +$cli_with_timeout "get role secrets-provider-role --no-headers 2>/dev/null || true | wc -l | tr -d ' ' | grep '^0$'" +$cli_with_timeout "get rolebinding secrets-provider-role-binding --no-headers 2>/dev/null || true | wc -l | tr -d ' ' | grep '^0$'" + +# Job will complete successfully because provided Service Account exists +helm_chart_name="secrets-provider" +$cli_with_timeout wait --for=condition=complete job/$helm_chart_name diff --git a/deploy/test/test_cases/TEST_ID_24_helm_validate_K8S_SECRETS_env_var_incorrect_value.sh b/deploy/test/test_cases/TEST_ID_24_helm_validate_K8S_SECRETS_env_var_incorrect_value.sh new file mode 100755 index 00000000..eae04ad9 --- /dev/null +++ b/deploy/test/test_cases/TEST_ID_24_helm_validate_K8S_SECRETS_env_var_incorrect_value.sh @@ -0,0 +1,24 @@ +#!/bin/bash +set -euxo pipefail + +# This test verifies that the Secrets Provider (deployed with Helm) mechanism still fails in case a K8S Secret does not exist and the proper errors appear in logs +setup_helm_environment + +pushd ../../ + # Install HELM with a K8s Secret that does not exist + export K8S_SECRETS="K8S_SECRET-non-existent-secret" + export LABELS="app: test-helm" + export DEBUG="true" + + fill_helm_chart + helm install -f "../helm/secrets-provider/ci/test-values.yaml" \ + secrets-provider ../helm/secrets-provider \ + 
--set-file environment.conjur.sslCertificate.value="test/test_cases/conjur.pem" +popd + +echo "Expecting Secrets Provider to fail with debug message 'CSPFK004D Failed to retrieve k8s secret. Reason: secrets K8S_SECRET-non-existent-secret not found'" +pod_name=$($cli_with_timeout get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-helm --no-headers | awk '{print $1}' ) +$cli_with_timeout "logs $pod_name -c cyberark-secrets-provider | grep CSPFK004D" + +echo "Expecting Secrets Provider to fail with error 'CSPFK020E Failed to retrieve k8s secret'" +$cli_with_timeout "logs $pod_name -c cyberark-secrets-provider | grep CSPFK020E" diff --git a/deploy/test/test_cases/TEST_ID_25_helm_default_retry_successful.sh b/deploy/test/test_cases/TEST_ID_25_helm_default_retry_successful.sh new file mode 100755 index 00000000..a4f76340 --- /dev/null +++ b/deploy/test/test_cases/TEST_ID_25_helm_default_retry_successful.sh @@ -0,0 +1,53 @@ +#!/bin/bash +set -euxo pipefail + +# This test verifies that default values for retry mechanism work +setup_helm_environment + +pushd ../../ + set_image_path + export IMAGE="$image_path/secrets-provider" + export IMAGE_PULL_POLICY="IfNotPresent" + export TAG="latest" + export LABELS="app: test-helm" + export DEBUG="true" + export K8S_SECRETS="test-k8s-secret" + export CONJUR_ACCOUNT="cucumber" + export CONJUR_APPLIANCE_URL="https://conjur-follower.${CONJUR_NAMESPACE_NAME}.svc.cluster.local" + export CONJUR_AUTHN_LOGIN="host/conjur/authn-k8s/${AUTHENTICATOR_ID}/apps/${APP_NAMESPACE_NAME}/*/*" + # A parameter that will force a failure + export CONJUR_AUTHN_URL="https://conjur-follower.${CONJUR_NAMESPACE_NAME}.svc.cluster.local/api/authn-k8s/${AUTHENTICATOR_ID}xyz" # Configure retry mechanism with overriding defaults + + DEFAULT_RETRY_INTERVAL_SEC=30 + DEFAULT_RETRY_COUNT_LIMIT=3 + + fill_helm_chart_test_image + fill_helm_chart_no_override_defaults + helm install -f "../helm/secrets-provider/ci/take-default-test-values.yaml" \ + -f "../helm/secrets-provider/ci/take-image-values.yaml" \ + secrets-provider ../helm/secrets-provider \ + --set-file environment.conjur.sslCertificate.value="test/test_cases/conjur.pem" +popd + +pod_name=$($cli_with_timeout get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-helm --no-headers | awk '{print $1}' ) + +# Find initial authentication error that should trigger the retry +$cli_with_timeout "logs $pod_name | grep 'CSPFK010E Failed to authenticate'" +# Start the timer for retry interval +start=$SECONDS + +echo "Expecting Secrets Provider retry configurations to take defaults RETRY_INTERVAL_SEC 30 and RETRY_COUNT_LIMIT 3" +$cli_with_timeout "logs $pod_name | grep 'CSPFK010I Updating Kubernetes Secrets: 1 retries out of $DEFAULT_RETRY_COUNT_LIMIT'" + +duration=$(( SECONDS - start )) +# Since we are testing retry in scripts we must determine an acceptable range that retry should have taken place +# If the duration falls within that range, then we can determine the retry mechanism works as expected +retryIntervalMin=`echo "scale=2; $DEFAULT_RETRY_INTERVAL_SEC/100*80" | bc | cut -d "." -f 1 | cut -d "," -f 1` +retryIntervalMax=`echo "scale=2; $DEFAULT_RETRY_INTERVAL_SEC/100*120" | bc | cut -d "." -f 1 | cut -d "," -f 1` +if (( $duration >= $retryIntervalMin && $duration <= $retryIntervalMax )); then + echo 0 +else + echo "Timed retry failed to occur according to detailed retry interval. 
Timed duration: $duration" + echo 1 +fi + diff --git a/deploy/test/test_cases/TEST_ID_26_helm_override_default_retry_successful.sh b/deploy/test/test_cases/TEST_ID_26_helm_override_default_retry_successful.sh new file mode 100755 index 00000000..fc2504b5 --- /dev/null +++ b/deploy/test/test_cases/TEST_ID_26_helm_override_default_retry_successful.sh @@ -0,0 +1,41 @@ +#!/bin/bash +set -euxo pipefail + +# This test verifies that user configured values for retry mechanism work +setup_helm_environment + +pushd ../../ + export LABELS="app: test-helm" + export DEBUG="true" + # A parameter that will force a failure + export CONJUR_AUTHN_URL="https://conjur-follower.${CONJUR_NAMESPACE_NAME}.svc.cluster.local/api/authn-k8s/${AUTHENTICATOR_ID}xyz" # Configure retry mechanism with overriding defaults + export RETRY_COUNT_LIMIT="2" + export RETRY_INTERVAL_SEC="5" + fill_helm_chart + helm install -f "../helm/secrets-provider/ci/test-values.yaml" \ + secrets-provider ../helm/secrets-provider \ + --set-file environment.conjur.sslCertificate.value="test/test_cases/conjur.pem" +popd + +pod_name=$($cli_with_timeout get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-helm --no-headers | awk '{print $1}' ) +# Find initial authentication error that should trigger the retry +$cli_with_timeout "logs $pod_name | grep 'CSPFK010E Failed to authenticate'" +# Start the timer for retry interval +start=$SECONDS + +# Validate that the Secrets Provider retry mechanism takes user input of RETRY_INTERVAL_SEC of 5 and RETRY_COUNT_LIMIT of 2 +echo "Expecting Secrets Provider retry configurations to take their defaults of RETRY_INTERVAL_SEC of 5 and RETRY_COUNT_LIMIT of 2" +$cli_with_timeout "logs $pod_name | grep 'CSPFK010I Updating Kubernetes Secrets: 1 retries out of $RETRY_COUNT_LIMIT'" + +duration=$(( SECONDS - start )) +# Since we are testing retry in scripts we must determine an acceptable range that retry should take place +# If the duration falls within that range, then we can determine the retry mechanism works as expected +retryIntervalMin=`echo "scale=2; $RETRY_INTERVAL_SEC/100*80" | bc | cut -d "." -f 1 | cut -d "," -f 1` +retryIntervalMax=`echo "scale=2; $RETRY_INTERVAL_SEC/100*120" | bc | cut -d "." -f 1 | cut -d "," -f 1` +if (( $duration >= $retryIntervalMin && $duration <= $retryIntervalMax )); then + echo 0 +else + echo "Timed retry failed to occur according to detailed retry interval. Timed duration: $duration" + echo 1 +fi + diff --git a/deploy/test/test_cases/TEST_ID_2_multiple_pods_changing_pwd_inbetween.sh b/deploy/test/test_cases/TEST_ID_2_multiple_pods_changing_pwd_inbetween.sh index 748d3ce0..cdd59579 100755 --- a/deploy/test/test_cases/TEST_ID_2_multiple_pods_changing_pwd_inbetween.sh +++ b/deploy/test/test_cases/TEST_ID_2_multiple_pods_changing_pwd_inbetween.sh @@ -7,7 +7,7 @@ wait_for_it 600 "$CONFIG_DIR/secrets-access-role.sh.yml | $cli_without_timeout echo "Creating secrets access role binding" wait_for_it 600 "$CONFIG_DIR/secrets-access-role-binding.sh.yml | $cli_without_timeout apply -f -" -deploy_env +deploy_init_env pod_name1=$(cli_get_pods_test_env | awk '{print $1}') @@ -24,11 +24,11 @@ if [[ "$PLATFORM" = "kubernetes" ]]; then # Waiting until pod is successfully removed from the namespace before advancing. 
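# Hedged aside (illustrative only, not part of this change): the pod-count polling on
# the next line could also be expressed with kubectl's built-in delete wait, e.g. for
# the pod captured in $pod_name1 above:
#   kubectl wait --for=delete "pod/$pod_name1" --namespace "$APP_NAMESPACE_NAME" --timeout=120s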
$cli_with_timeout "get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-env --no-headers | wc -l | tr -d ' ' | grep '^0$'" - set_secret secrets/test_secret secret2 + set_conjur_secret secrets/test_secret secret2 $cli_with_timeout "scale deployment test-env --replicas=1" elif [ $PLATFORM = "openshift" ]; then - set_secret secrets/test_secret secret2 + set_conjur_secret secrets/test_secret secret2 $cli_with_timeout "delete pod $pod_name1" fi @@ -37,7 +37,7 @@ pod_name2=$(cli_get_pods_test_env | awk '{print $1}') echo "Verify pod $pod_name2 has environment variable 'TEST_SECRET' with value 'secret2'" verify_secret_value_in_pod $pod_name2 TEST_SECRET secret2 -set_secret secrets/test_secret secret3 +set_conjur_secret secrets/test_secret secret3 if [[ "$PLATFORM" = "kubernetes" ]]; then echo "Setting deployment test-env to replicas" diff --git a/deploy/test/test_cases/TEST_ID_3_SECRETS_DESTINATION_with_incorrect_value.sh b/deploy/test/test_cases/TEST_ID_3_SECRETS_DESTINATION_with_incorrect_value.sh index f84c130d..d5e18402 100755 --- a/deploy/test/test_cases/TEST_ID_3_SECRETS_DESTINATION_with_incorrect_value.sh +++ b/deploy/test/test_cases/TEST_ID_3_SECRETS_DESTINATION_with_incorrect_value.sh @@ -7,7 +7,7 @@ create_secret_access_role_binding echo "Deploying test_env with incorrect value for SECRETS_DESTINATION environment variable" export SECRETS_DESTINATION_KEY_VALUE="SECRETS_DESTINATION SECRETS_DESTINATION_incorrect_value" -deploy_env +deploy_init_env echo "Expecting secrets provider to fail with error 'CSPFK005E Provided incorrect value for environment variable SECRETS_DESTINATION'" pod_name=$(cli_get_pods_test_env | awk '{print $1}') diff --git a/deploy/test/test_cases/TEST_ID_4_CONTAINER_MODE_not_exist.sh b/deploy/test/test_cases/TEST_ID_4_CONTAINER_MODE_not_exist.sh index c7f25b05..800573de 100755 --- a/deploy/test/test_cases/TEST_ID_4_CONTAINER_MODE_not_exist.sh +++ b/deploy/test/test_cases/TEST_ID_4_CONTAINER_MODE_not_exist.sh @@ -7,7 +7,7 @@ create_secret_access_role_binding echo "Deploying test_env without CONTAINER_MODE envrionment variable" export CONTAINER_MODE_KEY_VALUE=$KEY_VALUE_NOT_EXIST -deploy_env +deploy_init_env echo "Expecting secrets provider to fail with error 'CSPFK007E Setting SECRETS_DESTINATION environment variable to 'k8s_secrets' must run as init container'" pod_name=$(cli_get_pods_test_env | awk '{print $1}') diff --git a/deploy/test/test_cases/TEST_ID_5_no_get_permission_to_secret.sh b/deploy/test/test_cases/TEST_ID_5_no_get_permission_to_secret.sh index 71a3147a..7cef9b08 100755 --- a/deploy/test/test_cases/TEST_ID_5_no_get_permission_to_secret.sh +++ b/deploy/test/test_cases/TEST_ID_5_no_get_permission_to_secret.sh @@ -7,7 +7,7 @@ create_secret_access_role create_secret_access_role_binding -deploy_env +deploy_init_env echo "Expecting secrets provider to fail with error 'CSPFK004D Failed to retrieve k8s secret. Reason:...'" pod_name=$(cli_get_pods_test_env | awk '{print $1}') diff --git a/deploy/test/test_cases/TEST_ID_6_no_update_permission_to_secret.sh b/deploy/test/test_cases/TEST_ID_6_no_update_permission_to_secret.sh index cbcbeb02..880e97c3 100755 --- a/deploy/test/test_cases/TEST_ID_6_no_update_permission_to_secret.sh +++ b/deploy/test/test_cases/TEST_ID_6_no_update_permission_to_secret.sh @@ -7,7 +7,7 @@ create_secret_access_role create_secret_access_role_binding -deploy_env +deploy_init_env echo "Expecting secrets provider to fail with error 'CSPFK005D Failed to update k8s secret. 
Reason:...'" pod_name=$(cli_get_pods_test_env | awk '{print $1}') diff --git a/deploy/test/test_cases/TEST_ID_7_K8S_SECRETS_env_var_not_exist.sh b/deploy/test/test_cases/TEST_ID_7_K8S_SECRETS_env_var_not_exist.sh index 09f9db70..8a208d54 100755 --- a/deploy/test/test_cases/TEST_ID_7_K8S_SECRETS_env_var_not_exist.sh +++ b/deploy/test/test_cases/TEST_ID_7_K8S_SECRETS_env_var_not_exist.sh @@ -7,7 +7,7 @@ create_secret_access_role_binding echo "Deploying test_env without K8S_SECRETS environment variable" export K8S_SECRETS_KEY_VALUE=$KEY_VALUE_NOT_EXIST -deploy_env +deploy_init_env echo "Expecting for 'CrashLoopBackOff' state of pod test-env" wait_for_it 600 "cli_get_pods_test_env | grep CrashLoopBackOff" diff --git a/deploy/test/test_cases/TEST_ID_8_K8S_SECRETS_env_var_empty.sh b/deploy/test/test_cases/TEST_ID_8_K8S_SECRETS_env_var_empty.sh index ac160838..f28b007a 100755 --- a/deploy/test/test_cases/TEST_ID_8_K8S_SECRETS_env_var_empty.sh +++ b/deploy/test/test_cases/TEST_ID_8_K8S_SECRETS_env_var_empty.sh @@ -7,7 +7,7 @@ create_secret_access_role_binding echo "Deploying test_env with empty value for K8S_SECRETS envrionment variable" export K8S_SECRETS_KEY_VALUE="K8S_SECRETS" -deploy_env +deploy_init_env echo "Expecting for CrashLoopBackOff state of pod test-env" wait_for_it 600 "cli_get_pods_test_env | grep CrashLoopBackOff" diff --git a/deploy/test/test_cases/TEST_ID_9_K8S_SECRETS_env_var_incorrect_value.sh b/deploy/test/test_cases/TEST_ID_9_K8S_SECRETS_env_var_incorrect_value.sh index 43dda991..679a8d91 100755 --- a/deploy/test/test_cases/TEST_ID_9_K8S_SECRETS_env_var_incorrect_value.sh +++ b/deploy/test/test_cases/TEST_ID_9_K8S_SECRETS_env_var_incorrect_value.sh @@ -7,7 +7,7 @@ create_secret_access_role_binding echo "Deploying test_env with incorrect value for K8S_SECRETS envrionment variable" export K8S_SECRETS_KEY_VALUE="K8S_SECRETS K8S_SECRETS_invalid_value" -deploy_env +deploy_init_env echo "Expecting secrets provider to fail with debug message 'CSPFK004D Failed to retrieve k8s secret. Reason: secrets K8S_SECRETS_invalid_value not found'" pod_name=$(cli_get_pods_test_env | awk '{print $1}') diff --git a/deploy/test/test_cases/run_tests.sh b/deploy/test/test_cases/run_tests.sh index 64c3fc6f..927b884a 100755 --- a/deploy/test/test_cases/run_tests.sh +++ b/deploy/test/test_cases/run_tests.sh @@ -24,11 +24,11 @@ times=1 for c in {1..$times} do for filename in ./$TEST_NAME_PREFIX*.sh; do - announce "Running '$filename'." - ./test_case_setup.sh - $filename - ../../teardown_resources.sh - announce "Test '$filename' ended successfully" + announce "Running '$filename'." + ./test_case_setup.sh + $filename + ../../teardown_resources.sh + announce "Test '$filename' ended successfully" done done diff --git a/deploy/test/test_cases/test_case_setup.sh b/deploy/test/test_cases/test_case_setup.sh index 3aa85c88..1fed405a 100755 --- a/deploy/test/test_cases/test_case_setup.sh +++ b/deploy/test/test_cases/test_case_setup.sh @@ -1,27 +1,28 @@ #!/bin/bash set -euxo pipefail -announce "Creating image pull secret." +if [ "${DEV}" = "false" ]; then + announce "Creating image pull secret." 
+ if [[ "${PLATFORM}" == "kubernetes" ]]; then + $cli_with_timeout delete --ignore-not-found secret dockerpullsecret -if [[ "${PLATFORM}" == "kubernetes" ]]; then - $cli_with_timeout delete --ignore-not-found secret dockerpullsecret - - $cli_with_timeout create secret docker-registry dockerpullsecret \ - --docker-server=$DOCKER_REGISTRY_URL \ - --docker-username=_ \ - --docker-password=_ \ - --docker-email=_ -elif [[ "$PLATFORM" == "openshift" ]]; then + $cli_with_timeout create secret docker-registry dockerpullsecret \ + --docker-server=$DOCKER_REGISTRY_URL \ + --docker-username=_ \ + --docker-password=_ \ + --docker-email=_ + elif [[ "$PLATFORM" == "openshift" ]]; then $cli_with_timeout delete --ignore-not-found secrets dockerpullsecret # TODO: replace the following with `$cli create secret` $cli_with_timeout secrets new-dockercfg dockerpullsecret \ - --docker-server=${DOCKER_REGISTRY_PATH} \ - --docker-username=_ \ - --docker-password=$($cli_with_timeout whoami -t) \ - --docker-email=_ + --docker-server=${DOCKER_REGISTRY_PATH} \ + --docker-username=_ \ + --docker-password=$($cli_with_timeout whoami -t) \ + --docker-email=_ $cli_with_timeout secrets add serviceaccount/default secrets/dockerpullsecret --for=pull + fi fi echo "Create secret k8s-secret" diff --git a/deploy/test/test_in_docker.sh b/deploy/test/test_in_docker.sh index c9927413..b599a20b 100755 --- a/deploy/test/test_in_docker.sh +++ b/deploy/test/test_in_docker.sh @@ -4,7 +4,7 @@ set -xeuo pipefail . utils.sh # Clean up when script completes and fails -function finish { +finish() { announce 'Wrapping up and removing test environment' # Stop the running processes @@ -16,13 +16,13 @@ function finish { } trap finish EXIT -function main() { +main() { buildTestRunnerImage deployConjur deployTest } -function buildTestRunnerImage() { +buildTestRunnerImage() { docker build --tag $TEST_RUNNER_IMAGE:$CONJUR_NAMESPACE_NAME \ --file test/Dockerfile \ --build-arg OPENSHIFT_CLI_URL=$OPENSHIFT_CLI_URL \ @@ -30,7 +30,7 @@ function buildTestRunnerImage() { . 
 }
 
-function deployConjur() {
+deployConjur() {
   git clone git@github.com:cyberark/kubernetes-conjur-deploy \
     kubernetes-conjur-deploy-$UNIQUE_TEST_ID
@@ -41,7 +41,7 @@ function deployConjur() {
   runDockerCommand "cd ./kubernetes-conjur-deploy-$UNIQUE_TEST_ID && DEBUG=true $cmd"
 }
 
-function deployTest() {
+deployTest() {
   runDockerCommand "./run_with_summon.sh"
 }
 
diff --git a/deploy/utils.sh b/deploy/utils.sh
index 120adee9..6ae29f2a 100644
--- a/deploy/utils.sh
+++ b/deploy/utils.sh
@@ -1,6 +1,3 @@
-#!/bin/bash
-set -xeuo pipefail
-
 export KEY_VALUE_NOT_EXIST=" "
 
 wait_for_it() {
@@ -86,7 +83,7 @@ get_conjur_cli_pod_name() {
   echo $pod_list | awk '{print $1}'
 }
 
-function runDockerCommand() {
+runDockerCommand() {
   docker run --rm \
     -i \
     -e UNIQUE_TEST_ID \
@@ -119,6 +116,7 @@ function runDockerCommand() {
     -v /var/run/docker.sock:/var/run/docker.sock \
     -v ~/.config:/root/.config \
     -v ~/.docker:/root/.docker \
+    -v "$PWD/../helm":/helm \
     -v "$PWD":/src \
     -w /src \
     $TEST_RUNNER_IMAGE:$CONJUR_NAMESPACE_NAME \
@@ -144,7 +142,7 @@ configure_cli_pod() {
   $cli_with_timeout exec $conjur_cli_pod -- conjur authn login -u admin -p $CONJUR_ADMIN_PASSWORD
 }
 
-function deploy_env {
+configure_conjur_url() {
   conjur_node_name="conjur-follower"
   if [ "$CONJUR_DEPLOYMENT" = "oss" ]; then
     conjur_node_name="conjur-oss"
@@ -157,6 +155,147 @@ function deploy_env {
 
   export CONJUR_APPLIANCE_URL=$conjur_appliance_url
   export CONJUR_AUTHN_URL=$conjur_authenticator_url
+}
+
+fetch_ssl_from_conjur() {
+  selector="role=follower"
+  cert_location="/opt/conjur/etc/ssl/conjur.pem"
+  if [ "$CONJUR_DEPLOYMENT" = "oss" ]; then
+    selector="app=conjur-cli"
+    export cert_location="/root/conjur-${CONJUR_ACCOUNT}.pem"
+  fi
+
+  export conjur_pod_name=$($cli_with_timeout get pods --selector=$selector --namespace $CONJUR_NAMESPACE_NAME --no-headers | awk '{ print $1 }' | head -1)
+}
+
+setup_helm_environment() {
+  set_namespace $CONJUR_NAMESPACE_NAME
+
+  configure_conjur_url
+
+  fetch_ssl_from_conjur
+
+  ssl_location="conjur.pem"
+  if [ "${DEV}" = "true" ]; then
+    ssl_location="../conjur.pem"
+  fi
+  # Save cert for later setting in Helm
+  $cli_with_timeout "exec ${conjur_pod_name} --namespace $CONJUR_NAMESPACE_NAME cat $cert_location" > "$ssl_location"
+
+  set_namespace $APP_NAMESPACE_NAME
+}
+
+set_image_path() {
+  image_path="${APP_NAMESPACE_NAME}"
+  if [[ "${PLATFORM}" = "openshift" && "${DEV}" = "false" ]]; then
+    # Image path needs to point to internal registry path to access image
+    image_path="docker-registry.default.svc:5000/${APP_NAMESPACE_NAME}"
+  elif [[ "${PLATFORM}" = "kubernetes" && "${DEV}" = "false" ]]; then
+    image_path="${DOCKER_REGISTRY_PATH}/${APP_NAMESPACE_NAME}"
+  fi
+  export image_path
+}
+
+fill_helm_chart() {
+  helm_path="."
+  if [ "${DEV}" = "false" ]; then
+    helm_path=".."
+  fi
+
+  set_image_path
+
+  id=${1:-""}
+  sed -e "s#{{ SECRETS_PROVIDER_ROLE }}#${SECRETS_PROVIDER_ROLE:-"secrets-provider-role"}#g" \
+    -e "s#{{ SECRETS_PROVIDER_ROLE_BINDING }}#${SECRETS_PROVIDER_ROLE_BINDING:-"secrets-provider-role-binding"}#g" \
+    -e "s#{{ CREATE_SERVICE_ACCOUNT }}#${CREATE_SERVICE_ACCOUNT:-"true"}#g" \
+    -e "s#{{ SERVICE_ACCOUNT }}#${SERVICE_ACCOUNT:-"secrets-provider-service-account"}#g" \
+    -e "s#{{ K8S_SECRETS }}#${K8S_SECRETS:-"test-k8s-secret"}#g" \
+    -e "s#{{ CONJUR_ACCOUNT }}#${CONJUR_ACCOUNT:-"cucumber"}#g" \
+    -e "s#{{ CONJUR_APPLIANCE_URL }}#${CONJUR_APPLIANCE_URL:-"https://conjur-follower.${CONJUR_NAMESPACE_NAME}.svc.cluster.local/api"}#g" \
+    -e "s#{{ CONJUR_AUTHN_URL }}#${CONJUR_AUTHN_URL:-"https://conjur-follower.${CONJUR_NAMESPACE_NAME}.svc.cluster.local/api/authn-k8s/${AUTHENTICATOR_ID}"}#g" \
+    -e "s#{{ CONJUR_AUTHN_LOGIN }}# ${CONJUR_AUTHN_LOGIN:-"host/conjur/authn-k8s/${AUTHENTICATOR_ID}/apps/${APP_NAMESPACE_NAME}/*/*"}#g" \
+    -e "s#{{ SECRETS_PROVIDER_SSL_CONFIG_MAP }}# ${SECRETS_PROVIDER_SSL_CONFIG_MAP:-"secrets-provider-ssl-config-map"}#g" \
+    -e "s#{{ IMAGE_PULL_POLICY }}# ${IMAGE_PULL_POLICY:-"IfNotPresent"}#g" \
+    -e "s#{{ IMAGE }}# ${IMAGE:-"$image_path/secrets-provider"}#g" \
+    -e "s#{{ TAG }}# ${TAG:-"latest"}#g" \
+    -e "s#{{ LABELS }}# ${LABELS:-"app: test-helm"}#g" \
+    -e "s#{{ DEBUG }}# ${DEBUG:-"false"}#g" \
+    -e "s#{{ RETRY_COUNT_LIMIT }}# ${RETRY_COUNT_LIMIT:-"3"}#g" \
+    -e "s#{{ RETRY_INTERVAL_SEC }}# ${RETRY_INTERVAL_SEC:-"30"}#g" \
+    "$helm_path/helm/secrets-provider/ci/test-values-template.yaml" > "$helm_path/helm/secrets-provider/ci/${id}test-values.yaml"
+}
+
+fill_helm_chart_no_override_defaults() {
+  sed -e "s#{{ K8S_SECRETS }}#${K8S_SECRETS}#g" \
+    -e "s#{{ CONJUR_ACCOUNT }}#${CONJUR_ACCOUNT}#g" \
+    -e "s#{{ LABELS }}# ${LABELS}#g" \
+    -e "s#{{ CONJUR_APPLIANCE_URL }}#${CONJUR_APPLIANCE_URL}#g" \
+    -e "s#{{ CONJUR_AUTHN_URL }}#${CONJUR_AUTHN_URL}#g" \
+    -e "s#{{ CONJUR_AUTHN_LOGIN }}# ${CONJUR_AUTHN_LOGIN}#g" \
+    "../helm/secrets-provider/ci/take-default-test-values-template.yaml" > "../helm/secrets-provider/ci/take-default-test-values.yaml"
+}
+
+fill_helm_chart_test_image() {
+  sed -e "s#{{ IMAGE }}#${IMAGE}#g" \
+    -e "s#{{ TAG }}#${TAG}#g" \
+    -e "s#{{ IMAGE_PULL_POLICY }}#${IMAGE_PULL_POLICY}#g" \
+    "../helm/secrets-provider/ci/take-image-values-template.yaml" > "../helm/secrets-provider/ci/take-image-values.yaml"
+}
+
+deploy_chart() {
+  pushd ../
+  fill_helm_chart
+  helm install -f "helm/secrets-provider/ci/test-values.yaml" \
+    secrets-provider ./helm/secrets-provider \
+    --set-file environment.conjur.sslCertificate.value="conjur.pem"
+  popd
+}
+
+set_config_directory_path() {
+  export CONFIG_DIR="config/k8s"
+  if [[ "$PLATFORM" = "openshift" ]]; then
+    export CONFIG_DIR="config/openshift"
+  fi
+}
+
+deploy_helm_app() {
+  set_config_directory_path
+
+  helm_app_path="../$CONFIG_DIR/helm-app.yaml"
+  if [ "${DEV}" = "true" ]; then
+    helm_app_path="test/$CONFIG_DIR/helm-app.yaml"
+  fi
+
+  id=${1:-""}
+  sed -e "s#{{ SERVICE_ACCOUNT }}#${SERVICE_ACCOUNT:-"secrets-provider-service-account"}#g" $helm_app_path |
+    sed -e "s#{{ K8S_SECRET }}#${K8S_SECRET:-"test-k8s-secret"}#g" |
+    sed -e "s#{{ ID }}#${id}#g" |
+    $cli_with_timeout create -f -
+}
+
+create_k8s_role() {
+  CONFIG_DIR="config/k8s"
+  if [[ "$PLATFORM" = "openshift" ]]; then
+    CONFIG_DIR="config/openshift"
+  fi
+
+  id=${1:-""}
+  sed -e "s#{{ ID }}#${id}#g" "../$CONFIG_DIR/secrets-access-role.yaml" |
+    sed -e "s#{{ APP_NAMESPACE_NAME }}#${APP_NAMESPACE_NAME}#g" |
+    $cli_with_timeout create -f -
+}
+
+create_k8s_secret_for_helm_deployment() {
+  helm_app_path="../config/k8s_secret.yml"
+  if [ "${DEV}" = "true" ]; then
+    helm_app_path="test/config/k8s_secret.yml"
+  fi
+
+  $cli_with_timeout create -f $helm_app_path
+}
+
+deploy_init_env() {
+  configure_conjur_url
 
   echo "Running Deployment Manifest"
 
@@ -182,17 +321,17 @@ function deploy_env {
   fi
 }
 
-function create_secret_access_role () {
+create_secret_access_role() {
   echo "Creating secrets access role"
   wait_for_it 600 "$CONFIG_DIR/secrets-access-role.sh.yml | $cli_without_timeout apply -f -"
 }
 
-function create_secret_access_role_binding () {
+create_secret_access_role_binding() {
   echo "Creating secrets access role binding"
   wait_for_it 600 "$CONFIG_DIR/secrets-access-role-binding.sh.yml | $cli_without_timeout apply -f -"
 }
 
-function set_secret () {
+set_conjur_secret() {
   SECRET_NAME=$1
   SECRET_VALUE=$2
   echo "Set secret '$SECRET_NAME' to '$SECRET_VALUE'"
@@ -202,7 +341,7 @@ function set_secret () {
   set_namespace $APP_NAMESPACE_NAME
 }
 
-yaml_print_key_name_value () {
+yaml_print_key_name_value() {
   spaces=$1
   key_name=${2:-""}
   key_value=${3:-""}
@@ -221,11 +360,11 @@ yaml_print_key_name_value () {
   fi
 }
 
-cli_get_pods_test_env () {
+cli_get_pods_test_env() {
   $cli_with_timeout "get pods --namespace=$APP_NAMESPACE_NAME --selector app=test-env --no-headers"
 }
 
-test_secret_is_provided () {
+test_secret_is_provided() {
   secret_value=$1
   variable_name="${2:-secrets/test_secret}"
   environment_variable_name="${3:-TEST_SECRET}"
@@ -235,14 +374,14 @@
   $cli_with_timeout "exec $conjur_cli_pod -- conjur variable values add \"$variable_name\" $secret_value"
   set_namespace "$APP_NAMESPACE_NAME"
 
-  deploy_env
+  deploy_init_env
 
   echo "Verifying pod test_env has environment variable '$environment_variable_name' with value '$secret_value'"
   pod_name=$(cli_get_pods_test_env | awk '{print $1}')
   verify_secret_value_in_pod "$pod_name" "$environment_variable_name" "$secret_value"
 }
 
-verify_secret_value_in_pod () {
+verify_secret_value_in_pod() {
   pod_name=$1
   environment_variable_name=$2
   expected_value=$3
diff --git a/helm/secrets-provider/ci/take-default-test-values-template.yaml b/helm/secrets-provider/ci/take-default-test-values-template.yaml
new file mode 100644
index 00000000..43c2810d
--- /dev/null
+++ b/helm/secrets-provider/ci/take-default-test-values-template.yaml
@@ -0,0 +1,13 @@
+labels: { {{ LABELS }} }
+
+environment:
+  # Array of Kubernetes Secret names that applications consume, and whose value is sourced in DAP/Conjur.
+  # For example, [k8s-secret1,k8s-secret2]
+  k8sSecrets: [{{ K8S_SECRETS }}]
+  conjur:
+    account: {{ CONJUR_ACCOUNT }}
+    applianceUrl: {{ CONJUR_APPLIANCE_URL }}
+    authnUrl: {{ CONJUR_AUTHN_URL }}
+    authnLogin: {{ CONJUR_AUTHN_LOGIN }}
+    sslCertificate:
+      value: ""
diff --git a/helm/secrets-provider/ci/take-image-values-template.yaml b/helm/secrets-provider/ci/take-image-values-template.yaml
new file mode 100644
index 00000000..e51c1a96
--- /dev/null
+++ b/helm/secrets-provider/ci/take-image-values-template.yaml
@@ -0,0 +1,5 @@
+secretsProvider:
+  image: {{ IMAGE }}
+  imagePullPolicy: {{ IMAGE_PULL_POLICY }}
+  tag: {{ TAG }}
+  name: cyberark-secrets-provider
diff --git a/helm/secrets-provider/ci/test-values-template.yaml b/helm/secrets-provider/ci/test-values-template.yaml
new file mode 100644
index 00000000..2e12b333
--- /dev/null
+++ b/helm/secrets-provider/ci/test-values-template.yaml
@@ -0,0 +1,37 @@
+# Values for secrets-provider Chart. All missing values need to be supplied via environment variable.
+
+rbac:
+  # Indicates whether the Secrets Provider service account, Role, and RoleBinding should be created.
+  # This should be set to true unless resources with the proper permissions exist in the namespace.
+  create: {{ CREATE_SERVICE_ACCOUNT }}
+  roleName: {{ SECRETS_PROVIDER_ROLE }}
+  roleBindingName: {{ SECRETS_PROVIDER_ROLE_BINDING }}
+  serviceAccount:
+    # Name of the service account for the Secrets Provider.
+    name: {{ SERVICE_ACCOUNT }}
+
+secretsProvider:
+  image: {{ IMAGE }}
+  imagePullPolicy: {{ IMAGE_PULL_POLICY }}
+  tag: {{ TAG }}
+  name: cyberark-secrets-provider
+
+# Additional labels to apply to all resources.
+labels: { {{ LABELS }} }
+annotations: {}
+
+environment:
+  debug: {{ DEBUG }}
+  # Array of Kubernetes Secret names that applications consume, and whose value is sourced in DAP/Conjur.
+  # For example, [k8s-secret1,k8s-secret2]
+  k8sSecrets: [{{ K8S_SECRETS }}]
+  conjur:
+    retryIntervalSec: {{ RETRY_INTERVAL_SEC }}
+    retryCountLimit: {{ RETRY_COUNT_LIMIT }}
+    account: {{ CONJUR_ACCOUNT }}
+    applianceUrl: {{ CONJUR_APPLIANCE_URL }}
+    authnUrl: {{ CONJUR_AUTHN_URL }}
+    authnLogin: {{ CONJUR_AUTHN_LOGIN }}
+    sslCertificate:
+      value: ""
+      name: {{ SECRETS_PROVIDER_SSL_CONFIG_MAP }}
diff --git a/helm/secrets-provider/values.schema.json b/helm/secrets-provider/values.schema.json
index d4967be3..7a58d22f 100644
--- a/helm/secrets-provider/values.schema.json
+++ b/helm/secrets-provider/values.schema.json
@@ -53,7 +53,7 @@
         "tag": {
           "type": ["string", "number"],
           "minLength": 1,
-          "pattern": "(^\\d+(\\.\\d+){0,2}|dev)$"
+          "pattern": "(^\\d+(\\.\\d+){0,2}|latest)$"
         },
         "imagePullPolicy": {
           "type": "string",