diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 00000000..d620be54 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,19 @@ + + +[![Build status badge](https://jenkins.docker.hpecorp.net/buildStatus/icon?job=sec-eng%2Fistio-spire%2F)](https://jenkins.docker.hpecorp.net/job/sec-eng/job/istio-spire/job//) + +**Affected functionality** + + +**Description of change** + + +**Which Jira task this PR is related to** + + +**Depends on** + + +Signed-off-by: diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..1144112c --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +# Visual Studio Code +.vscode + +# Terraform +.terraform +terraform.tfstate +terraform.tfstate.backup +.terraform.lock.hcl \ No newline at end of file diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 00000000..be3e4f88 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,448 @@ +AWS_PROFILE = "mithril-jenkins" +BUILD_IMAGE = "hub.docker.hpecorp.net/sec-eng/ubuntu:pipeline" +DEVELOPMENT_IMAGE = "529024819027.dkr.ecr.us-east-1.amazonaws.com/mithril" +CHANNEL_NAME = "#notify-project-mithril" +ECR_REGION = "us-east-1" +ECR_REPOSITORY_PREFIX = "mithril" +HPE_REGISTRY = "hub.docker.hpecorp.net/sec-eng" +ISTIO_STABLE_BRANCH = "release-1.10" // the Istio branch to be distributed +PATCHSET_BUCKET = "mithril-poc-patchset" +CUSTOMER_BUCKET = "mithril-customer-assets" +MITHRIL_MAIN_BRANCH = "master" +PROXY = "http://proxy.houston.hpecorp.net:8080" + +def SLACK_ERROR_MESSAGE +def SLACK_ERROR_COLOR + +// Start of the pipeline +pipeline { + + options { + timestamps() + ansiColor('xterm') + } + + // Version of the Jenkins slave + agent { + label 'docker-v20.10' + } + + environment { + BUILD_TAG = makeTag() + GOOS = "linux" + } + + parameters { + string(name: 'ISTIO_BRANCH', defaultValue: ISTIO_STABLE_BRANCH, description: 'The Istio branch to run against') + } + + triggers { + parameterizedCron( + BRANCH_NAME == MITHRIL_MAIN_BRANCH ? ''' + H H(0-3) * * * %ISTIO_BRANCH=master + H H(0-3) * * * %ISTIO_BRANCH=release-1.10 + H H(0-3) * * * %ISTIO_BRANCH=release-1.11 + H H(0-3) * * * %ISTIO_BRANCH=release-1.12 + ''': '' + ) + } + + stages { + stage("notify-slack") { + steps { + script { + slackSend ( + channel: CHANNEL_NAME, + message: "Hello. The pipeline ${currentBuild.fullDisplayName} started. (<${env.BUILD_URL}|See Job>)") + } + } + } + + stage("setup-pipeline") { + steps { + script { + def secrets = vaultGetSecrets() + + AWS_ACCOUNT_ID = "${secrets.awsAccountID}" + AWS_ACCESS_KEY_ID = "${secrets.awsAccessKeyID}" + AWS_SECRET_ACCESS_KEY = "${secrets.awsSecretAccessKeyID}" + HPE_DOCKER_HUB_SECRET = "${secrets.dockerHubToken}" + } + } + } + + stage("make-poc-codebase") { + steps { + // Istio clone from the specified branch + sh "git clone --single-branch --branch ${params.ISTIO_BRANCH} https://github.com/istio/istio.git" + + // Apply Mithril patches + sh """ + cd istio + git apply ${WORKSPACE}/POC/patches/poc.${params.ISTIO_BRANCH}.patch + """ + } + } + + stage("build-and-push-dev-images-ecr"){ + environment { + AWS_ACCESS_KEY_ID = "${AWS_ACCESS_KEY_ID}" + AWS_SECRET_ACCESS_KEY = "${AWS_SECRET_ACCESS_KEY}" + } + steps { + script { + // Creating volume for the docker.sock, passing some environment variables for Dockerhub authentication + // and build tag, building Istio and pushing images to the ECR. + docker.image(BUILD_IMAGE).inside("-v /var/run/docker.sock:/var/run/docker.sock") { + + def ECR_REGISTRY = AWS_ACCOUNT_ID + ".dkr.ecr." 
+ ECR_REGION + ".amazonaws.com"; + sh """#!/bin/bash + + aws ecr get-login-password --region ${ECR_REGION} | \ + docker login --username AWS --password-stdin ${ECR_REGISTRY} + + docker build -t mithril:${BUILD_TAG} \ + -f ./docker/Dockerfile . + docker tag mithril:${BUILD_TAG} ${DEVELOPMENT_IMAGE}:${BUILD_TAG} + docker push ${DEVELOPMENT_IMAGE}:${BUILD_TAG} + """ + } + } + } + } + + stage("unit-test") { + environment { + AWS_ACCESS_KEY_ID = "${AWS_ACCESS_KEY_ID}" + AWS_SECRET_ACCESS_KEY = "${AWS_SECRET_ACCESS_KEY}" + ISTIO_BRANCH = "${params.ISTIO_BRANCH}" + } + steps { + script { + docker.image(BUILD_IMAGE).inside("-v /var/run/docker.sock:/var/run/docker.sock") { + sh '''#!/bin/bash + cd ${WORKSPACE}/terraform/istio-unit-tests + + echo "** Begin istio unit tests **" + terraform init + terraform apply -auto-approve -var "BUILD_TAG"=${BUILD_TAG} -var "AWS_PROFILE"=${AWS_PROFILE} -var "ISTIO_BRANCH"=${ISTIO_BRANCH} + num_tries=0 + + while [ $num_tries -lt 500 ]; + do + aws s3api head-object --bucket mithril-artifacts --key "${BUILD_TAG}/${BUILD_TAG}-istio-unit-tests-log.txt" --no-cli-pager 2> /dev/null + if [ $? -eq 0 ]; + then + break; + else + ((num_tries++)) + sleep 1; + fi + done + + terraform destroy -auto-approve + + aws s3 cp "s3://mithril-artifacts/${BUILD_TAG}/${BUILD_TAG}-istio-unit-tests-result.txt" . + RESULT=$(tail -n 1 "${BUILD_TAG}-istio-unit-tests-result.txt" | grep -oE '^..') + if [[ "$RESULT" == "ok" ]]; + then + echo "Istio unit tests successful" + else + echo "Istio unit tests failed" + cat "${BUILD_TAG}-istio-unit-tests-result.txt" + exit 1 + fi + ''' + } + } + } + } + + stage("build-and-push-istio-images") { + environment { + AWS_ACCESS_KEY_ID = "${AWS_ACCESS_KEY_ID}" + AWS_SECRET_ACCESS_KEY = "${AWS_SECRET_ACCESS_KEY}" + BUILD_WITH_CONTAINER = 0 + ISTIO_BRANCH = "${params.ISTIO_BRANCH}" + } + + steps { + script { + def passwordMask = [ + $class: 'MaskPasswordsBuildWrapper', + varPasswordPairs: [ [ password: HPE_DOCKER_HUB_SECRET ] ] + ] + + wrap(passwordMask) { + docker.image(BUILD_IMAGE).inside("-v /var/run/docker.sock:/var/run/docker.sock") { + + // Build and push to ECR registry + def ECR_REGISTRY = AWS_ACCOUNT_ID + ".dkr.ecr." + ECR_REGION + ".amazonaws.com"; + def ECR_HUB = ECR_REGISTRY + "/" + ECR_REPOSITORY_PREFIX; + + sh """#!/bin/bash + export HUB=${HPE_REGISTRY} + export TAG=${BUILD_TAG} + export istio_branch=${istio_branch} + + echo ${HPE_DOCKER_HUB_SECRET} | docker login hub.docker.hpecorp.net --username ${HPE_DOCKER_HUB_SECRET} --password-stdin + + # Checks go version dependencies of istio branches + . 
./terraform/istio-unit-tests/check-go-version.sh + + cd istio && go get github.com/spiffe/go-spiffe/v2 && go mod tidy && make push + + aws ecr get-login-password --region ${ECR_REGION} | \ + docker login --username AWS --password-stdin ${ECR_REGISTRY} + + docker images --format "{{.ID}} {{.Repository}}" | while read line; do + pieces=(\$line) + if [[ "\${pieces[1]}" == *"hub.docker.hpecorp.net"* ]] && [[ "\${pieces[1]}" != *"/ubuntu"* ]]; then + tag=\$(echo "\${pieces[1]}" | sed -e "s|^${HPE_REGISTRY}||") + docker tag "\${pieces[0]}" "${ECR_HUB}\${tag}:${BUILD_TAG}" + docker push "${ECR_HUB}\${tag}:${BUILD_TAG}" + fi + done + """ + } + } + } + } + } + + // Tag the current build as "latest" whenever a new commit + // comes into master and pushes the tag to the ECR repository + stage("tag-latest-images") { + environment { + AWS_ACCESS_KEY_ID = "${AWS_ACCESS_KEY_ID}" + AWS_SECRET_ACCESS_KEY = "${AWS_SECRET_ACCESS_KEY}" + } + + when { + allOf { + branch MITHRIL_MAIN_BRANCH + equals expected: ISTIO_STABLE_BRANCH, actual: params.ISTIO_BRANCH + } + } + + steps { + script { + def ECR_REGISTRY = AWS_ACCOUNT_ID + ".dkr.ecr." + ECR_REGION + ".amazonaws.com" + def ECR_HUB = ECR_REGISTRY + "/" + ECR_REPOSITORY_PREFIX + + docker.image(BUILD_IMAGE).inside("-v /var/run/docker.sock:/var/run/docker.sock") { + sh """#!/bin/bash + aws ecr get-login-password --region ${ECR_REGION} | \ + docker login --username AWS --password-stdin ${ECR_REGISTRY} + + docker images "${ECR_HUB}/*" --format "{{.ID}} {{.Repository}}" | while read line; do + pieces=(\$line) + docker tag "\${pieces[0]}" "\${pieces[1]}":latest + docker push "\${pieces[1]}":latest + done + """ + } + } + } + } + + stage("run-integration-tests") { + environment { + AWS_ACCESS_KEY_ID = "${AWS_ACCESS_KEY_ID}" + AWS_SECRET_ACCESS_KEY = "${AWS_SECRET_ACCESS_KEY}" + } + + steps { + script { + def folders = sh(script: 'cd terraform/integration-tests && ls -1', returnStdout: true).split() + def builders = [:] + + folders.each{ folder -> + builders[folder] = { + stage("$folder") { + script { + def usecase = folder + docker.image(BUILD_IMAGE).inside("-v /var/run/docker.sock:/var/run/docker.sock --name $usecase -e usecase=$usecase ") { + sh '''#!/bin/bash + cd terraform/integration-tests/${usecase} + + echo "** Begin test ${usecase} **" + terraform init + terraform apply -auto-approve -var "BUILD_TAG"=${BUILD_TAG} -var "AWS_PROFILE"=${AWS_PROFILE} + num_tries=0 + while [ $num_tries -lt 500 ]; + do + aws s3api head-object --bucket mithril-artifacts --key "${BUILD_TAG}/${BUILD_TAG}-${usecase}-log.txt" --no-cli-pager 2> /dev/null + if [ $? -eq 0 ]; + then + break; + else + ((num_tries++)) + sleep 1; + fi + done + + terraform destroy -auto-approve + ''' + } + } + } + } + } + parallel builders + } + } + } + + stage("analyze-integration-tests") { + environment { + AWS_ACCESS_KEY_ID = "${AWS_ACCESS_KEY_ID}" + AWS_SECRET_ACCESS_KEY = "${AWS_SECRET_ACCESS_KEY}" + } + + steps { + script { + docker.image(BUILD_IMAGE).inside("-v /var/run/docker.sock:/var/run/docker.sock") { + sh '''#!/bin/bash + RESULT_LIST=() + + cd terraform/integration-tests + + HAS_MISSING_ARTIFACTS=false + for FOLDER in *; + do + BUCKET_EXISTS=false + aws s3api head-object --bucket mithril-artifacts --key "${BUILD_TAG}/${BUILD_TAG}-${FOLDER}-result.txt" --no-cli-pager + if [ $? 
-eq 0 ]; + then + BUCKET_EXISTS=true + fi + if $BUCKET_EXISTS; + then + echo "Artifact object for usecase ${FOLDER} exists" + else + echo "Artifact ${BUILD_TAG}/${BUILD_TAG}-${FOLDER}-result.txt object for usecase ${FOLDER} does not exist" + HAS_MISSING_ARTIFACTS=true + fi + done + + if $HAS_MISSING_ARTIFACTS; + then + echo "One or more artifacts do not exist" + exit 1 + else + echo "All artifacts found" + fi + + HAS_FAILED_TEST=false + for FOLDER in *; + do + aws s3 cp "s3://mithril-artifacts/${BUILD_TAG}/${BUILD_TAG}-${FOLDER}-result.txt" . + RESULT=$(tail -n 1 "${BUILD_TAG}-${FOLDER}-result.txt" | grep -oE '^..') + if [ "$RESULT" == "ok" ]; + then + echo "Test for usecase ${FOLDER} successful" + else + echo "Test for usecase ${FOLDER} failed" + cat "${BUILD_TAG}-${FOLDER}-result.txt" + HAS_FAILED_TEST=true + fi + done + + if $HAS_FAILED_TEST; + then + echo "One or more tests have failed" + exit 1 + fi + ''' + } + } + } + } + + stage("distribute-poc") { + environment { + AWS_ACCESS_KEY_ID = "${AWS_ACCESS_KEY_ID}" + AWS_SECRET_ACCESS_KEY = "${AWS_SECRET_ACCESS_KEY}" + } + + when { + allOf { + branch MITHRIL_MAIN_BRANCH + equals expected: ISTIO_STABLE_BRANCH, actual: params.ISTIO_BRANCH + } + } + + failFast true + parallel { + stage("distribute-assets") { + steps { + script { + def S3_CUSTOMER_BUCKET = "s3://" + CUSTOMER_BUCKET + + docker.image(BUILD_IMAGE).inside("-v /var/run/docker.sock:/var/run/docker.sock") { + sh """ + cd ./POC + tar -zcvf mithril.tar.gz bookinfo spire istio configmaps.yaml \ + deploy-all.sh create-namespaces.sh cleanup-all.sh forward-port.sh create-kind-cluster.sh \ + doc/poc-instructions.md demo/demo-script.sh demo/README.md demo/federation-demo.sh ../usecases/federation + aws s3 cp mithril.tar.gz ${S3_CUSTOMER_BUCKET} + aws s3api put-object-acl --bucket ${CUSTOMER_BUCKET} --key mithril.tar.gz --acl public-read + """ + } + } + } + } + + stage("distribute-patches") { + steps { + script { + def S3_PATCHSET_BUCKET = "s3://" + PATCHSET_BUCKET + + docker.image(BUILD_IMAGE).inside("-v /var/run/docker.sock:/var/run/docker.sock") { + sh """ + cd ./POC + tar -zcvf mithril-poc-patchset.tar.gz patches + aws s3 cp mithril-poc-patchset.tar.gz ${S3_PATCHSET_BUCKET} + aws s3api put-object-acl --bucket ${PATCHSET_BUCKET} --key mithril-poc-patchset.tar.gz --acl public-read + """ + } + } + } + } + } + } + } + + post { + success { + slackSend ( + channel: CHANNEL_NAME, + color: 'good', + message: "The pipeline ${currentBuild.fullDisplayName} completed successfully. (<${env.BUILD_URL}|See Job>)" + ) + } + + failure { + script { + SLACK_ERROR_MESSAGE = "Ooops! The pipeline ${currentBuild.fullDisplayName} failed." 
+ SLACK_ERROR_COLOR = "bad" + if (BRANCH_NAME == MITHRIL_MAIN_BRANCH) { + SLACK_ERROR_MESSAGE = "@channel The pipeline ${currentBuild.fullDisplayName} failed on `${BRANCH_NAME}`" + SLACK_ERROR_COLOR = "danger" + } + } + slackSend ( + channel: CHANNEL_NAME, + color: SLACK_ERROR_COLOR, + message: "${SLACK_ERROR_MESSAGE} (<${env.BUILD_URL}|See Job>)", + ) + } + } +} + +// Method for creating the build tag +def makeTag() { + def today = new Date() + return today.format("dd-MM-yyyy") + "-" + params.ISTIO_BRANCH + "-" + env.GIT_BRANCH + "-" + env.GIT_COMMIT.substring(0,7) +} diff --git a/POC/README.md b/POC/README.md new file mode 100644 index 00000000..6d009b89 --- /dev/null +++ b/POC/README.md @@ -0,0 +1,551 @@ +# Mithril POC + +[![Build status badge](https://jenkins.docker.hpecorp.net/buildStatus/icon?job=sec-eng%2Fistio-spire%2Fmaster)](https://jenkins.docker.hpecorp.net/job/sec-eng/job/istio-spire/job/master/) + +This POC is a WIP. + +Currently, it deploys to local `kind` cluster the istio `bookinfo` example configured using static secrets that were +generated from SVIDs issued by SPIRE. The four workloads from the example (details, productpage, ratings, and reviews) +are deployed in the `default` namespace. + +This POC requires at least 20GB of disk space and 2 CPUs, keep that in mind when setting up a VM. + +## Minimal configuration + +- 4 CPUs +- 8 GB RAM +- 20 GB (for POC *only*) + +### Build requirements + +- docker +- rpmbuild +- fpm +- make +- go 1.16 + +### No build requirements + +- docker + +### Install kubectl client + +[Install the kubernetes client for your operating system](https://kubernetes.io/docs/tasks/tools/#kubectl) + +### Install istioctl + +``` +curl -L https://istio.io/downloadIstio | ISTIO_VERSION=1.10.1 sh - +``` + +Should work with istio `1.10.x` and `1.11.x`. + +## Install Kind + +Follow [kind install instructions](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) + +## Create the cluster and the local docker registry + +```bash +./create-kind-cluster.sh +``` + +# Mithril Images + +We can build and deploy our custom Istio images or simply deploy the already built images stored in our public/private ecr's. + +## Build Istio images + +1. Clone https://github.com/istio/istio +Note: You will need to clone the main Istio repo to $GOPATH/src/istio.io/istio for the build commands to work correctly. +2. `git checkout release-1.10` +3. Apply patch `POC/patches/poc.release-1.10.patch` +4. `export TAG=my-build` +5. `export HUB=localhost:5000` +6. `export BUILD_WITH_CONTAINER=0` +7. `make push` + +Note: steps 2 and 3 can be also be done with istio branches `release-1.11` and `master` using the corresponding patches +`poc.release-1.11.patch` and `poc.master.patch`. + +This will create the docker images with the tag `my-build`, and push them to the local docker registry (`localhost:5000`). + +(More info about building istio: https://github.com/istio/istio/wiki/Preparing-for-Development) + +## Elastic Container Registry images + +Note: Depending on your environment, there may be firewall-related issues while trying to pull the images. It can be prevented by building Istio images locally. 
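+
+If you need that local-build fallback, the numbered steps in the "Build Istio images" section above roughly collapse into the sketch below; the `istio-spire` checkout location used for the patch path is an assumption, so adjust it to wherever you cloned this repo.
+
+```bash
+# Minimal sketch of the local build path (assumes this repo lives at $HOME/istio-spire)
+mkdir -p "$GOPATH/src/istio.io"
+git clone https://github.com/istio/istio.git "$GOPATH/src/istio.io/istio"
+cd "$GOPATH/src/istio.io/istio"
+git checkout release-1.10
+git apply "$HOME/istio-spire/POC/patches/poc.release-1.10.patch"
+
+export TAG=my-build
+export HUB=localhost:5000
+export BUILD_WITH_CONTAINER=0
+make push   # builds the patched Istio images and pushes them to the local registry at localhost:5000
+```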
+ +### Running the POC with images from the public ECR + +```bash +TAG=stable \ +HUB=public.ecr.aws/e4m8j0n8/mithril \ +./deploy-all.sh +``` + +### Running the POC with images from the private ECR + +Install AWS CLI and configure it, follow [aws cli install and configure instructions.](https://aws.amazon.com/cli/?nc1=h_ls) +Note: It may be necessary to request access to the private ECR +```bash +TAG=latest \ +HUB=529024819027.dkr.ecr.us-east-1.amazonaws.com/mithril \ +./create-docker-registry-secret.sh \ +./deploy-all.sh +``` + +### Running the POC with local registry + +Before running the deploy script, specify your trust domain and cluster name on the spire server config at `spire/server-configmap.yaml` + +```bash +TAG=my-build \ +HUB=localhost:5000 \ +./deploy-all.sh +``` + +The output should look like: + +``` +namespace/istio-system created +secret/istio created +secret/istio.istio-ingressgateway-service-account created +configmap/istio-ca-root-cert created +✔ Istio core installed +✔ Istiod installed +✔ Ingress gateways installed +✔ Installation complete Thank you for installing Istio 1.10. Please take a few minutes to tell us about your install/upgrade experience! https://forms.gle/KjkrDnMPByq7akrYA +peerauthentication.security.istio.io/default created +namespace/spire created +clusterrolebinding.rbac.authorization.k8s.io/k8s-workload-registrar-role-binding created +clusterrole.rbac.authorization.k8s.io/k8s-workload-registrar-role created +configmap/k8s-workload-registrar created +Warning: apiextensions.k8s.io/v1beta1 CustomResourceDefinition is deprecated in v1.16+, unavailable in v1.22+; use apiextensions.k8s.io/v1 CustomResourceDefinition +customresourcedefinition.apiextensions.k8s.io/spiffeids.spiffeid.spiffe.io created +serviceaccount/spire-server created +configmap/spire-bundle created +clusterrole.rbac.authorization.k8s.io/spire-server-trust-role created +clusterrolebinding.rbac.authorization.k8s.io/spire-server-trust-role-binding created +configmap/spire-server created +statefulset.apps/spire-server created +service/spire-server created +serviceaccount/spire-agent created +clusterrole.rbac.authorization.k8s.io/spire-agent-cluster-role created +clusterrolebinding.rbac.authorization.k8s.io/spire-agent-cluster-role-binding created +configmap/spire-agent created +daemonset.apps/spire-agent created +secret/istio.details created +secret/istio.productpage created +secret/istio.ratings created +secret/istio.reviews created +configmap/istio-ca-root-cert created +service/details created +serviceaccount/details created +deployment.apps/details-v1 created +service/ratings created +serviceaccount/ratings created +deployment.apps/ratings-v1 created +service/reviews created +serviceaccount/reviews created +deployment.apps/reviews-v1 created +deployment.apps/reviews-v2 created +deployment.apps/reviews-v3 created +service/productpage created +serviceaccount/productpage created +deployment.apps/productpage-v1 created +gateway.networking.istio.io/bookinfo-gateway created +virtualservice.networking.istio.io/bookinfo-service created +destinationrule.networking.istio.io/enable-mtls created +``` + +Wait for all pods are to reach `Running` state: + +```bash +kubectl get pods -A +``` + +Expected output: + +``` +NAMESPACE NAME READY STATUS RESTARTS AGE +default details-v1-5d8d8fbf4-t49fm 2/2 Running 0 38m +default productpage-v1-6c79c57c79-zzgbs 2/2 Running 0 38m +default ratings-v1-9655b4cf8-zbv6v 2/2 Running 0 38m +default reviews-v1-7bc6ccf8b6-ccs4m 2/2 Running 0 38m +default 
reviews-v2-8468c7558f-g2jlh 2/2 Running 0 38m +default reviews-v3-57648fdb96-hclfj 2/2 Running 0 38m +istio-system istio-ingressgateway-84cc868cb6-tv2s7 1/1 Running 0 38m +istio-system istiod-c87f5f7f8-wwv25 1/1 Running 0 41m +kube-system coredns-74ff55c5b-gz44l 1/1 Running 0 41m +kube-system coredns-74ff55c5b-pvph4 1/1 Running 0 41m +kube-system etcd-kind-control-plane 1/1 Running 0 41m +kube-system kindnet-dhgtt 1/1 Running 0 41m +kube-system kube-apiserver-kind-control-plane 1/1 Running 0 41m +kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 41m +kube-system kube-proxy-nf78x 1/1 Running 0 41m +kube-system kube-scheduler-kind-control-plane 1/1 Running 0 41m +local-path-storage local-path-provisioner-78776bfc44-t2zjm 1/1 Running 0 41m +spire spire-agent-8dtgl 3/3 Running 0 41m +spire spire-server-0 2/2 Running 0 41m +``` + +### Cluster Overview + +![Structure for each Mithril cluster with the bookinfo example.](img/overview.png) + +### SPIRE Entries +When using [K8S Workload Registrar](https://github.com/spiffe/spire/tree/main/support/k8s/k8s-workload-registrar) for automatic workload registration within Kubernetes, you can check the created entries using the following command: + +``` +kubectl exec -i -t pod/spire-server-0 -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -socketPath /run/spire/sockets/server.sock" +``` + +## Test example + +### Inside the cluster: + +```bash +kubectl exec "$(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}')" -c ratings -- curl -sS productpage:9080/productpage +``` + +The output is an HTML page that should not have any error sections. + +### Outside the cluster: + +#### Checking the gateways rules created + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: bookinfo-gateway +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 8080 + name: http + protocol: HTTP + hosts: + - "*" +--- +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: bookinfo-gateway-mtls +spec: + selector: + istio: ingressgateway-mtls + servers: + - port: + number: 7080 + name: https + protocol: HTTPS + tls: + mode: ISTIO_MUTUAL + hosts: + - "*" +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: bookinfo-service +spec: + hosts: + - "*" + gateways: + - bookinfo-gateway + - bookinfo-gateway-mtls + http: + - match: + - uri: + exact: /productpage + - uri: + prefix: /static + - uri: + exact: /login + - uri: + exact: /logout + - uri: + prefix: /api/v1/products + route: + - destination: + host: productpage.default.svc.cluster.local + port: + number: 9080 +--- +apiVersion: "networking.istio.io/v1alpha3" +kind: "DestinationRule" +metadata: + name: "enable-mtls" +spec: + host: "*.svc.cluster.local" + trafficPolicy: + tls: + mode: ISTIO_MUTUAL +``` + +Forward host port 8000 to port 8080 (ingressgateway pod port) inside the cluster: + +```bash +./forward-port.sh + +Forwarding from 127.0.0.1:8000 -> 8080 +Forwarding from [::1]:8000 -> 8080 +``` + +#### Make a **HTTP** request from the host: + +```bash +curl localhost:8000/productpage +``` + +Or open in the browser `localhost:8000/productpage`. + +The output is an HTML page that should not have any error sections. 
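+
+As a quick scripted check (instead of eyeballing the HTML), you can assert on the HTTP status code and look for the page title; this is a small sketch that only assumes the stock Bookinfo sample is deployed as described above.
+
+```bash
+# Expect HTTP 200 from the product page behind the ingress gateway
+STATUS=$(curl -s -o /dev/null -w '%{http_code}' localhost:8000/productpage)
+if [ "$STATUS" = "200" ]; then
+  echo "productpage reachable (HTTP $STATUS)"
+else
+  echo "unexpected status from productpage: HTTP $STATUS"
+fi
+
+# Print the page title as a sanity check on the returned HTML
+curl -s localhost:8000/productpage | grep -o '<title>.*</title>'
+```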
+ +#### Make a **HTTPS** request from the host: + +Mint SVID in the trust domain `example.org`: + +```bash +$ kubectl exec --stdin --tty -n spire spire-server-0 -- /opt/spire/bin/spire-server x509 mint -spiffeID spiffe://example.org/myservice -socketPath /run/spire/sockets/server.sock +``` + +Copy the X509-SVID section of the output to a file `svid.pem`. +Copy the Private key section of the output to a file `key.pem`. + +Forward host port 7000 to port 7080 (ingressgateway pod port) inside the cluster: + +```bash +INGRESS_POD=$(kubectl get pod -l istio=ingressgateway -n istio-system -o jsonpath="{.items[0].metadata.name}") +kubectl port-forward "$INGRESS_POD" 7000:7080 -n istio-system & +``` + +#### Test TLS request + +```bash +$ curl --cert svid.pem --key key.pem -k -I https://localhost:7000/productpage + +< HTTP/2 200 +< content-type: text/html; charset=utf-8 +< content-length: 5183 +< server: istio-envoy +< ... +``` + +## Clean up + +```bash +./cleanup-all.sh +``` + +# Playaround + +There are some configurations and dependencies that have to be met +to correctly deploy Mithril on your custom environment. Let's get through these: + +## Istio-SPIRE integration dependencies + +### Istio configuration + +Let's take a closer look at `istio-config.yaml`: + +```yaml +apiVersion: operator.istio.io/v1alpha1 +kind: IstioOperator +metadata: + namespace: istio-system +spec: + profile: default + meshConfig: + trustDomain: example.org +``` +This defines the `trustDomain` environment variable and sets a default Istio deployment. + +#### SPIRE Server k8s notifier plugin configuration + +Configure the k8s notifier plugin in your custom SPIRE `server.conf` file + +```yaml +Notifier "k8sbundle" { + plugin_data { + namespace = "spire" + config_map = "trust-bundle" + config_map_key = "root-cert.pem" + } +} +``` + +#### Istio Discovery configuration + +- Environment variables: + - `ENABLE_CA_SERVER`: disables default istiod CA functionality. + - `PILOT_ENABLE_XDS_IDENTITY_CHECK`: checks that _istio-agent's_ namespace and service account match the ones in the JWT token presented in XDS the connection. + - `PILOT_CERT_PROVIDER`: sets SPIFFE Workload API as the cert provider for _istiod_. + - `SPIFFE_ENDPOINT_SOCKET`: sets the SPIFFE Workload API socket path. +- Volume mounts: + - `spire-agent-socket`: SPIFFE Workload API socket path. + + +```yaml +pilot: + k8s: + env: + - name: ENABLE_CA_SERVER + value: "false" + - name: PILOT_ENABLE_XDS_IDENTITY_CHECK + value: "true" + - name: PILOT_CERT_PROVIDER + value: "spiffe" + - name: SPIFFE_ENDPOINT_SOCKET + value: "unix:///run/spire/sockets/agent.sock" + overlays: + - apiVersion: apps/v1 + kind: Deployment + name: istiod + patches: + - path: spec.template.spec.containers.[name:discovery].volumeMounts[7] + value: + name: spire-agent-socket + mountPath: "/run/spire/sockets" + readOnly: true + - path: spec.template.spec.volumes[7] + value: + name: spire-agent-socket + csi: + driver: "csi.spiffe.io" +``` + +#### Istio Agent (sidecar) configuration + +- Environment variables: + - `CA_PROVIDER`: sets SPIFFE Workload API as the cert provider for _istio-agent_. + - `SPIFFE_ENDPOINT_SOCKET`: sets the SPIFFE Workload API socket path. +- Volume mounts: + - `spire-agent-socket`: SPIFFE Workload API socket path. 
+ +```yaml +ingressGateways: + - name: istio-ingressgateway + enabled: true + label: + istio: ingressgateway + k8s: + overlays: + - apiVersion: apps/v1 + kind: Deployment + name: istio-ingressgateway + patches: + - path: spec.template.spec.volumes[8] + value: + name: spire-agent-socket + csi: + driver: "csi.spiffe.io" + - path: spec.template.spec.containers.[name:istio-proxy].volumeMounts[8] + value: + name: spire-agent-socket + mountPath: "/run/spire/sockets" + readOnly: true + env: + - name: CA_PROVIDER + value: "spiffe" + - name: SPIFFE_ENDPOINT_SOCKET + value: "unix:///run/spire/sockets/agent.sock" +``` + +We still need to create a couple of configmaps for the istio namespace and for every +namespace that you want to deploy your workloads. The `istio-ca-root-cert` configmap that is required for _istio-agent_ injection. + +```yaml +# This remains empty but needs to be present because of istio-agent injection +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-ca-root-cert + namespace: istio-system +--- +# This remains empty but needs to be present because of istio-agent injection +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-ca-root-cert + namespace: default +--- +``` + +#### Workload's sidecars configuration + +To automatically propagate the _ingressgateway_ configuration to the sidecars +we can make use of [Custom templates](https://istio.io/latest/docs/setup/additional-setup/sidecar-injection/#custom-templates-experimental). + +```yaml +sidecarInjectorWebhook: + templates: + spire: | + spec: + containers: + - name: istio-proxy + env: + - name: CA_PROVIDER + value: "spiffe" + - name: SPIFFE_ENDPOINT_SOCKET + value: "unix:///run/spire/sockets/agent.sock" + volumeMounts: + - name: spire-agent-socket + mountPath: /run/spire/sockets + readOnly: true + volumes: + - name: spire-agent-socket + csi: + driver: "csi.spiffe.io" +``` + +The template holds the same configuration for the `ingressgateway` and can be applied +to the workloads by adding an annotation to the app's deployment spec in the `template` section, +as in the example below: + +```yaml +template: + metadata: + labels: + app: productpage + version: v1 + annotations: + inject.istio.io/templates: "sidecar,spire" +``` + +# Deploying the POC to Amazon EKS + +1. Install [kubectl](#install-kubectl-client) and [istioctl](#install-istioctl). + +2. Install [eksctl](https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html) and [aws cli](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html). + +3. Set up the credentials for AWS. +```bash +aws configure +``` + +4. Create an EKS cluster. Name the cluster at will, choose a region, and configure an AWS Key Pair or an SSH key (optional). This may take a while. +```bash +eksctl create cluster \ + --name \ + --region us-east-1 \ + --with-oidc \ + --ssh-access \ + --ssh-public-key my-key-pair \ + --managed +``` + +5. Deploy the latest (master) tag using the images from the ECR repository. +```bash +TAG=latest \ +HUB=public.ecr.aws/e4m8j0n8/mithril \ +./deploy-all.sh +``` +When you are done, you can [clean up your istio deployment](#clean-up), and then delete the EKS cluster. 
+```bash +eksctl delete cluster --region us-east-1 --name +``` diff --git a/POC/agent.sh b/POC/agent.sh new file mode 100644 index 00000000..77bf8d6c --- /dev/null +++ b/POC/agent.sh @@ -0,0 +1,115 @@ +#!/bin/bash + +# Creates folders for secret management +mkdir -p ./var/run/secrets/tokens ./var/run/secrets/istio + +# Creates temporary script to set some k8's configurations +mkdir -p ./.temp +echo '#!/bin/bash' > ./.temp/temp.sh +echo -n "echo " >> ./.temp/temp.sh && echo -n "'" >> ./.temp/temp.sh && echo -n '{"kind":"TokenRequest","apiVersion":"authentication.k8s.io/v1","spec":{"audiences":["istio-ca"], "expirationSeconds":2592000}}' >> ./.temp/temp.sh && echo -n "'" >> ./.temp/temp.sh +echo ' | \' >> ./.temp/temp.sh +echo 'kubectl create --raw /api/v1/namespaces/${1:-default}/serviceaccounts/${2:-default}/token -f - | jq -j ".status.token" > ./var/run/secrets/tokens/istio-token' >> ./.temp/temp.sh +bash ./.temp/temp.sh +rm -rf ./.temp + +# Get root-cert from secret of the POC and writes to file in the default Istio path for the root-cert +kubectl -n istio-system get configmaps istio-ca-root-cert -ojsonpath='{.data.root-cert\.pem}' > ./var/run/secrets/istio/root-cert.pem + +# Get certs from secrets of the POC and writes to files in the default Istio path +kubectl -n istio-system get secrets istiod-certs -ojsonpath='{.data.root-cert\.pem}' | base64 -d > ./etc/certs/root-cert.pem +kubectl -n istio-system get secrets istiod-certs -ojsonpath='{.data.istiod-key\.pem}' | base64 -d > ./etc/certs/key.pem +kubectl -n istio-system get secrets istiod-certs -ojsonpath='{.data.istiod-svid\.pem}' | base64 -d > ./etc/certs/cert-chain.pem + + +# Get script input parameters +# Flags: +# -r (remote) -> '1' for container build, else for local build +# -t (TAG) -> +# -h (HUB) -> +# -c (PILOT_CERT_PROVIDER) -> sets the 'PILOT_CERT_PROVIDER' Istio environment variable, default value is 'istio' +# use 'SPIRE' to run with our implementation of the cert provider +while getopts r:t:h:c: flag +do + case "${flag}" in + r*) r=${OPTARG};; + t*) TAG=${OPTARG};; + h*) HUB=${OPTARG};; + c*) PILOT_CERT_PROVIDER=${OPTARG};; + esac +done + +# Runs istio agent in local mode by default +# If flag 'r' (remote) is set to '1', runs the istio agent in container mode +if [[ $r = "1" ]] +then + echo "Running istio agent container!" + if [[ $HUB = "" ]] + then + echo "Empty HUB env var!" + exit + fi + if [[ $TAG = "" ]] + then + echo "Empty TAG env var!" 
+ exit + fi + echo "TAG: $TAG"; + echo "HUB: $HUB"; + + if [[ $PILOT_CERT_PROVIDER != "" ]] + then + echo "PILOT_CERT_PROVIDER="$PILOT_CERT_PROVIDER + fi + + echo "discoveryAddress: localhost:15012 +statusPort: 15020 +terminationDrainDuration: 0s +tracing: {}" > $PWD/proxy-config-docker.yaml + + TAG=$TAG HUB=$HUB BUILD_WITH_CONTAINER=0 DOCKER_TARGETS=docker.proxyv2 make push + + if [[ $PILOT_CERT_PROVIDER == "spiffe" ]] + then + echo "PILOT_CERT_PROVIDER="$PILOT_CERT_PROVIDER + docker run -it -v $PWD/var/run/secrets/tokens/istio-token:/var/run/secrets/tokens/istio-token -v /run/spire/sockets/agent.sock:/run/spire/sockets/agent.sock \ + -v $PWD/var/run/secrets/istio/:/var/run/secrets/istio/ \ + -v $PWD/etc/certs/:/etc/certs/ \ + --network host \ + -e SPIFFE_ENDPOINT_SOCKET="unix:///run/spire/sockets/agent.sock" \ + -e TRUST_DOMAIN="example.org" \ + -e PILOT_ENABLE_XDS_IDENTITY_CHECK=true \ + -e ENABLE_CA_SERVER=false \ + -e PILOT_CERT_PROVIDER=$PILOT_CERT_PROVIDER \ + -e PROXY_CONFIG="$(< $PWD/proxy-config-docker.yaml envsubst)" \ + $HUB/proxyv2:$TAG proxy sidecar + else + docker run -it -v $PWD/var/run/secrets/tokens/istio-token:/var/run/secrets/tokens/istio-token -v /run/spire/sockets/agent.sock:/run/spire/sockets/agent.sock \ + -v $PWD/var/run/secrets/istio/:/var/run/secrets/istio/ \ + -v $PWD/etc/certs/:/etc/certs/ \ + --network host \ + -e PILOT_ENABLE_XDS_IDENTITY_CHECK=true \ + -e ENABLE_CA_SERVER=false \ + -e PILOT_CERT_PROVIDER=$PILOT_CERT_PROVIDER \ + -e PROXY_CONFIG="$(< $PWD/proxy-config-docker.yaml envsubst)" \ + $HUB/proxyv2:$TAG proxy sidecar + fi +else + echo "Running istio agent locally!" + if [[ $PILOT_CERT_PROVIDER != "" ]] + then + echo "PILOT_CERT_PROVIDER="$PILOT_CERT_PROVIDER + fi + echo "binaryPath: $PWD/out/linux_amd64/envoy +configPath: $PWD +proxyBootstrapTemplatePath: $PWD/tools/packaging/common/envoy_bootstrap.json +discoveryAddress: localhost:15012 +statusPort: 15020 +terminationDrainDuration: 0s +tracing: {}" > $PWD/proxy-config.yaml + if [[ $PILOT_CERT_PROVIDER == "spiffe" ]] + then + SPIFFE_ENDPOINT_SOCKET="unix:///run/spire/sockets/agent.sock" TRUST_DOMAIN="example.org" PILOT_ENABLE_XDS_IDENTITY_CHECK=true PILOT_CERT_PROVIDER=$PILOT_CERT_PROVIDER PROXY_CONFIG="$(< $PWD/proxy-config.yaml envsubst)" go run ./pilot/cmd/pilot-agent proxy sidecar + else + PILOT_ENABLE_XDS_IDENTITY_CHECK=true PROXY_CONFIG="$(< $PWD/proxy-config.yaml envsubst)" go run ./pilot/cmd/pilot-agent proxy sidecar + fi +fi diff --git a/POC/bookinfo/bookinfo.yaml b/POC/bookinfo/bookinfo.yaml new file mode 100644 index 00000000..96a253bc --- /dev/null +++ b/POC/bookinfo/bookinfo.yaml @@ -0,0 +1,355 @@ +# Copyright Istio Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +################################################################################################## +# This file defines the services, service accounts, and deployments for the Bookinfo sample. 
+# +# To apply all 4 Bookinfo services, their corresponding service accounts, and deployments: +# +# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml +# +# Alternatively, you can deploy any resource separately: +# +# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l service=reviews # reviews Service +# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l account=reviews # reviews ServiceAccount +# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml -l app=reviews,version=v3 # reviews-v3 Deployment +################################################################################################## + +################################################################################################## +# Details service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: details + labels: + app: details + service: details +spec: + ports: + - port: 9080 + name: http + selector: + app: details +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-details + labels: + account: details +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: details-v1 + labels: + app: details + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: details + version: v1 + template: + metadata: + labels: + app: details + version: v1 + annotations: + inject.istio.io/templates: "sidecar,spire" + spec: + serviceAccountName: bookinfo-details + containers: + - name: details + image: docker.io/istio/examples-bookinfo-details-v1:1.16.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 + securityContext: + runAsUser: 1000 +--- +################################################################################################## +# Ratings service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: ratings + labels: + app: ratings + service: ratings +spec: + ports: + - port: 9080 + name: http + selector: + app: ratings +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-ratings + labels: + account: ratings +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ratings-v1 + labels: + app: ratings + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: ratings + version: v1 + template: + metadata: + labels: + app: ratings + version: v1 + annotations: + inject.istio.io/templates: "sidecar,spire" + spec: + serviceAccountName: bookinfo-ratings + containers: + - name: ratings + image: docker.io/istio/examples-bookinfo-ratings-v1:1.16.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 + securityContext: + runAsUser: 1000 +--- +################################################################################################## +# Reviews service +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: reviews + labels: + app: reviews + service: reviews +spec: + ports: + - port: 9080 + name: http + selector: + app: reviews +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-reviews + labels: + account: reviews +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v1 + labels: + app: reviews + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v1 + template: + metadata: + labels: + app: reviews + version: v1 + annotations: + 
inject.istio.io/templates: "sidecar,spire" + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v1:1.16.2 + imagePullPolicy: IfNotPresent + env: + - name: LOG_DIR + value: "/tmp/logs" + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + - name: wlp-output + mountPath: /opt/ibm/wlp/output + securityContext: + runAsUser: 1000 + volumes: + - name: wlp-output + emptyDir: {} + - name: tmp + emptyDir: {} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v2 + labels: + app: reviews + version: v2 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v2 + template: + metadata: + labels: + app: reviews + version: v2 + annotations: + inject.istio.io/templates: "sidecar,spire" + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v2:1.16.2 + imagePullPolicy: IfNotPresent + env: + - name: LOG_DIR + value: "/tmp/logs" + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + - name: wlp-output + mountPath: /opt/ibm/wlp/output + securityContext: + runAsUser: 1000 + volumes: + - name: wlp-output + emptyDir: {} + - name: tmp + emptyDir: {} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: reviews-v3 + labels: + app: reviews + version: v3 +spec: + replicas: 1 + selector: + matchLabels: + app: reviews + version: v3 + template: + metadata: + labels: + app: reviews + version: v3 + annotations: + inject.istio.io/templates: "sidecar,spire" + spec: + serviceAccountName: bookinfo-reviews + containers: + - name: reviews + image: docker.io/istio/examples-bookinfo-reviews-v3:1.16.2 + imagePullPolicy: IfNotPresent + env: + - name: LOG_DIR + value: "/tmp/logs" + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + - name: wlp-output + mountPath: /opt/ibm/wlp/output + securityContext: + runAsUser: 1000 + volumes: + - name: wlp-output + emptyDir: {} + - name: tmp + emptyDir: {} +--- +################################################################################################## +# Productpage services +################################################################################################## +apiVersion: v1 +kind: Service +metadata: + name: productpage + labels: + app: productpage + service: productpage +spec: + ports: + - port: 9080 + name: http + selector: + app: productpage +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: bookinfo-productpage + labels: + account: productpage +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: productpage-v1 + labels: + app: productpage + version: v1 +spec: + replicas: 1 + selector: + matchLabels: + app: productpage + version: v1 + template: + metadata: + labels: + app: productpage + version: v1 + annotations: + inject.istio.io/templates: "sidecar,spire" + spec: + serviceAccountName: bookinfo-productpage + containers: + - name: productpage + image: docker.io/istio/examples-bookinfo-productpage-v1:1.16.2 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9080 + volumeMounts: + - name: tmp + mountPath: /tmp + securityContext: + runAsUser: 1000 + volumes: + - name: tmp + emptyDir: {} +--- diff --git a/POC/bookinfo/cleanup-bookinfo.sh b/POC/bookinfo/cleanup-bookinfo.sh new file mode 100755 index 00000000..18aaa855 --- /dev/null +++ b/POC/bookinfo/cleanup-bookinfo.sh @@ -0,0 +1,73 @@ +#!/bin/bash +# +# Copyright Istio Authors +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +SCRIPTDIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) + +# only ask if in interactive mode +if [[ -t 0 && -z ${NAMESPACE} ]];then + echo -n "namespace ? [default] " + read -r NAMESPACE +fi + +# verify if the namespace exists, otherwise use default namespace +if [[ -n ${NAMESPACE} ]];then + ns=$(kubectl get namespace "${NAMESPACE}" --no-headers --output=go-template="{{.metadata.name}}" 2>/dev/null) + if [[ -z ${ns} ]];then + echo "NAMESPACE ${NAMESPACE} not found." + NAMESPACE=default + fi +fi + +# if no namespace is provided, use default namespace +if [[ -z ${NAMESPACE} ]];then + NAMESPACE=default +fi + +echo "using NAMESPACE=${NAMESPACE}" + +protos=( destinationrules virtualservices gateways ) +for proto in "${protos[@]}"; do + for resource in $(kubectl get -n ${NAMESPACE} "$proto" -o name); do + kubectl delete -n ${NAMESPACE} "$resource"; + done +done + +OUTPUT=$(mktemp) +export OUTPUT +echo "Application cleanup may take up to one minute" +kubectl delete -n ${NAMESPACE} -f "$SCRIPTDIR/bookinfo.yaml" > "${OUTPUT}" 2>&1 +ret=$? +function cleanup() { + rm -f "${OUTPUT}" +} + +trap cleanup EXIT + +if [[ ${ret} -eq 0 ]];then + cat "${OUTPUT}" +else + # ignore NotFound errors + OUT2=$(grep -v NotFound "${OUTPUT}") + if [[ -n ${OUT2} ]];then + cat "${OUTPUT}" + exit ${ret} + fi +fi + +# wait for 30 sec for bookinfo to clean up +sleep 30 + +echo "Application cleanup successful" diff --git a/POC/bookinfo/deploy-bookinfo.sh b/POC/bookinfo/deploy-bookinfo.sh new file mode 100755 index 00000000..11a9f924 --- /dev/null +++ b/POC/bookinfo/deploy-bookinfo.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +istioctl kube-inject --filename bookinfo.yaml | kubectl apply -f - +kubectl apply -f gateway.yaml diff --git a/POC/bookinfo/gateway.yaml b/POC/bookinfo/gateway.yaml new file mode 100644 index 00000000..60a317aa --- /dev/null +++ b/POC/bookinfo/gateway.yaml @@ -0,0 +1,69 @@ +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: bookinfo-gateway +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 8080 + name: http + protocol: HTTP + hosts: + - "*" +--- +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: bookinfo-gateway-mtls +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 7080 + name: https + protocol: HTTPS + tls: + mode: ISTIO_MUTUAL + hosts: + - "*" +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: bookinfo-service +spec: + hosts: + - "*" + gateways: + - bookinfo-gateway + - bookinfo-gateway-mtls + http: + - match: + - uri: + exact: /productpage + - uri: + prefix: /static + - uri: + exact: /login + - uri: + exact: /logout + - uri: + prefix: /api/v1/products + route: + - destination: + host: productpage.default.svc.cluster.local + port: + number: 9080 +--- +apiVersion: "networking.istio.io/v1alpha3" +kind: "DestinationRule" +metadata: + name: "enable-mtls" +spec: + host: "*.svc.cluster.local" + trafficPolicy: + tls: + mode: ISTIO_MUTUAL diff --git a/POC/cleanup-all.sh 
b/POC/cleanup-all.sh new file mode 100755 index 00000000..ea0ef929 --- /dev/null +++ b/POC/cleanup-all.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +(cd bookinfo ; ./cleanup-bookinfo.sh) +(cd istio ; ./cleanup-istio.sh) +(cd spire ; ./cleanup-spire.sh) diff --git a/POC/configmaps.yaml b/POC/configmaps.yaml new file mode 100644 index 00000000..fd73fffb --- /dev/null +++ b/POC/configmaps.yaml @@ -0,0 +1,15 @@ +--- +# This remains empty but needs to be present because of istio-agent injection +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-ca-root-cert + namespace: default +--- +# This remains empty but needs to be present because of istio-agent injection +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-ca-root-cert + namespace: istio-system +--- diff --git a/POC/create-docker-registry-secret.sh b/POC/create-docker-registry-secret.sh new file mode 100755 index 00000000..18b05fad --- /dev/null +++ b/POC/create-docker-registry-secret.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +HUB_PASSWORD="$(aws ecr get-login-password --region us-east-1)" + +kubectl create ns istio-system + +kubectl create secret docker-registry secret-registry -n istio-system \ + --docker-server=$HUB\ + --docker-username=AWS \ + --docker-password=$HUB_PASSWORD + +#copy docker-registry secret to namespace default +kubectl get secret secret-registry --namespace=istio-system -o yaml | sed 's/namespace: istio-system/namespace: default/g' | kubectl create -f - \ No newline at end of file diff --git a/POC/create-kind-cluster.sh b/POC/create-kind-cluster.sh new file mode 100755 index 00000000..944f809a --- /dev/null +++ b/POC/create-kind-cluster.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +set -o errexit + +# create registry container unless it already exists +reg_name='kind-registry' +reg_port='5000' +running="$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" +if [ "${running}" != 'true' ]; then + docker run \ + -d --restart=always -p "127.0.0.1:${reg_port}:5000" --name "${reg_name}" \ + registry:2 +fi + +# create a cluster with the local registry enabled in container, and configured +# with the Token API server. +cat <> workload.log + +# SPIRE agent log +kubectl logs $(kubectl get pod -l app=spire-agent -o jsonpath='{.items[0].metadata.name}' -n spire) -n spire >> spire-agent.log + +echo -e "${PURPLE}Log for SPIRE in operation availabe at $PWD${NC}" \ No newline at end of file diff --git a/POC/demo/federation-demo.sh b/POC/demo/federation-demo.sh new file mode 100755 index 00000000..ed506ff1 --- /dev/null +++ b/POC/demo/federation-demo.sh @@ -0,0 +1,109 @@ +#!/bin/bash + +# Environment Variables +export TAG=stable_20211022 +export HUB=public.ecr.aws/e4m8j0n8/mithril +export BASE_DIR=$HOME/mithril + +# Colors +PURPLE='\033[0;35m' +GREEN='\033[0;32m' +NC='\033[0m' # No Color + +# Get POC from AWS S3 +echo -e "${PURPLE}Downloading POC version from AWS S3...${NC}" + +aws s3 cp s3://mithril-customer-assets/mithril.tar.gz . 
--profile scytale +mkdir -p $BASE_DIR && tar -xf ./mithril.tar.gz -C $BASE_DIR + +echo -e "${PURPLE}Creating namespaces...${NC}" +$BASE_DIR/usecases/federation/create-namespaces.sh + +echo -e "${PURPLE}Deploying Spire...${NC}" + +# Call script to deploy Spire 1 +cd $BASE_DIR/usecases/federation/spire +./deploy-spire.sh $BASE_DIR + +# Wait SPIRE Agent to be ready +echo -e "${GREEN}$(kubectl wait pod --for=condition=Ready -l app=spire-agent -n spire)${NC}" + +# Call script to deploy Spire 2 +cd $BASE_DIR/usecases/federation/spire2 +./deploy-spire.sh + +# Wait SPIRE Server 1 to be ready +echo -e "${GREEN}$(kubectl wait --for=condition=ready pod spire-server-0 -n spire2 --timeout=-1s)${NC}" + +# Echo bundle from SPIRE server +bundle=$(kubectl exec --stdin spire-server-0 -c spire-server -n spire2 -- /opt/spire/bin/spire-server bundle show -format spiffe -socketPath /run/spire/sockets/server.sock) +echo $bundle + +# Wait SPIRE Server 2 to be ready +echo -e "${GREEN}$(kubectl wait --for=condition=ready pod spire-server-0 -n spire --timeout=-1s)${NC}" + +# Set domain.test bundle to example.org SPIRE bundle endpoint +kubectl exec --stdin spire-server-0 -c spire-server -n spire -- /opt/spire/bin/spire-server bundle set -format spiffe -id spiffe://domain.test -socketPath /run/spire/sockets/server.sock <<< "$bundle" + +# Call script to deploy Istio +echo -e "${PURPLE}Deploying Istio...${NC}" +cd $BASE_DIR/usecases/federation/istio +./deploy-istio.sh $BASE_DIR + +# Check running pods +echo -e "${GREEN}$(kubectl get pods -n istio-system)${NC}" + +sleep 10.0 + +# Deploying Bookinfo Application +echo -e "${PURPLE}Deploying Bookinfo application...${NC}" + +cd $BASE_DIR/usecases/federation/bookinfo +./deploy-bookinfo.sh $BASE_DIR + +# Port Forwading Services +echo -e "${PURPLE}Port Forwarding Services...${NC}" +$BASE_DIR/forward-port.sh +$BASE_DIR/usecases/federation/forward-secure-port.sh + +# Waiting for pods to be ready +echo -e "${GREEN}$(kubectl wait pod --for=condition=Ready -l app=productpage --timeout=-1s)${NC}" +echo -e "${GREEN}$(kubectl wait pod --for=condition=Ready -l app=details --timeout=-1s)${NC}" +echo -e "${GREEN}$(kubectl wait pod --for=condition=Ready -l app=reviews --timeout=-1s)${NC}" +echo -e "${GREEN}$(kubectl wait pod --for=condition=Ready -l app=ratings --timeout=-1s)${NC}" + +# Check running pods +echo -e "${PURPLE}$(kubectl get pods -n default)${NC}" + +sleep 10.0 + +# Check Product page SA response +echo -e "${GREEN}$(curl localhost:8000/productpage)${NC}" + +sleep 10.0 + +# Demonstrating SPIRE in operation +cd $BASE_DIR/spire + +# Workload Log +kubectl logs $(kubectl get pod -l app=details -o jsonpath='{.items[0].metadata.name}') -c istio-proxy >> workload.log + +# SPIRE agent log +kubectl logs $(kubectl get pod -l app=spire-agent -o jsonpath='{.items[0].metadata.name}' -n spire) -n spire >> spire-agent.log + +echo -e "${PURPLE}Log for SPIRE in operation availabe at $PWD/spire-agent.log${NC}" +echo -e "${PURPLE}Log for SPIRE in operation availabe at $PWD/workload.log${NC}" + +# Demonstrating Federation +cd $BASE_DIR/usecases/federation + +# Mint x509 SVID +kubectl exec --stdin --tty -n spire2 spire-server-0 -- /opt/spire/bin/spire-server x509 mint -spiffeID spiffe://domain.test/myservice -socketPath /run/spire/sockets/server.sock >> mint-cert.pem +sleep 3.0 + +# Extracting key and svid from mintend x509 +openssl pkey -in mint-cert.pem -out key.pem +openssl x509 -in mint-cert.pem -out svid.pem + +# Test TLS request with the svid.pem and key.pem generated +echo -e "${GREEN}$(curl 
--cert svid.pem --key key.pem -k -I https://localhost:7000/productpage)${NC}" diff --git a/POC/demo/mithril.drawio b/POC/demo/mithril.drawio new file mode 100644 index 00000000..8f8ee9fa --- /dev/null +++ b/POC/demo/mithril.drawio @@ -0,0 +1 @@ +[compressed draw.io diagram payload of mithril.drawio omitted]
Z8a+utBlkBGNds1jpf6bhzkm3ZOEgf4TmbE6/DATyZKHRwkTgzDUDbSMiadCS+jZCbnC12PWh5emO6iV4b5jyNGyYDZdunhemD1pfMvrfhh9+TN6G5u/Ht/XjDRmMjWatSYdJY9091YCN1ksDepUILam3nZsuHK803Xu6xUOOZXN3YeE7gC/ZRpF2bqDtwl1ERBr5ANECuvYeFyFPJaKvfdwQthHtUx3PI5pXFCLUyIjPwpoPSsEXRC98HS3aj9oH63kMXl4fwOQH0OzV8AbIFStpalpWH1nI9t+VprL3F8sd10Y/YeSJ4v/x3kBLNyIP/lSkbjGub4nVt8jTt1ibvpVrVnc7oe02q+6OylF3OEyVq7tds7qnU1HXeeo2lIkiKxVZcUKvoMPqFSgcvYZ4XUav8NMQfdUmv7rP9ndlBQf/rJWXm7ATF61YJWmwSk7FimpdigXqVSi2eXrlqFWx8Gd7hrmJqVf5tfZYi4+TN47P2e5wASCvdoeH+GpG/rdo4eNrcVamDW8w2/IqCSrEnQzqjH8Hi/32UmmFlmHIUDXaPMtQxYkUuo4x+ZpQgZlISTvhAZvI40O14Vqb0Sk0MGcmt8h252iGlpo1OEh7ca0fyjwjtCK6foOuuycLAG3tovhIOK5mu3ceoceCJVpCKhuaXvP9an3dBxWA9lGjh8fJ3n/zqruV6e130gL/5n5HvhXc7cmdoTlzv3cgrJ6uFsQsuHDQ2tZhlrKDcrijM+hmOHEpKOcNRKZV2dDSXHMTX9zwTIS8OkKmP2spe0mQRSAlrCxoKHkrYWhhM0rYHkvFh9DV51j0ZGDlmq6JF4s883zWJnhhGjMpzTJnS3yt4xchtoSeNytNvPS7Iw8WpmEE1gsxPGkTMqml3srrn99judeS7zmmx4545lxi8CBc0JKvtqJrRh5O3Ai3gGrn2EGmRdB06sB6xi/VxZTzDqbjmshIdQxVugANqlMuOVB0FU6muWyhtBcA3XN7AcByrhN4Abgz3Qg847vvFI/x9QGcvRuKzRFMl9piDNVvBdB5B9n9uxG0Taw2DyR8YS5nFLcbMNEAFHl2IwjK4G7IsZvanIrEOpWs2VqdU8lrcc6H54nyeTRwet3/xS8v28+vn0Ycenp3P74grKczpjzWY6hv00UQAQQ6Vs2FfrqLdmLACCd/dOJH2F3KtM8zwykYUWCK1JwKRsUpaQwFkqZ3DlQA50IFfms618UIw83mShhht6PEYKIkStD2iPE36gMNqTxfbKfyRbqbINzRSic2ZzPhnFSyAuqoNm3/ALCDmubIm80mFSUkjxfAJllXEfMsx/uNdtPZJN8Km7uLdfyuFWgVIjknoxZ5TaRyasH3X4ocJ89tkMC6oEPMLha7HZYAV6aimrfDAC9UWmzL3nOPvrtLeEgeicq1v95galXlZpsXka10BUarpgSwfnLFC/wWN57y8Z4cDK1XgKHVHwU6CW9TGhf46dZExn32l49uN58UFgob5QsW1u4Ouwkvloww53WHTEX1RYf4iVpCTRY6bpaFHqytU8jcDqbdyW/bx7M+OSfrazdqYcBL9muMVxwsN2j/1x/GUOps/pCXhnSFaBOChniUQxQLQdTxaKPkRJvKMyXKbUOAc45pkchFJZEU3dIcx9Rb8V0PXg6eqkN+Dt5EldvyaUxFbpSpiOy+6eLL80VFQNNA/7hsFzUe26C7R40IgfL5KRurusYVVJ5NzgvEef9V3C1tHylA5kbq2irMQ6esRU4c0ilWHl8ELah0I6iaXcSaKW+RIN2VUN7k7vIpGW9WlunfDPEmZ4jL57YS1st9QgZk1Iy75yYdR+qBrJQjXPlJDW8Y4wPNZI/XdwgsAfKcI3cdzoDVdgKMXciOvGTa4uMVno9JO1FT53hVPy6cIzknHRdueO/MfC59BZeL0vH7SQ92N3ZBltns6jeAoL0xscYi+3x15iGVWWsfv38nnzKexR8/kRm/8ejpZYBFfbRY4YULCVMURMGDp0/hBpeFgryjBtJJYVA6B+rx44D0SYlYSaYtvnvQixZsCgaySQRPXuS+7Bw6HKZJWXhd1hwKge18k4gXvb9GLqHm5BJKs+ZRbcGkkEt4Zc6T6HwagqGekV/07acHR5yNX7++bGa7/u+tadvc8/TJGbc0EhulsZzMXAgZT9SUMjAzsaJVlJ44HGYp/93JEdGtzFEtlZVNUBH467Ki+SkdObue6tJT9Ne3hWwMv3R//hpt/tXX095wk/H7CuWmeB8XspH3+ZWlLb2p7tradIqBtUBMoOaIEWtimVOmiiNygK7Pqk7QFBLVivEqKokfcQ2IXa9UCSjhsuV9QKELmWsBFCB0q0EUpqKaIaWu81Ek63t/YZiSOWv+YgpPOzl+/YvFlCwYCSnduzAS0sKrgRG5e5vI3j8aSNiqaoaS9LzHclDyETrzS4aPyg6NXBh84NvDb5MGxQ8/8CoN/gA=7VxZc5tIEP41qtpNVVzcx6NlWdns5vBa2UrylEIwEsSI0QK2pf31OwMznIMECARWbFfZ0EAD3T1fH9PDRLzZ7N75xtb+CC3gTgTO2k3E2UQQeJ5T0D9M2ccUXVdjwtp3LHJSSlg4/wFC5Aj10bFAkDsxhNANnW2eaELPA2aYoxm+D5/zp62gm7/r1liDEmFhGm6Z+tWxQjumajKX0v8AztoOkxcmRzYGPZkQAtuw4HOGJN5OxBsfwjDe2uxugIuFR+Xy49Pc//rJ1jlut1j498/399z+bcxs3uSS5BV84IXdshZi1k+G+0jkRd413FMB+vDRswBmwk3E6bPthGCxNUx89BmZDKLZ4cZFezzarPmk5I2egB+CXUZP5MnfAbgBob9Hp5CjItHBc6pCVZdjmp1Rn0q1ZxCzWSesUtGgDSKdBpISO5bUynHdG+hCP7pWXMn4F9GD0IcPIHNEiX7wFdALM/T4p0+ZU8MnI19USzoQqLizOhCUvnQg/XI6kAoqkBjjQGPoINFd5zqQe9bBaiWYJksHlrJUZKVPey8Im2eADi8zhM1LfQlbuVhhK0XLVhjCVljoovUlbPVyhT06WWsMWSsuuu3Ucp5yMlf+fcQBVoS8b4MovLxGJ/DydpceRFtr8t+lJ7fnsrh7P5/fosNfof/gQsNCm9d37ylz9MIx//w9ETl6dkrt0HQsGWiWxDIdTViKiWNakLtxfdmRWDQklWFIAsOQeosI9JKcgYXCf7IL/dCGa+gZ7m1KneY1kZ7zAcItkf9PEIZ7kssYjyHMaycIDT+8xrkJInjQA5Q2d/DTR2wjfcQMeKmVRpHu/P03zO5KprvfyRNEO7MduVe8tyd7lhHY0dvxCXua+AiNMSaAj74JDp1HczrDX4NDDAUSvGH9HLQ1H7hG6Dzl0zeW4ZBL76ATDXYaMQkVXpyyiJ+UXFUwv+Qx2lskzXsz0DYHoWlH1yGZO+H+DdNoPxhLlHrnDM1wnbWHtk10HUD2McVD1UG57TU5sHEsK7ZpgEDNWJLhL063+PWiF5anE3nGMMiadqBXYESSsJObTrI5MQs73nJXvECCx7YqpqfA1S
oA/WiPL2mvG5/yPggdaFW6kC6dhQG0FTPOUEwNLFftbaG5v5DV/FjUh/YXfDmmO4PDSKE8C+QZXK+A8jqOBuyc8Bu9D9rOcEZ7KWO8Q/k2d0Y5p1I0n/6djFTTyWhd+5jTjK1cM3rJroCOnW58ga4qOWwQTnMN9HmE/BU9egpWMaqhp5AqPQXOONYgYnNN2S59RsoxpBvpy21oY8syeHm0aUb7tCJxTPV80cAeQB3KA7BxRpG1nI1KfMH24jcqZRnldKVg7CVGfacrrBJjs0oME8UYjq5WmWTE7o9Wvrtwf7hCn9M7nYk70S6TpzyDA2TVSzuwnH686LSBF+2/njecb1VGV8Ljq2vBp5nAYraoGyidNQdsVs6j+V2a033PpnTM/K5r56vXdL4UEsaSf+mVljUChLr1nuD+FZkqkSmJjAZDJqFcyr1IZGqBMG3RrGNkEsS6yNT57MNplsUPqegmpcJOSpemawSBYxbyV9YcuWYC9hz5UpMleVAL0sdlQbQYl2LT5suHxQvKp4QqD9FuaknLlxOpAYx3qklgdRSOLjppUn285OikWKYZPjiprj+/ttAwTGcsLTSFKVF5cDsqN3R+ghYoCR+9Xzgp1Kcru2Qr+mrrex2WcvPqLzXYnLkzV80DAl/Wo8pQY29tuUK5jnuH+w2aazHpRqzqX+xTi2fSFqOV/bzaqtFreu4ZpuogveuQW3uhIXdvhUrgPzlIZJlaUJ8zu50nWe1rPPLg1WehXCNEscs9Dl1u4GYLPUDm3RqiaBpVVMQhF4CiilxWnnhOGKU3G0O9jB45S0VeqFuRF8YFoWK5vZJO1J002tLOxoqE8QJGmy4MPdrKJadfJ2gR69YJaVlnLCOuusrUVdCCzxmmR23ASEYbWyAj1lgcCzyr0E+W6+qqFc/kW73EAxFOISNXlKkwnzdWydFxlJE4a3EmpZ3a8MUV0LjQZ1O330uVD/Ppud1LZC3f7aTfhvR5Yc26hodxIfSN1Qphc4OZzp5nKmpaXjKUTp6Z4K54ni4p6brRiyuwFfIs+pu4EGusSi4jzSFwSdD6KLgkiH/R4MJL4lVH+MJg1TfEVPcFngYxH0Fgv2hYoePmFVaY4qkur5WjykOUG2xCODxFeu0y2qw7hXT+1JCnWjoWfeq9RZ+sVruC7I1gG3/gaeXssAqmW+A76PZ4+M3QPZxtAO5S0sHRmnyLKbc0gSs7BklQ1ekU0V2MAVPDfFhHNpCf2EA/Be3hNQwBsgzHW3+JElPsjJxN9LEp+n/mbNZIWq6zRH+d5Qb9XbrQfDBtw/HQjokhBUGVEYIfKI+1oY8CpKvgad2jZfCFAczTBDBjGhLDMnr7hIk0SI2uxdoZjpM5sGINb47jZ9ObYrZ0fA3MaY11k1y/lXqs4aqZQXW35FKuGUmd6JhKX0JqveCGO8KoIjxCxmTsM6eRMKDygfnCAwsk9UrHUsyxUx8qleurdCEOCjFWzho9MPfbbrb4fYg4qaV3pIJ8XXcaSUN8xdPelkL0jWJq6ZN6UksU04voUmDUEYoVH5jep18UK5cyLwDFaCH+FcWwNGp8RvCEanWCMEcLSglKXVhBKV8dkLR2MKPKh/n0XEqSqhcno9zMa19KopPOaOQhA8HPUV20ju80+uqS1Nny5HNWl/pYrfzznx+fH6af/3R1+2/jL9GfP6y2jM/3fsFtBUV9Hus1IECUBRFC6rLDoBnm1C8OyIW6kcT4ECSrONBFSwFTK2VH/waPQdvBHt7xTPcRfztc4L5dyZyOLRm96kRA9+IeQBTGeBb6u0Syc0HwqymzEALyarnSw/6qZ3Ntot30O+fxkEy/Fi/e/g8=7Vxtj6JIEP41JneX7IR38OOoM3eb7F4m51529tMGocW+QdqFdkb31183dINAo6i0GGedZJQCiu6q6qdeKBjo4+Xmz9hdLT4jH4QDTfE3A30y0DRVVSzyRSnbjDIc2hkhiKHPDioIU/gTMKLCqGvog6R0IEYoxHBVJnooioCHSzQ3jtFb+bA5CstXXbkBqBGmnhvWqV+hjxcZ1TGVgv4XgMEC5xNme5YuP5gRkoXro7cdkv4w0McxQjj7tdyMQUiFx+Xy9d9/Hj4+L42H75Ppzx/OkzOZrT9kzB6POSWfQgwi3C1rLWP96oZrJi82V7zlAozROvIBZaIM9NHbAmIwXbke3ftGTIbQFngZki2V/GTsQIzBpiL/A4NXc4kSUwRoCXC8JecxLjrTwVuhQntoZrTFjvpsrj2XmU2QsypEQ34w6RwhKb1jSc1hGI5RiOL0XH1u0j9CT3CMXsDOHiv90DNQhHfo2UemzLnhb/OlUVOCxuW9qwTVsSUpwXh3SjDKOtANwUJwBDrIlde5DkzJOpjPNc8T6cC3ZpZpyTR4QysbvEDYqikyeEOWsK2bFbZVtWxLIGxLIGzNkSVs+3aFfXWydgSytkJy2ZEPX0syt36saYSVIu+HJI0v78kBqrnaFDvJr4B9h/zg07lMgRcDev4EJh4i6qCinIL4FRJNa8r900d+HTL37FLlyxNyOg1O7dCKfBM4viGyIkeb6bmPmrKrKdLAsmpTtsCmNIFNWbJMSlUbbeo8c/iYYIj8RpV3qVwXOHMhRFieA2Zzrtwsy1INmc7QtMvO0Oldv3U4Bj7J9dgmivECBShyw4eCOiprozjmE0IrpoP/AMZbJlJ3jVFZQ0SG8faZnZ9ufKMbdybfnGx2d062bCvBbozvaQJLCBGKAKc9Qjrt9Jjjl7bvJot0NqrAFLLTeJar0SFtIH7mEyG/s6FrJtsshk43+MgzKVPRnmBPRD1oHXtgz3EaS6WIMAKwj6EjNtAYhC6Gr+XRdW9t9YTvEWBvQWeYuodEaI2f3BkIyxbkhjCIyG+PiA8QhY7oYoWeG96zHUvo+5mxAgJF7owBtz5aIRjhdGLmaGBOjln9rJzCmA3yIsZhLWp7YeGDcqcObasEDWxdttYLY/5EZ1dw5subMc0VwDmg+TyhTrmi13yIZ6halFYe6TiMRsdBA4YApGzuOdtZLIgY+vQqsryIc3VBgtmHE5HjDHLXlPupdq7pRCfCHcVVOBF1eCEnIgYry3RKhm2oFYPNJsDO2q3KVhgZlRVSY5RNsMaoM+wTVRiOS8SE0Ffzla0So9vwoOYhD0rLc2Vn140LzdEt96HOxXyoqHjSgR3JccSjIxyx/Iy+P/dsXV8S31wYOs8ENpNp21jrolll4a2/7e5rcN25L77j7vfbrve9blecYURvCd3wmhHqIXpF21/I1IhMRu/lJz6xCwOFsIKzP/bupGTlhW6SQK+SqIhuazgeEN/WmDmmYZ6NQJ3H7vk9a2ZapmaeFrtXbbTGSHLszgsvO4i2/PJpeptRtbYfL2hdSnPKdSnuwU61F/lRsybqKrk6n3RM2eqWfVI1Ve/fJdV7Mf5GPqjJm8gBDyoFqcYGl4aWmPZoIdJnWeNVtLh0U41dVqSgr8kWqFFaR41Wr8E80fuNx2sxbyRoaj2QqcULaUu3e9ZWizaRS5eUm4O1s0KvNjfzWL5+OPkbnhnMnac0W
WWFoivkIrdyWgfb8jMys/dakVbP6MfrBKMloY3v02BzuUIRYNH3kVhatNM03Ji5ASy1zLoKRQ2O0sCU93X3k1SXq2h8T09dEW2raFqvQKrXm6x4cX18zmor+psawv0bWG1Dre/VVi8YvOPQRW/bh5Rl6b2tuOYaQVehCz2mn9aUHuMZ59rCGb3F0y0g8ittJKVmjlbxTLnDQ98T4VTycssaaY+P3ddzdyQugkNOO7dlo1L2HZ5Y9bXN/XwkF3110fM3ndwj94nyIKYKWYVuRHEBx+58TrD5iPumkuvMjWBwcl05W3WNKKHcqSpvLO+6WUOpsNXKLOSVnfUWjxXVkWYfuORofRBccsS/aXBRDf2uI3wRsJINMc29POdBzGeQLN4LrFjvEFaai2z1qHIfJS3e0PCU6LXLaLPts1OXTw1VrqVD0edQWvQpao+pyN5NVtkrGuZwQ1UwWoEYksvT5Tch14CrBDwVpL2rNX+bQqkjWak7BkOz7dGI0EOKASPXewlSGyjf3iCfivZo63JCLANGwZc0MaXOCC7T10Xw7wlcBkRaIZyR/3C2JP9nIfJevIULI7LhUUghUOVi8J3ksQsUkwDpLnkNJFqGWlnAKk8Ad0zDEFiGtGeQjV5qdCe0zCuKqYC5aHkrijoZjavZ0hGt79fUbsfSxINli4Y25HO7Z6rPbZ/c+a4cYNQQ5RCbcLc7hzFv3jhgtTJgHvIVSyLj2KkrNOplUt4RT0KKOQzIgJXfNpPp732EO/K7dQxlsA/j5D9FJj/a4S+L+AWL8mGxcxSzK++20W3jRBQbitGlaxSrDvgyKNb8DOz7QDHt9lFMbtE5R5iDdaEcpW6sLlRO8o3KA1nti0L7+UiuCBnNRWeSYkWnV4T4vWOyRImB0HE0156zK91Skci4ntpzpcIoEXDqxcU/qLoXkDoTGHnhmr4vUlOe70xlSKVLBD7QyMWUF5B6zMgn/2ckjgpB/R0Mh1oNGIDtgg8jddlg0GgqZ9YGzEq0odr12oD4RU7S3gan1NT563m9Hp/X6zxSrqbPRvWVYG1dWI2R3i5SPgw8ZLN4C2t2ePEuW/3hfw==7Vxbc5s6EP41ntPTGWcw4mIeHSdu0pOmmdI2bV86CghbDQYX5Ft//ZFswEaSbeKAg+PGDzELrMXutxftSjRAdzh7F8HR4EPoIr+hKu6sAS4aqtpqKQb9xyjzJcWyzCWhH2E3uWhFsPEflBCVhDrGLopzF5Iw9Ake5YlOGATIITkajKJwmr/MC/38r45gHwkE24G+SL3HLhksqW1dWdGvEO4PSPbAyZkhTC9OCPEAuuF0jQQuG6AbhSFZfhvOushnwkvlcvHh/IrYj6POsPvl1p/c3Q2nqLlk1nvKLdkjRCgge7P2Oj+69q01appXXTDoRp5v/2zqS9YT6I8TeaFgEs6TBybzVIpROA5cxDi1GuB8OsAE2SPosLNTihtKG5Chn5z2sO93Qz+MFvcCF6K251B6TKLwEa2dMZw2evDomYIPmAhigiKCZmvqTR74HQqHiER09Epytp2oLsGuaSZgnq6Q0ErVO1hDgZHQYAK+fsZ5JWD6JZHxE+RtCPLGMcFhk6KQPvkrkbpq1k3spiB2++6617uktPswevRD6NKvl4E7CvHr0YNm1U0PbTn83dcicaN2ErcEiQuyRoHbYYGWHgVhgPKypQKK5t/ogXKmp4ff189dsEdXsqN5ejTDZO02evR97czqJnaQ3uPCeJDpnB3cQUJQFCwoqqJmKk0DuSoq39PZR6Z8ACyr13uy8uNwHDlodwglMOojsuW6RMPIzeUrIpTWoKJLkJLSIuRDgif5LEcGn+QX7paeLUVqS+OQmuZ5KYvlc99l/rC3gZGm5xnpJsdoKRiB0QLN2WPvD/A0zzwKhIvw3QzTAtZQMpKNgkhWa4VkoFpneh7LrT2xLLKinvmwaE5DzvGgeQ27imL2imJXNIWS0WwWRDOoFZp1rTQ0i6wOjmb1VaBZhOrhfXO7IJq1WqHZLA/NIquDo1kslmxDs+PDOMZOHtBPQ0VNtAhAPsVTTWtPj6TvYFS1BsXySw8RZ0BJ3850xWraX68pUXEg48WrlirjBj4gP69R6OM+s3uHahRRz3HOZouYcugkJ4bYdRmP8wjF+A98WPBjDmdRa1g8uH7e0C+k6NgORH5emlVRk19prBcqZfPVpnKmWZqZV0opmNHU/B2h58WoGqWKRYXTMEvemoDOsSic9oIdjKo2S7FI8QmRMQ2niV0yYFOss38DiCmZ/g4dsuFTgZ4/RPRbn30bRXgCCaLnHtF8eREMXBY1o3HM7n4YB9QU4zqbdgrmZ5u2ckan5vmAmeL3mbhLw27CNfVG1Vs6ENPJr1RH7lLnhOZowdvTMH2TL7pY7f1Mn2dkqByjDaZPRQrna5clgN884LZ8wBvH1ZaPa4Wo5QjKxRcQ8PWWB9g/MUNLhAMHj6C/UDhh+QMZsEsCOETxok6dup4YRRO8PHaccCxpLlA7J3lI5mciyaxovc6ZkIr7J1kBfVViV6rswAmKFCvimsRQ1KoK4kDM4s/H2Ge6uly2Ppmn9XBfEiZOwZUY2p7JvcCoYHL/XFeSDriwKzFz11fjSlJIn1xSyueSuq7tBye+QSEwqjgp1cRKrH1hU8In6uHDIEaiPmuTRWboKyGL1LScGtLi/3NhwimzuqRRE4O6oLl4ioc+XMTUmMKKJKuYlC3tRS8MSK5rw/5kVcRLg32EKiKosu/Pt/aEiGsBiQfIHOVzYu5H/Ht687M5wlYPamTw6OmDb81KnOFWbdfEG2aLyRJVaHxaU9QbqtYORuV5Q6kGRWfYSZzh7zGKxaT2RXzhVuz9rZWx5FcsgJ6IJZotLgsEnND3tUSBUcV5CRDXsl2zNVRF5i+1SVAyHJZhlYpmqaXmKAeoWRdISl6nHfIREbSztt5zLVHCqurCtSao0a5fWNyOwL9xkUlDdKodQhYaXDQDqVJOwzx1Lky2VLCfcQqMdI5RSdUgXZMPePOiGPm4yqoGmaY9A7furel1bv78mng/RtOxZP7Thc6iYOxE8xFhsXsIqeVj6JdRdTwGoNEpaN55q3tHAY6RXiwGPBVoQJUPePNaQvm4Ki07qiLS0uI2X7xSvCgcLtzbAoknArp8oqilyfTTQccxMjhGpYFOPuDNoJOPq9q22cmWd1SrlRO3ruzpxMzWDkZVTyrF+k66MEqYVCpvOhf2v6KGa5PagiMo+aQzh2xlxsFSXVVMdT/f2CvZ4DAQdFvXFvX2gn/hermuW3mvmXYuX6pHrYplOhstlhP812YW+P7+cyNZjyDoKi+zp+2c8zxPdaQ751zjwdCNKpUA+BUiqUtc3zrXkmgBVKUFUMnCwuOIa1yN0+DcU+EmrrKDUdVxTVxZ2DmWJi4ocSngETVxpRNoSS+e9WnfiGnIc9yfq6O2q8ncX1t9AEal7k/l6yWpO1yzeFNi8fx2in2832yG7q9a3tf798NJ01J+eV/u0en2bPlOUWvfnbFCCYxnVJ7zk2pwc05f080OW3FY42Q+CzOlOkKp
NMSGw4lYpWBMJpf2FZ9q72BUsVWKy09373U47o0OW4Fc430OTTXP9nAbHcTYW5O3MpWU7HA9GFVMdQ77khRVkHet3spUVorJb+N8abGLiyBkb2Xq3F2/GhXwL0t5cRWI+UT6QibljYtjJ6TPNt82yzou+RsvKH9pGJQU+l7ZxNZU8yK3RJEfdF4rOp1TXemR709TzeRZFJ/mcox0jlFpKz3kA9680kM+rj17ofRw9arP5eWrF6aCy/8B1Vldc9o6EP01fqTjb+xHPtPMbRpamsntfekISwY1xvLIAkx/fSUsGxu5IeFioMMD3rW0Hp2ze7wLmjVYZncUJIsHAlGkmTrMNGuomaZh6C7/Ep5t7vH9bu6YUwzlor1jin8h6dSld4UhSmsLGSERw0ndGZA4RgGr+QClZFNfFpKo/tQEzJHimAYgUr3PGLJF7vUcfe//iPB8wcoDyztLUCyWjnQBINlUXNZIswaUEJZfLbMBigR4BS7d5FGHQ5g8efpL8KQ//OiMZ5082Pg9W8ojUBSzk0O/fJ78/O/LP6vt4vGZ/XQehrOnYadgaQ2ilQRMHpZtCwQ3C8zQNAGBsDc8SzSrv2DLiFsGvwRpkvMW4gzxZ/VDHEUDEhHKfTGJ+a5+yih5KfE3S0+xTDMtXXfdwYDfeeNhJShrRBnKKlTLw98hskSMbvkSedcsKJeJXJibfVbYXelbVDKidAKZifMy9B5tfiEBbwb/7sv2IfrR+2g8fqXjb6NBsCbDjoo9itdkqxBAySqGAtod3kfoqKLPYYUAeWHQBLgbeGgWNpLTFgW26dQo8FUKDLOBArctBgyFgfuUYcJdvbk4+5+Z0N/NxMgVn4vibZn1lC9FuQK41QR4KYJnR9xUEJ8OpwrOPBB/P6DjGLeGnH2AnKsi5zeJRVu4WQpumcBNTyjJVMW4On7OYeZdGz9bzbvJ/Xg84r5nQl8iAqCo+sn9WdU3DEMzaFRf6M5c59JqUFdfy7u2/DoKKVjILzyr7kIHedBu4sAzZ5Z7WQ5sXz/OgdOWIje2gGoXAnEaEH6mv0BXmvBrS1eaG2gFPRW0GPbEIMOtIAJpinNBAJSp7gqEKMPsX5HxHxxpfZf5L66HWdXYSuN9sKdkRQN0/GWNYG3EUsmpgN+Uu4WPoggwvK4PZk2MyCdMCBY9WKlffl2/zMOSyM8jd1UnoYNAtl0PVBZcEYhzM0dMCbRLkPLYp+eMp76LmnogjvonMOPjeH3civA8FjnDKUZcyPqieDCfd3vyxhJDKGL0KUrxLzDbxRPJkYjz7E7o9DVn2Jgur+b4YZWWQ7t8iFadi5uqV/9g2ZZfw77IsVNzo1hCwjBFrbCldqzXrHDOFN3mm7pOYX8vAgpjv29ntSQNsh/NK+WVdfpNSYhj/KEtfa+EON16oEtLiK8kZe+mJaQoojNIiOG51l8mIerw8TYJOaoVxhW6gaMlb99Uydt6vVLdk7uGw98PL1zyhtpqzr9OBqKEgNh9s7VfZP//rv0OL/5bL3X3bcLcQrVXOoNaY6BfvTF4bey8UZVw/HOpxOHkebJKcHP/X1O+fP+PnTX6DQ==7VzdcqM4E30aV+1cjIt/25exk+yXquxWKtmvkrmUQbZVwcgLcn7m6VcCCZCEbSBgU7szNxMJIdDpVnefVuORvdh+/B6D3eYPHMBwZBnBx8i+HlmWaRoe/Y/1fGY9s9kk61jHKOCDio4n9BPyToP37lEAE2kgwTgkaCd3+jiKoE+kPhDH+F0etsKh/NQdWEOt48kHod77jAKyyXqnrlH0/w+i9YbkC+ZXtkAM5h3JBgT4vdRl34zsRYwxyf7afixgyMDLcZm9TMLNdPdG/vzxsL93bp9f5t+zyW6b3JIvIYYR6XZqK5v6DYR7jleAtwBFYwITwpdNPgWWyTvahiCirfkKheEChzhOr9iBC6eBQ/sTEuNXWLoytZa259ErNRfBF/sGYwI/SiLki/od4i0k8Scdwq86Uy4frqC5vN4LcdtC3JuSqO0Z7wRcxdb53AWM9A+OZANUbQ3Vp4e7xxva9QRjujIN2BjvowCyKQ2K1PsGEfi0Az67+k63Je3bkC19hWuzT+itptCbpgy9qyNvWhXIe30B71QBf3tLkffAlsEYLZNdCogX0neZL6kovDX7a04FQKVhGTdRsMMo0pW/kYz6QtyVAc+NVQnxWQXgTl+AuxrgdwlBmJlPmGw0DOkqiQyUrLQR1owL7wIhWke06VNA6Qay5wwzRM38Fb+wRQETYLVkZNn1JZwc+k/F6pSkU2WIrL6k42nS0RW/XQ+zY8hn+2V+ZKOYjY0ZNKk5m1QZs5k3sUGfxsxW3Miswpg5FdKb9iW9iSa9R7hGFBhAd1iUmioSIxpbfUkAvW0GQ8bT0eGcVjnlvtCcHvTJV2v4VXM/HJeswm567oV98kwD/hnHryEGAcP+4W6YjtZUo0rzwvorni8ZcwU5GAVXjDIxPxmCJEG+DBb8QOSl9PcPhvHY5a3rDw552vgUDWpkPl+Kgaz5Q8zBGsVtaUvc10wwCd7HPjy2fD4QBhLd0+V3wteKvhiG1Iy+ySSxSmj8CQ9ZTJhvM9eVXb3pylNkC3rII8nbQxNZ8kRaqEZAvIZEm4jKGXyWhqUxa3LkhZXn2LxdqGQ2Y6GgOaZf0FlB5gqdvYXE36T3Ub1AhC3hN5/uSjr/K2StkTVnKYJ4z/insUzD8uSbrupUD+7BEoayhtcPE2OYoJ9gmc7H9JUDSCd35yP3ugFhFRtTtS15ToM/ZVROG1TZHGPsTN2JJCZBJduqqxiCV6sEkpFqhLqQsZ5CQIwCfAeVbpXOiHbJoTj9HIZdKKXYCBUU6pjh6N6w69mCm+gNfw4POscYGHI63W/uElu6N+FKC/f5o+w9K11pxy6RR7SZgzgyzh6Y51QCVFU96npOW410jXqeszPDp+c+4AfY7kI4xvFaU8QjuVMApyu/ii14/hQuVz3mTi0lyO0kdcqf9sg8X7ROk2r1H5dLUUqQ2PLTQEidewQInDOqkPQiXT130mcOtzsVcJvqgJrEnVYI5ayE0dQTHz1S9Qsir2SczMnFkdeTJBfg6i1U2DhtWM5L1vWkR/eZ16sj0mieeV2toOdXboRgMlsaDeKXFvLzFPldPPcqFOhCrKYFhBMZwosH55bO/c9Da5pjZzmegp15Yex0Tq3DVjPXJ1jIAHJ9QjInmY3YfSepjXBYA+E2jnnAjjXlNo5qEHvKCpoqpT9HVtDSEx7/vqxgvoX/m1lBS0/NXPDwsrlLULypd+F40tJzDfUcQkINBamZACtb/JID6MbmnzTlQn0HYsrVXIVtee1MuapKWszac5rK0hMZ/54SnRaxnlqiUy/U661Ex6pMdgy4ZK0N5HJ47eoVCWetWbP0LEc9YzrsY4Pa0bXIr52Orr1BmWRbja4ta9z61F2tkpmoU/VtlqsyRC0pXrtyDtndW135+7pK6NVUQmdQOmh5qv9oe3qlKLN95tMrEapWkDDqX/7es48a5tK3AKJT42G63g6HhYld1gELM+UCGqHpw+Vgtp6
B48njK7ahIuFLD51S4og88StMCP4GhcE9+MR7hjglFv6raM03OEY/6XhQxIyUd/CPgSxPGvHE7uRzpoKGD0KMptL1B/iQBt6DhIi3wWEIdgnKxMdu3NINg6I5JgRv+aD+UtxNYyCFBFR9KDGpsFqTaU8xkK1nGJMdogCN7Cs2MzUG5bPs2yRTnO9A0xjOF5qShBCuSIUJIJhFpQkNUlG0vk/HXDtFzyOHhnVheu8qTL3jhhoOGKUkggBy0HDMKcIL5iCpCbEWtG0W7dSq7HBMFjhiWQqUyh9ShXtn1q+2Ztj1VUOEMV49XVAJaHe6UJWPC2AMqN1/RuydmL049l3YLx34og641qV1oFkpFRdkAJJNnqoreXrW/wAIK9JIeyzDztVDfIRptWRDJebluDOJe42NyeQU/2KtBxgjChuLQfoIbUXu83Rh1rAqs9RTWHOqeJ/a7EqJbU1bmajv2LZttrSr4zNzZPeRSq2pf3WZlShSGIj6OUo5jKtm02sfnimfbjhnzrjaesZ1+9f9U4rQ3/sq/zkcwmQfyu61IUze1OnGMCl39EiZKrKxWeQ7/29Spu6+x2xMmdzp0DiTnjdWOJMUI+ecafkrXj5hdCa1dWMwnElP31ZwpmP14L904Is6cHHO5Ogp1GNh5jA4k+sowenMswbAmeoeSdnDilkn6pGUWppdN2adWNOxYUtzmaYxNgxzlv877wGVo2ePxRH0xhRH0LfMjvBe+pTigugM0JvoMr/R2eRvKBL614qfOpANO8pWKrzYdWoF1auZn02y/rTeLYbpVJkFDlL5k8249GqlF6l4t8OF1ydvtb7x5eRF2QZKsnB5hdb7OH0bFGlrL96X2mf+wqMahy/9r8iWVzTvbEWSO1RX1OJFv3Crw9b4vIFRuri8sFtXSF6RiEq6KAsbRIEK16JQTGbN2uh2Hdz4o09qzCIfyl9quae0JH8wr8rOxZsW6CWpiP1wH2SyzIZuwW6XtZeQ+vcUPX1V/FnyXj3LXnR1zRWr24LX7OVGJRpuEKzKk4G1o9SJST1dKWCLfBm7xoxhxMKuFQ28CMwQI3RtpXH/f7xjE179KcmlLksQ48+ClafbLYEVjUxj4OuKW6Gah06IMx3YJzDJZ/FxTHHd4UiolHKInAmDuhsUZPBmt+1gallKyI8PlUb1CdeEwXUXwG0a38HdKK1qygxjwCI7YRKrtG+RYpEt+/S2loHpQxmqqUiLkj9OL2iHW7T+YlzkmhXxV9ecVROQNtV/TVMKSobRq/p9LrsiWtSKM7rjEPo57DEOcbwWrWHauU2Ku+Ow364Z9osqyYGE/a76Q29m21S1Whys5rwbf+fROQ04/LFGU0umK/Zg0t75Nuwg7W2LcxVRKGR1o3Pyz5ZY8v1tUuK0Wfx4bTa8+Alg++Yf7Vzbkto4EP0aHkPZkm88DjPDbrZILRu2KsmjwAJcYyxii8Dk61eyJYMkYy6DMbNJXoLasmxOH7X6tMR04ONy+0eKVotPJMRxB1jhtgOfOgDYtuWx/7jltbD0en5hmKdRKDrtDOPoJxZGS1jXUYgzpSMlJKbRSjVOSZLgKVVsKE3JRu02I7H61BWaY8MwnqLYtH6JQroorIFr7ex/4mi+oOUXFleWSHYWhmyBQrLZM8HnDnxMCaHFp+X2EcccPInLZvh3SJaf/hmmwVNvNB8NX/7650Mx2OCcW8qvkOKEXndoUAz9A8VrgRfrg6KkS3FGxdemrxLLbBMtY5SwVn8WxfEjiUmaX4Ghi4PQYfaMpuQF710JwAR6HrsinoRTireaa458L7sEm7EUkyWm6Su7T4ziBMI/gqClvzY7d0Pp7sWeq2FPGJGg2Lwcewcj+yCQPANVaKA6Hn38/MxMY5wyBAxgU7JOQsyHtBhSm0VE8XiFpvzqhk1LZlvQJXuFJ/vG0INa6G1bhd5xDeRtUIG81xTwThXwgwFD3kNLDmMyyVY5IF7M3qU/Ya7w5vxTnzmAeQNYz0m4IlFikv8sHzWFuKsCXgarPcR7FYA7TQHuGYCb0F5m4TMlmnKP9GtcYZ89XbDNJoxfNV16ng9Rk9MFaoGq55jTxanwXtCU93zDe5/xPGLAIBqRJJ8MNI3Y6v0mBzQFJ7C06GPOhaAq7DeFZnAw6j/M8VsDyv0EfR1222s76vcM4L+Q9CUmKOTYjz52+NsM+Or7NJaWewzutp7J2C0zWj5fCe8acjgJH3iazlrTGGVZNFXBwtuIft37/I1j3HVF62krIM8br7LBws7r111H3vwmx+CN3W15S95XvBwODUVwilvYtyLrdIrr8IDV/tvzT1WiKW0pjllg/aG+XJXTxBNGRR5S0sOzFXpA4KpDFO8/KrOXwYGBgLxR8kxPDyhK55gaAzE/o9e9bnmelB1+Yf05ECoKhX0oRtwRtMT0DZy1Dc4OMJ0uuMRkMzbj75/wwDDJMz7epgs2/ecLGSB0gjM6DdGEyWKF1yiO5gknPaMWy+hhn8eDiOnOB3FhGYU8o4T9FGfRTzTJx+MsFbCxwd1+x32qCyhCFIubO6W0OS6ZrE5dpLG6TuD6inOkaLmUpHIY21dvIbNZhqnm9+t42pRZz8kP8mp6MI6jVYbbC+yOGtehZa6YdXHj+nHd1Ennx/ULY7RcD3ZrwLf9JaByPWg6rovMrQh7Nf1aDf+G9tPpcWr4h3oCZ50W/q82b12DfXjL5HmMuySdG0SsKTohHMymVUmwNw3wZNZg0QlomdpVak7iaZ95xE/meTXi9MeVXtx/HIDq01DM1qoEUdzn+W7WiHfNkkCTxa9GKeDWckCvfgUVTrmpDrJNPd+gAm0Rea2QYvutI29q/3uVoMdIbR0PNbfVoKa6v36J8aHGG+eXGGcz7E0rp0bo9yaW1aj/PM1/rRcZgSnIWkvTj4AHYKCA13qiLvPyaxRgZFZ9jwUYmRIezdTLLeRjqXoRkVsr1fjqLLy4VOPo07mhUo2tS9RblGqAKeB/wVINqK8dNFiq6al3NFepAWa94b42mo6sC9ps9lpOiUBlnv++trmPrcTy2JGA3DUX4pvucwMzwT9/IX4HFbOTF2KpNY8vxF6bCzG0dTkDuhfvmuj7nr4+VMOFM1AlhS7MBi/bjtunrdUF7hHmXszCU0nonUhCp00OAi2HgxcXbjUywxsXbuWCVpGvsfXl+5ofhOwr5welkfsoXXOLyOVM3r7L1K1Xu4hZXVvdAJXEvpRQzWdq0BTrokrywGdyItfSQwV6ktCxuMKdMF1EcThEr2TNYcwomr7IVn9B0ugn64+kr9nllIoDxMBTeoz5nWLM3NF4JH1ja6ZPaKt0HKKMyrchcYxWWVT4md+4ZBMmSvqEUrIUnW5ay6mlj5Z1umbWWcrM/ajlBw3lQNAsRmSriAHUgQ95cXOgbOMMsoI4H5DBGAYUNSBNiLHHI0wyCMR4RitCACU8K81Ykhol82He58nZWT4LaLiJsHtncb46LljgwAmnCaGIHgwcfYbwI18gWQgBj6xt79p5VFmRlD6ShGuZKHc2ZoTb8Oj3FmbAA9SQaYx3Ghdkv+tzoU
q6h5jpOZx9ifg78XhRd5b8NwfeyAEXtM2B804RCEeGKFuUgn5vpef2EaJ8fzK3AAuW9JA/3AAXqqE95eW4PUV7dS3fP6a/eGuE04jBxnOQm6S2smRy/ExCy2fSNFEUaKvPyepKy21tqA3UdG5rHkqoI/P1K+12R9FWbUurU5WVfSAFvg39HG0n2NVrbifX2bWjt46+HdY0/cxTE8t/h+Mcoe/rqvXzXQomWF/1Y4LJC5zrBCbtjgYlU0U1tsh8+7+mZGr0Fzb1kskN7k0zmXVjTTMpOXKpmSa/8+Xz82W/Nl9uXzOZ5dsKzVR3FPI3B97IgdY1k2OWUOvSzPvQTK6jJac9D9yjZjp1Swq2mrP6+paUfirx1JzVB0HXgspYtm11Lcvulf9uu0HlmBXCOnbX75KeKYguEV9NExKeSEhwIGrdhpBuTyVkuc10tojSKtbGCYizDytdnaCHTxztNquUFfh/voNVzNgaQQbl+Vu5hQWuwzn1p4xAvf8Sscaauz/FUnTf/UEb+Pwf \ No newline at end of file diff --git a/POC/deploy-all.sh b/POC/deploy-all.sh new file mode 100755 index 00000000..13bf6c43 --- /dev/null +++ b/POC/deploy-all.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +./create-namespaces.sh +kubectl apply -f ./configmaps.yaml + +(cd spire ; ./deploy-spire.sh) + +(cd istio ; ./deploy-istio.sh) +(cd bookinfo ; ./deploy-bookinfo.sh) diff --git a/POC/doc/aws-policies/apply-ecr-policy.sh b/POC/doc/aws-policies/apply-ecr-policy.sh new file mode 100755 index 00000000..927bcd12 --- /dev/null +++ b/POC/doc/aws-policies/apply-ecr-policy.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +declare -a image_names=("install-cni" "operator" + "istioctl" "app_sidecar_centos_7" + "app_sidecar_centos_8" "app_sidecar_debian_10" "app_sidecar_debian_9" + "app_sidecar_ubuntu_focal" "app_sidecar_ubuntu_bionic" "app_sidecar_ubuntu_xenial" + "app" "proxyv2" "pilot" + ) + +for image_name in "${image_names[@]}" +do + aws ecr set-repository-policy --repository-name mithril/$image_name --policy-text file://ecr-policy.json +done + + \ No newline at end of file diff --git a/POC/doc/aws-policies/apply-s3-policy.sh b/POC/doc/aws-policies/apply-s3-policy.sh new file mode 100755 index 00000000..630e570e --- /dev/null +++ b/POC/doc/aws-policies/apply-s3-policy.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +aws s3api put-bucket-policy --bucket mithril-customer-assets --policy file://s3-policy.json \ No newline at end of file diff --git a/POC/doc/aws-policies/aws-policies.md b/POC/doc/aws-policies/aws-policies.md new file mode 100644 index 00000000..c005f374 --- /dev/null +++ b/POC/doc/aws-policies/aws-policies.md @@ -0,0 +1,63 @@ +# AWS policies + +You manage access in AWS by creating policies and attaching them to IAM identities (users, groups of users, or roles) or AWS resources. A policy is an object in AWS that, when associated with an identity or resource, defines their permissions. + +There are many types of AWS policies, one of them is **resource-based policies**. They are inline policies attached to resources. + +Policies usually contain a "**Version**" and one or more **Statement**. +As a best practice, always use the latest 2012-10-17 version, and the content of "Statement" depends on the policy type. + +Resource-based policies contain four main elements in their Statement: +- **"Effect"**: Use Allow or Deny to indicate whether the policy allows or denies access. +- **"Action"**: Include a list of actions that the policy allows or denies, and they are related to the resource. +- **"Resource"**: Specify a list of resources to which the actions apply. If you do not include this element, then the resource to which the action applies is the resource to which the policy is attached. +- **"Principal"**: Indicate the account, user, role, or federated user to which you would like to allow or deny access. + +There is also **"Sid"**, which is optional, and is responsible for providing a statement ID to differentiate between statements. 
+ +``` +{ + "Version": "2012-10-17", + "Statement": [{ + "Sid": "FirstStatement", + "Effect": "Allow", + "Action": ["ResourceActionOne", "ResourceActionTwo"], + "Resource": ["ResourceOne", "ResourceTwo"], + "Principal": ["IAMIdentityOne", "IAMIdentityTwo"] + }] +} +``` + +Since groups are not considered IAM principals, it is not possible to share resources with them. + +## Applying resource-based policies for S3 + +[Our bucket](https://s3.console.aws.amazon.com/s3/buckets/mithril-customer-assets?region=us-east-1&tab=objects) name is `mithril-customer-assets` and it is located at region `us-east-1`. + +In order to give reading access to our files for a user outside scytale-dev AWS account, we need to allow the actions `S3:GetObject` and `s3:ListBucket`. `s3:GetObject` needs an object as a resource, and `s3:ListBucket` needs a bucket, so, respectively, the resources are going to be `s3://mithril-customer-assets/*` and `s3://mithril-customer-assets`. + +A sample policy for sharing our S3 with an IAM User is available at `s3-policy.json` and it can be applied using the [apply-s3-policy.sh](apply-s3-policy.sh) script. + +To check the current policies: +``` +aws s3api get-bucket-policy --bucket mithril-customer-assets +``` + +## Applying resource-based policies for ECR + +Amazon ECR repository policies are a subset of IAM policies that are scoped for, and specifically used for, controlling access to individual Amazon ECR repositories. + +[Our images](https://console.aws.amazon.com/ecr/repositories?region=us-east-1) are at us-east-1, under multiple repositories, with prefix name `mithril`. + +In order to give reading access to our images for a user outside scytale-dev AWS account, we need to allow the actions `ecr:BatchGetImage` and `ecr:GetDownloadUrlForLayer`. Since the resource-based policy is applied to a specific repository, it is not necessary to add a resource. + +A sample policy for sharing our ECR with an IAM Account is available at `ecr-policy.json` and it can be applied using the [apply-ecr-policy.sh](apply-ecr-policy.sh) script. + +To check the current policies: +``` +aws ecr get-repository-policy --repository-name mithril/app +``` + +## Attention + +When applying the policies, make sure to preserve the past configuration, since it will be overrided! 
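A note on the warning above: one way to preserve the existing configuration is to merge the current policy's statements into the new policy before re-applying it. The snippet below is a minimal sketch, not part of the repository scripts; it assumes `jq` is installed and that the bucket already has a policy attached (if it does not, `get-bucket-policy` fails and `s3-policy.json` can be applied directly). The intermediate file names are arbitrary.

```
# Merge the statements of the existing bucket policy with s3-policy.json,
# then re-apply the merged document so the previous configuration is kept.
# Assumes jq is available and a policy is already attached to the bucket.
aws s3api get-bucket-policy --bucket mithril-customer-assets \
    --query Policy --output text > current-policy.json

jq -s '{Version: "2012-10-17", Statement: (.[0].Statement + .[1].Statement)}' \
    current-policy.json s3-policy.json > merged-policy.json

aws s3api put-bucket-policy --bucket mithril-customer-assets --policy file://merged-policy.json
```

The same caveat applies to the ECR repository policies: `set-repository-policy` replaces the whole policy document rather than amending it, so fetch the current document with `get-repository-policy` and merge it in the same way if an existing policy must be kept.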
diff --git a/POC/doc/aws-policies/ecr-lifecycle.json b/POC/doc/aws-policies/ecr-lifecycle.json new file mode 100644 index 00000000..b36f5b7c --- /dev/null +++ b/POC/doc/aws-policies/ecr-lifecycle.json @@ -0,0 +1,47 @@ +{ + "rules": [ + { + "rulePriority": 10, + "description": "Keep latest image", + "selection": { + "tagStatus": "tagged", + "tagPrefixList": [ + "latest" + ], + "countType": "imageCountMoreThan", + "countNumber": 9999 + }, + "action": { + "type": "expire" + } + }, + { + "rulePriority": 14, + "description": "Keep stable images", + "selection": { + "tagStatus": "tagged", + "tagPrefixList": [ + "stable" + ], + "countType": "imageCountMoreThan", + "countNumber": 9999 + }, + "action": { + "type": "expire" + } + }, + { + "rulePriority": 20, + "description": "Expire images older than 1 day", + "selection": { + "tagStatus": "any", + "countType": "sinceImagePushed", + "countUnit": "days", + "countNumber": 1 + }, + "action": { + "type": "expire" + } + } + ] +} \ No newline at end of file diff --git a/POC/doc/aws-policies/ecr-policy.json b/POC/doc/aws-policies/ecr-policy.json new file mode 100644 index 00000000..86952a7d --- /dev/null +++ b/POC/doc/aws-policies/ecr-policy.json @@ -0,0 +1,14 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "MithrilPOC", + "Effect": "Allow", + "Principal": "*", + "Action": [ + "ecr:BatchGetImage", + "ecr:GetDownloadUrlForLayer" + ] + } + ] + } \ No newline at end of file diff --git a/POC/doc/aws-policies/s3-policy.json b/POC/doc/aws-policies/s3-policy.json new file mode 100644 index 00000000..252f1da7 --- /dev/null +++ b/POC/doc/aws-policies/s3-policy.json @@ -0,0 +1,17 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": "*", + "Action": [ + "s3:GetObject", + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::mithril-customer-assets", + "arn:aws:s3:::mithril-customer-assets/*" + ] + } + ] +} \ No newline at end of file diff --git a/POC/doc/poc-instructions.md b/POC/doc/poc-instructions.md new file mode 100644 index 00000000..2f5cefa5 --- /dev/null +++ b/POC/doc/poc-instructions.md @@ -0,0 +1,151 @@ +# Mithril POC + +Currently, it deploys to local `kind ` cluster the istio `bookinfo` example. The four workloads from the example (details, productpage, ratings, and reviews) are deployed in the `default` namespace. + +## Minimal configuration + +- 4 CPUs +- 8 GB RAM +- 20 GB (for POC *only*) + +## Requirements + +- docker + +### Install kubectl client + +[Install the kubernetes client for your operating system](https://kubernetes.io/docs/tasks/tools/#kubectl) + +### Install istioctl: + +``` +curl -L https://istio.io/downloadIstio | ISTIO_VERSION=1.10.1 sh - +``` + +Should work with istio `1.9.1` and `1.10.1`. 
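As an optional sanity check before continuing, confirm that the downloaded client is on the `PATH` and reports the expected version. The path below assumes the default unpack location used by the download command above; adjust the version directory if `1.9.1` was installed instead.

```bash
# The download script unpacks into ./istio-<version>/bin; put it on the PATH
# and print only the client version (no cluster connection is needed yet).
export PATH="$PWD/istio-1.10.1/bin:$PATH"
istioctl version --remote=false
```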
+ +## Install Kind + +Follow [kind install instructions](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) + + +## Install AWS CLI and configure it + +Follow [aws cli install and configure instructions](https://aws.amazon.com/cli/?nc1=h_ls) + +## Create the cluster and the local docker registry + +```bash +./create-kind-cluster.sh +``` + +## Running the POC locally +In order to run the POC locally, + +```bash +TAG=stable_20211022 \ +HUB=public.ecr.aws/e4m8j0n8/mithril \ +./deploy-all.sh +``` + +Wait for all pods are to reach `Running` state: + +```bash +kubectl get pods -A +``` + +Expected output: + +``` +NAMESPACE NAME READY STATUS RESTARTS AGE +default details-v1-c658fff7-cvj8d 2/2 Running 0 6m19s +default productpage-v1-5f85c6d9d8-mb6jm 2/2 Running 0 6m18s +default ratings-v1-66db75fdb9-jv4ln 2/2 Running 0 6m19s +default reviews-v1-dbcbb4f7c-jzkh5 2/2 Running 0 6m19s +default reviews-v2-64854577cd-cw7zw 2/2 Running 0 6m18s +default reviews-v3-bd5fcc875-8b722 2/2 Running 0 6m18s +istio-system istio-ingressgateway-849d55784b-fwz7m 1/1 Running 0 6m36s +istio-system istiod-5c79c669f9-7qx5m 1/1 Running 0 6m49s +kube-system coredns-74ff55c5b-pl5wd 1/1 Running 0 19m +kube-system coredns-74ff55c5b-zq798 1/1 Running 0 19m +kube-system etcd-kind-control-plane 1/1 Running 0 19m +kube-system kindnet-cxrzk 1/1 Running 0 19m +kube-system kube-apiserver-kind-control-plane 1/1 Running 0 19m +kube-system kube-controller-manager-kind-control-plane 1/1 Running 0 19m +kube-system kube-proxy-xzjgd 1/1 Running 0 19m +kube-system kube-scheduler-kind-control-plane 1/1 Running 0 19m +local-path-storage local-path-provisioner-78776bfc44-4dp4x 1/1 Running 0 19m +spire spire-agent-w9jfd 1/1 Running 0 6m21s +spire spire-server-0 2/2 Running 0 6m24s +``` + +### SPIRE Entries +The SPIRE entries can be checked using the following command: + +``` +kubectl exec -i -t pod/spire-server-0 -n spire -c spire-server -- /bin/sh -c "bin/spire-server entry show -socketPath /run/spire/sockets/server.sock" +``` + +## Test example + +### Inside the cluster: + +```bash +kubectl exec "$(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}')" -c ratings -- curl -sS productpage:9080/productpage +``` + +The output is an HTML page that should not have any error sections. + +### Outside the cluster: + +Forward host port 8000 to port 8080 (ingressgateway pod port) inside the cluster: + +```bash +./forward-port.sh + +Forwarding from 127.0.0.1:8000 -> 8080 +Forwarding from [::1]:8000 -> 8080 +``` + +Make a request from the host: + +```bash +curl localhost:8000/productpage +``` + +Or open in the browser `localhost:8000/productpage`. + +The output is an HTML page that should not have any error sections. + +### Ingress mTLS and Federation + +Forward host port 7000 to port 7080 (ingressgateway-mtls pod port) inside the cluster: + +```bash +> ./forward-secure-port.sh + +Forwarding from 127.0.0.1:7000 -> 7080 +Forwarding from [::1]:7000 -> 7080 +``` + +#### Generate certs + +Mint SVID in the trust domain `domain.test`: + +```bash +> kubectl exec --stdin --tty -n spire2 spire-server-0 -- /opt/spire/bin/spire-server x509 mint -spiffeID spiffe://domain.test/myservice -socketPath /run/spire/sockets/server.sock +``` + +Copy the X509-SVID section of the output to a file `svid.pem`. +Copy the Private key section of the output to a file `key.pem`. 
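Instead of copying the two sections by hand, the captured output can be split into `svid.pem` and `key.pem`. This is only a sketch: it assumes the SVID is printed as `CERTIFICATE` PEM blocks and the key as a `PRIVATE KEY` PEM block, so adjust the markers if the mint output labels them differently.

```bash
# Capture the mint output to a file (no --stdin/--tty here, so the PEM lines
# are not mangled by a TTY), then cut out the certificate and key blocks.
kubectl exec -n spire2 spire-server-0 -- \
  /opt/spire/bin/spire-server x509 mint \
  -spiffeID spiffe://domain.test/myservice \
  -socketPath /run/spire/sockets/server.sock > mint-output.txt

# If the output also prints CA certificate blocks, the first sed will include
# them in svid.pem; trim the file if only the leaf chain is wanted.
sed -n '/-----BEGIN CERTIFICATE-----/,/-----END CERTIFICATE-----/p' mint-output.txt > svid.pem
sed -n '/-----BEGIN PRIVATE KEY-----/,/-----END PRIVATE KEY-----/p' mint-output.txt > key.pem
```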
+ +#### Test TLS request + +```bash +> curl --cert svid.pem --key key.pem -k -I https://localhost:8000/productpage + +HTTP/2 200 +content-type: text/html; charset=utf-8 +content-length: 5183 +server: istio-envoy +``` \ No newline at end of file diff --git a/POC/forward-port.sh b/POC/forward-port.sh new file mode 100755 index 00000000..82900d54 --- /dev/null +++ b/POC/forward-port.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +INGRESS_POD=$(kubectl get pod -l istio=ingressgateway -n istio-system -o jsonpath="{.items[0].metadata.name}") +kubectl port-forward --address 0.0.0.0 "$INGRESS_POD" 8000:8080 -n istio-system & diff --git a/POC/img/overview.png b/POC/img/overview.png new file mode 100644 index 00000000..fce74039 Binary files /dev/null and b/POC/img/overview.png differ diff --git a/POC/istio/auth.yaml b/POC/istio/auth.yaml new file mode 100644 index 00000000..ad906296 --- /dev/null +++ b/POC/istio/auth.yaml @@ -0,0 +1,8 @@ +apiVersion: security.istio.io/v1beta1 +kind: PeerAuthentication +metadata: + name: default + namespace: istio-system +spec: + mtls: + mode: STRICT diff --git a/POC/istio/cleanup-istio.sh b/POC/istio/cleanup-istio.sh new file mode 100755 index 00000000..642ba64c --- /dev/null +++ b/POC/istio/cleanup-istio.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +istioctl x uninstall --purge +kubectl delete namespaces istio-system diff --git a/POC/istio/deploy-istio.sh b/POC/istio/deploy-istio.sh new file mode 100755 index 00000000..52899da4 --- /dev/null +++ b/POC/istio/deploy-istio.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +if [[ $HUB ]]; then + echo "Using HUB from environment: $HUB" + args="$args --set values.global.hub=$HUB" +else + echo "No HUB set, using default value from istio-config.yaml" +fi + +if [[ $TAG ]]; then + echo "Using TAG from environment: $TAG" + args="$args --set values.global.tag=$TAG" +else + echo "No TAG set, using default value from istio-config.yaml" +fi + +istioctl install -f istio-config.yaml --skip-confirmation $args +kubectl apply -f auth.yaml diff --git a/POC/istio/istio-config.yaml b/POC/istio/istio-config.yaml new file mode 100644 index 00000000..c087e0ff --- /dev/null +++ b/POC/istio/istio-config.yaml @@ -0,0 +1,91 @@ +apiVersion: operator.istio.io/v1alpha1 +kind: IstioOperator +metadata: + namespace: istio-system +spec: + profile: default + meshConfig: + trustDomain: example.org + values: + global: + hub: localhost:5000 + tag: my-build + imagePullPolicy: "Always" + imagePullSecrets: + - secret-registry + # This is used to customize the sidecar template + sidecarInjectorWebhook: + templates: + spire: | + spec: + containers: + - name: istio-proxy + env: + - name: CA_PROVIDER + value: "spiffe" + - name: SPIFFE_ENDPOINT_SOCKET + value: "unix:///run/spire/sockets/agent.sock" + volumeMounts: + - name: spire-agent-socket + mountPath: /run/spire/sockets + readOnly: true + volumes: + - name: spire-agent-socket + csi: + driver: "csi.spiffe.io" + components: + pilot: + k8s: + env: + # Disable istiod CA Sever functionality + - name: ENABLE_CA_SERVER + value: "false" + # Check that istio-agent's namespace and service account match the ones in the JWT token presented in the connection + - name: PILOT_ENABLE_XDS_IDENTITY_CHECK + value: "true" + # Configure the SPIFFE Workload API as the cert provider for istiod + - name: PILOT_CERT_PROVIDER + value: "spiffe" + - name: SPIFFE_ENDPOINT_SOCKET + value: "unix:///run/spire/sockets/agent.sock" + overlays: + - apiVersion: apps/v1 + kind: Deployment + name: istiod + patches: + - path: 
spec.template.spec.containers.[name:discovery].volumeMounts[7] + value: + name: spire-agent-socket + mountPath: "/run/spire/sockets" + readOnly: true + - path: spec.template.spec.volumes[7] + value: + name: spire-agent-socket + csi: + driver: "csi.spiffe.io" + ingressGateways: + - name: istio-ingressgateway + enabled: true + label: + istio: ingressgateway + k8s: + overlays: + - apiVersion: apps/v1 + kind: Deployment + name: istio-ingressgateway + patches: + - path: spec.template.spec.volumes[8] + value: + name: spire-agent-socket + csi: + driver: "csi.spiffe.io" + - path: spec.template.spec.containers.[name:istio-proxy].volumeMounts[8] + value: + name: spire-agent-socket + mountPath: "/run/spire/sockets" + readOnly: true + env: + - name: CA_PROVIDER + value: "spiffe" + - name: SPIFFE_ENDPOINT_SOCKET + value: "unix:///run/spire/sockets/agent.sock" diff --git a/POC/patches/poc.master.patch b/POC/patches/poc.master.patch new file mode 100644 index 00000000..db51b5a6 --- /dev/null +++ b/POC/patches/poc.master.patch @@ -0,0 +1,518 @@ +diff --git a/pilot/pkg/bootstrap/certcontroller.go b/pilot/pkg/bootstrap/certcontroller.go +index 8fba7a262f..3e9330d28a 100644 +--- a/pilot/pkg/bootstrap/certcontroller.go ++++ b/pilot/pkg/bootstrap/certcontroller.go +@@ -303,3 +303,32 @@ func (s *Server) loadIstiodCert() error { + s.certMu.Unlock() + return nil + } ++ ++func (s *Server) setIstioCertBundleAndNotify(certChain []byte, key []byte, bundle []byte) { ++ keyPair, err := tls.X509KeyPair(certChain, key) ++ ++ if err != nil { ++ log.Errorf("istiod loading x509 key pairs failed: %v", err) ++ return ++ } ++ for _, c := range keyPair.Certificate { ++ x509Cert, err := x509.ParseCertificates(c) ++ if err != nil { ++ log.Errorf("x509 cert - ParseCertificates() error: %v", err) ++ return ++ } ++ for _, c := range x509Cert { ++ log.Infof("x509 cert - Issuer: %q, Subject: %q, SN: %x, NotBefore: %q, NotAfter: %q", ++ c.Issuer, c.Subject, c.SerialNumber, ++ c.NotBefore.Format(time.RFC3339), c.NotAfter.Format(time.RFC3339)) ++ } ++ } ++ ++ s.certMu.Lock() ++ s.istiodCert = &keyPair ++ s.certMu.Unlock() ++ log.Info("istiod certificates are set") ++ ++ s.istiodCertBundleWatcher.SetAndNotify(nil, nil, bundle) ++ log.Info("istiod Cert Bundle Watcher notified") ++} +diff --git a/pilot/pkg/bootstrap/server.go b/pilot/pkg/bootstrap/server.go +index 2459a71c44..66788b49ec 100644 +--- a/pilot/pkg/bootstrap/server.go ++++ b/pilot/pkg/bootstrap/server.go +@@ -21,6 +21,8 @@ import ( + "encoding/json" + "errors" + "fmt" ++ "github.com/spiffe/go-spiffe/v2/spiffeid" ++ "github.com/spiffe/go-spiffe/v2/workloadapi" + "net" + "net/http" + "os" +@@ -186,6 +188,9 @@ type Server struct { + statusManager *status.Manager + // RWConfigStore is the configstore which allows updates, particularly for status. + RWConfigStore model.ConfigStoreCache ++ ++ // source of X.509 certs and bundle, when using SPIFFE Workload API as cert provider ++ x509Source *workloadapi.X509Source + } + + // NewServer creates a new Server instance based on the provided arguments. 
+@@ -218,6 +223,16 @@ func NewServer(args *PilotArgs, initFuncs ...func(*Server)) (*Server, error) { + for _, fn := range initFuncs { + fn(s) + } ++ ++ if strings.EqualFold(features.PilotCertProvider, constants.CertProviderSpiffe) { ++ ctx := context.Background() ++ x509Source, err := workloadapi.NewX509Source(ctx) ++ if err != nil { ++ return nil, fmt.Errorf("failed creating spiffe X.509 source: %v", err) ++ } ++ s.x509Source = x509Source ++ } ++ + // Initialize workload Trust Bundle before XDS Server + e.TrustBundle = s.workloadTrustBundle + s.XDSServer = xds.NewDiscoveryServer(e, args.Plugins, args.PodName, args.Namespace, args.RegistryOptions.KubeOptions.ClusterAliases) +@@ -995,6 +1010,20 @@ func (s *Server) initIstiodCerts(args *PilotArgs, host string) error { + if err == nil { + err = s.initIstiodCertLoader() + } ++ } else if strings.EqualFold(features.PilotCertProvider, constants.CertProviderSpiffe) { ++ chain, key, err := s.getCertAndKeyBytes() ++ if err != nil { ++ return err ++ } ++ ++ bundleBytes, err := s.getBundleBytes() ++ if err != nil { ++ return err ++ } ++ ++ s.setIstioCertBundleAndNotify(chain, key, bundleBytes) ++ s.watchIstioCertUpdates() ++ return nil + } + + return err +@@ -1279,3 +1308,57 @@ func (s *Server) initStatusManager(_ *PilotArgs) { + return nil + }) + } ++ ++func (s *Server) watchIstioCertUpdates() { ++ go func() { ++ updatedChan := s.x509Source.Updated() ++ for { ++ <-updatedChan ++ chain, key, err := s.getCertAndKeyBytes() ++ if err != nil { ++ log.Errorf("error watching SPIFFE updates: %v", err) ++ continue ++ } ++ ++ bundleBytes, err := s.getBundleBytes() ++ if err != nil { ++ log.Errorf("error watching SPIFFE updates: %v", err) ++ continue ++ } ++ ++ s.setIstioCertBundleAndNotify(chain, key, bundleBytes) ++ } ++ }() ++} ++ ++func (s *Server) getCertAndKeyBytes() ([]byte, []byte, error) { ++ svid, err := s.x509Source.GetX509SVID() ++ if err != nil { ++ return nil, nil, fmt.Errorf("failed fetching X.509 SVID: %v", err) ++ } ++ ++ chain, key, err := svid.Marshal() ++ if err != nil { ++ return nil, nil, fmt.Errorf("unable to marshal X.509 SVID: %v", err) ++ } ++ ++ return chain, key, nil ++} ++ ++func (s *Server) getBundleBytes() ([]byte, error) { ++ trustDomain, err := spiffeid.TrustDomainFromString(spiffe.GetTrustDomain()) ++ if err != nil { ++ return nil, fmt.Errorf("error trying to parse trust domain %q reason: %v", spiffe.GetTrustDomain(), err) ++ } ++ ++ bundle, err := s.x509Source.GetX509BundleForTrustDomain(trustDomain) ++ if err != nil { ++ return nil, fmt.Errorf("unable to find X.509 bundle for trust domain %q: %v", trustDomain, err) ++ } ++ ++ bundleBytes, err := bundle.Marshal() ++ if err != nil { ++ return nil, fmt.Errorf("unable to marshal X.509 byndle: %v", err) ++ } ++ return bundleBytes, nil ++} +diff --git a/pkg/config/constants/constants.go b/pkg/config/constants/constants.go +index 77b0956925..bfd88600b0 100644 +--- a/pkg/config/constants/constants.go ++++ b/pkg/config/constants/constants.go +@@ -139,4 +139,6 @@ const ( + // CertProviderNone does not create any certificates for the control plane. It is assumed that some external + // load balancer, such as an Istio Gateway, is terminating the TLS. 
+ CertProviderNone = "none" ++ // CertProviderSpiffe uses the SPIFFE Workload API to fetch certificates ++ CertProviderSpiffe = "spiffe" + ) +diff --git a/pkg/istio-agent/agent.go b/pkg/istio-agent/agent.go +index 4ded6de207..987947aa43 100644 +--- a/pkg/istio-agent/agent.go ++++ b/pkg/istio-agent/agent.go +@@ -553,6 +553,9 @@ func (a *Agent) FindRootCAForXDS() (string, error) { + if a.cfg.XDSRootCerts == security.SystemRootCerts { + // Special case input for root cert configuration to use system root certificates + return "", nil ++ } else if strings.EqualFold(a.secOpts.CAProviderName, constants.CertProviderSpiffe) { ++ // The root cert is provided by the SPIFFE secret manager ++ return "", nil + } else if a.cfg.XDSRootCerts != "" { + // Using specific platform certs or custom roots + rootCAPath = a.cfg.XDSRootCerts +diff --git a/pkg/security/security.go b/pkg/security/security.go +index c1cf9b48f6..e0c919f5e0 100644 +--- a/pkg/security/security.go ++++ b/pkg/security/security.go +@@ -17,6 +17,7 @@ package security + import ( + "context" + "fmt" ++ "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "net/http" + "os" + "strings" +@@ -305,6 +306,9 @@ type SecretItem struct { + CreatedTime time.Time + + ExpireTime time.Time ++ ++ // Trust bundles keyed by trust domain ++ TrustBundles *x509bundle.Set + } + + type CredFetcher interface { +diff --git a/security/pkg/nodeagent/cache/spiffesecret.go b/security/pkg/nodeagent/cache/spiffesecret.go +new file mode 100644 +index 0000000000..47938e400c +--- /dev/null ++++ b/security/pkg/nodeagent/cache/spiffesecret.go +@@ -0,0 +1,217 @@ ++package cache ++ ++import ( ++ "bytes" ++ "fmt" ++ "sync" ++ "time" ++ ++ "github.com/spiffe/go-spiffe/v2/spiffeid" ++ "github.com/spiffe/go-spiffe/v2/workloadapi" ++ "golang.org/x/net/context" ++ "google.golang.org/grpc/codes" ++ "google.golang.org/grpc/status" ++ "istio.io/istio/pkg/security" ++ "istio.io/pkg/log" ++) ++ ++// SpiffeSecretManager is a source of SecretItems (X.509 SVIDs and trust bundles) maintained via the ++// Workload API. Implements the ++type SpiffeSecretManager struct { ++ sync.RWMutex ++ trustDomain spiffeid.TrustDomain ++ configTrustBundle []byte ++ secretItem *security.SecretItem ++ notifyCallback func(resourceName string) ++ cancelWatcher context.CancelFunc ++ updatedCh chan struct{} ++} ++ ++// NewSpiffeSecretManager creates a new SpiffeSecretManager. It blocks until the initial update ++// has been received from the Workload API. ++func NewSpiffeSecretManager(opt *security.Options) (*SpiffeSecretManager, error) { ++ td, err := spiffeid.TrustDomainFromString(opt.TrustDomain) ++ if err != nil { ++ return nil, fmt.Errorf("error trying to parse trust domain %q reason: %v", opt.TrustDomain, err) ++ } ++ ++ sm := &SpiffeSecretManager{ ++ trustDomain: td, ++ } ++ ++ ctx, cancel := context.WithCancel(context.Background()) ++ sm.cancelWatcher = cancel ++ sm.updatedCh = make(chan struct{}) ++ ++ go sm.watcherTask(ctx) ++ ++ err = sm.WaitUntilUpdated(ctx) ++ if err != nil { ++ return nil, fmt.Errorf("error initializing the SPIFFE secret manager") ++ } ++ ++ return sm, nil ++} ++ ++// WaitUntilUpdated waits until the secret manager is updated or the context is done, ++// in which case ctx.Err() is returned. ++func (w *SpiffeSecretManager) WaitUntilUpdated(ctx context.Context) error { ++ select { ++ case <-w.updatedCh: ++ return nil ++ case <-ctx.Done(): ++ return ctx.Err() ++ } ++} ++ ++// Updated returns a channel that is sent on whenever the secret manager is updated. 
++func (w *SpiffeSecretManager) Updated() <-chan struct{} { ++ return w.updatedCh ++} ++ ++// GenerateSecret generates a SecretItem for the given resourceName (default or ROOTCA). ++func (s *SpiffeSecretManager) GenerateSecret(resourceName string) (*security.SecretItem, error) { ++ s.RLock() ++ defer s.RUnlock() ++ ++ si := s.secretItem ++ if si == nil { ++ return nil, fmt.Errorf("secret was not in cache for resource: %v", resourceName) ++ } ++ ++ if resourceName == security.RootCertReqResourceName { ++ ++ ns := &security.SecretItem{ ++ ResourceName: resourceName, ++ RootCert: si.RootCert, ++ // adding all trust bundles ++ TrustBundles: si.TrustBundles, ++ } ++ ++ cacheLog.WithLabels("ttl", time.Until(si.ExpireTime)).Info("returned workload trust anchor from cache") ++ return ns, nil ++ } ++ ++ ns := &security.SecretItem{ ++ ResourceName: resourceName, ++ CertificateChain: si.CertificateChain, ++ PrivateKey: si.PrivateKey, ++ ExpireTime: si.ExpireTime, ++ CreatedTime: si.CreatedTime, ++ } ++ cacheLog.WithLabels("ttl", time.Until(si.ExpireTime)).Info("returned workload certificate from cache") ++ return ns, nil ++} ++ ++// UpdateConfigTrustBundle updates the configTrustBundle and calls the notify callback function. ++func (s *SpiffeSecretManager) UpdateConfigTrustBundle(trustBundle []byte) error { ++ log.WithLabels("UpdateConfigTrustBundle").Info(string(trustBundle)) ++ s.Lock() ++ defer s.Unlock() ++ ++ if bytes.Equal(s.configTrustBundle, trustBundle) { ++ return nil ++ } ++ s.configTrustBundle = trustBundle ++ s.callUpdateCallback(security.RootCertReqResourceName) ++ return nil ++} ++ ++// Close closes the SPIFFE secret manager instance. ++func (s *SpiffeSecretManager) Close() { ++ if s.cancelWatcher != nil { ++ log.Info("closing SPIFFE secret manager") ++ s.cancelWatcher() ++ } ++} ++ ++// SetUpdateCallback configures the manager with a notify callback function. ++func (s *SpiffeSecretManager) SetUpdateCallback(f func(resourceName string)) { ++ s.Lock() ++ defer s.Unlock() ++ s.notifyCallback = f ++} ++ ++// OnX509ContextUpdate is run every time a new update is pushed by the SPIFFE Workload API. 
++func (s *SpiffeSecretManager) OnX509ContextUpdate(c *workloadapi.X509Context) { ++ log.Info("got new identities from the SPIFFE Workload API") ++ if len(c.SVIDs) < 1 { ++ log.Error("identities were not found on workload API response") ++ return ++ } ++ if len(c.SVIDs[0].Certificates) < 1 { ++ log.Error("leaf certificate was not found on workload API response") ++ return ++ } ++ ++ svid := c.DefaultSVID() ++ workloadChain, workloadKey, err := svid.Marshal() ++ if err != nil { ++ log.Fatalf("unable to marshal X.509 SVID: %v", err) ++ return ++ } ++ ++ bundle, ok := c.Bundles.Get(s.trustDomain) ++ if !ok { ++ log.WithLabels("trust_domain", s.trustDomain).Fatal("unable to get trust bundle for trust domain") ++ return ++ } ++ ++ root, err := bundle.Marshal() ++ if err != nil { ++ log.Fatalf("unable to marshal trust bundle: %v", err) ++ return ++ } ++ ++ certChain := concatCerts([]string{string(workloadChain)}) ++ leaf := c.SVIDs[0].Certificates[0] ++ ++ item := &security.SecretItem{ ++ CertificateChain: certChain, ++ PrivateKey: workloadKey, ++ RootCert: root, ++ TrustBundles: c.Bundles, ++ ResourceName: security.WorkloadKeyCertResourceName, ++ CreatedTime: leaf.NotBefore, ++ ExpireTime: leaf.NotAfter, ++ } ++ ++ s.Lock() ++ defer s.Unlock() ++ ++ if s.secretItem == nil || !bytes.Equal(s.secretItem.RootCert, item.RootCert) { ++ s.callUpdateCallback(security.RootCertReqResourceName) ++ } ++ if s.secretItem == nil || !bytes.Equal(s.secretItem.CertificateChain, item.CertificateChain) { ++ s.callUpdateCallback(security.WorkloadKeyCertResourceName) ++ } ++ s.secretItem = item ++ ++ select { ++ case s.updatedCh <- struct{}{}: ++ log.Info("notify message sent on updateCh") ++ default: ++ log.Info("notify message dropped") ++ } ++} ++ ++// OnX509ContextWatchError is run when the client runs into an error. 
++func (s *SpiffeSecretManager) OnX509ContextWatchError(err error) { ++ if status.Code(err) != codes.Canceled { ++ log.Infof("error calling SPIFE Workload API: %v", err) ++ } ++} ++ ++func (s *SpiffeSecretManager) callUpdateCallback(resourceName string) { ++ log.WithLabels("resource", resourceName).Info("fetched new identity from SPIFFE Workload API") ++ if s.notifyCallback != nil { ++ s.notifyCallback(resourceName) ++ } ++} ++ ++func (s *SpiffeSecretManager) watcherTask(ctx context.Context) { ++ err := workloadapi.WatchX509Context(ctx, s) ++ if err != nil && status.Code(err) != codes.Canceled { ++ log.Fatalf("error watching SPIFFE workload API: %v", err) ++ } ++} +diff --git a/security/pkg/nodeagent/sds/sdsservice.go b/security/pkg/nodeagent/sds/sdsservice.go +index 3ac84d6fd9..fd73728707 100644 +--- a/security/pkg/nodeagent/sds/sdsservice.go ++++ b/security/pkg/nodeagent/sds/sdsservice.go +@@ -18,6 +18,7 @@ package sds + import ( + "context" + "fmt" ++ "google.golang.org/protobuf/types/known/anypb" + "time" + + "github.com/cenkalti/backoff/v4" +@@ -148,7 +149,11 @@ func (s *sdsservice) generate(resourceNames []string) (model.Resources, error) { + return nil, fmt.Errorf("failed to generate secret for %v: %v", resourceName, err) + } + +- res := util.MessageToAny(toEnvoySecret(secret, s.rootCaPath)) ++ envoySecret, err := toEnvoySecret(secret, s.rootCaPath) ++ if err != nil { ++ return nil, fmt.Errorf("failed to generate secret for %v: %v", resourceName, err) ++ } ++ res := util.MessageToAny(envoySecret) + resources = append(resources, &discovery.Resource{ + Name: resourceName, + Resource: res, +@@ -203,7 +208,7 @@ func (s *sdsservice) Close() { + } + + // toEnvoySecret converts a security.SecretItem to an Envoy tls.Secret +-func toEnvoySecret(s *security.SecretItem, caRootPath string) *tls.Secret { ++func toEnvoySecret(s *security.SecretItem, caRootPath string) (*tls.Secret, error) { + secret := &tls.Secret{ + Name: s.ResourceName, + } +@@ -215,6 +220,15 @@ func toEnvoySecret(s *security.SecretItem, caRootPath string) *tls.Secret { + cfg, ok = security.SdsCertificateConfigFromResourceName(s.ResourceName) + } + if s.ResourceName == security.RootCertReqResourceName || (ok && cfg.IsRootCertificate()) { ++ // are there federated bundles? 
++ if s.TrustBundles != nil && s.TrustBundles.Len() > 1 { ++ validatorConfig, err := buildSPIFFECertValidatorConfig(s) ++ if err != nil { ++ return nil, err ++ } ++ secret.Type = validatorConfig ++ return secret, nil ++ } + secret.Type = &tls.Secret_ValidationContext{ + ValidationContext: &tls.CertificateValidationContext{ + TrustedCa: &core.DataSource{ +@@ -241,7 +255,7 @@ func toEnvoySecret(s *security.SecretItem, caRootPath string) *tls.Secret { + } + } + +- return secret ++ return secret, nil + } + + func pushLog(names []string) model.XdsLogDetails { +@@ -251,3 +265,32 @@ func pushLog(names []string) model.XdsLogDetails { + } + return model.DefaultXdsLogDetails + } ++ ++func buildSPIFFECertValidatorConfig(s *security.SecretItem) (*tls.Secret_ValidationContext, error) { ++ var configTrustDomains []*tls.SPIFFECertValidatorConfig_TrustDomain ++ ++ for _, bundle := range s.TrustBundles.Bundles() { ++ caBytes, err := bundle.Marshal() ++ if err != nil { ++ return nil, err ++ } ++ configTrustDomains = append(configTrustDomains, &tls.SPIFFECertValidatorConfig_TrustDomain{ ++ Name: bundle.TrustDomain().String(), ++ TrustBundle: &core.DataSource{Specifier: &core.DataSource_InlineBytes{ ++ InlineBytes: caBytes, ++ }}, ++ }) ++ } ++ ++ typedConfig, err := anypb.New(&tls.SPIFFECertValidatorConfig{TrustDomains: configTrustDomains}) ++ if err != nil { ++ return nil, err ++ } ++ ++ return &tls.Secret_ValidationContext{ValidationContext: &tls.CertificateValidationContext{ ++ CustomValidatorConfig: &core.TypedExtensionConfig{ ++ Name: "envoy.tls.cert_validator.spiffe", ++ TypedConfig: typedConfig, ++ }, ++ }}, nil ++} diff --git a/POC/patches/poc.release-1.10.patch b/POC/patches/poc.release-1.10.patch new file mode 100644 index 00000000..5db1f780 --- /dev/null +++ b/POC/patches/poc.release-1.10.patch @@ -0,0 +1,646 @@ +diff --git a/pilot/pkg/bootstrap/server.go b/pilot/pkg/bootstrap/server.go +--- a/pilot/pkg/bootstrap/server.go (revision 77e71d7074d8d1141bb9f9f0abed130f80316244) ++++ b/pilot/pkg/bootstrap/server.go (date 1636568958443) +@@ -29,6 +29,9 @@ + "sync" + "time" + ++ "github.com/spiffe/go-spiffe/v2/spiffeid" ++ "github.com/spiffe/go-spiffe/v2/workloadapi" ++ + middleware "github.com/grpc-ecosystem/go-grpc-middleware" + prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + prom "github.com/prometheus/client_golang/prometheus" +@@ -175,6 +178,9 @@ + statusReporter *status.Reporter + // RWConfigStore is the configstore which allows updates, particularly for status. + RWConfigStore model.ConfigStoreCache ++ ++ // source of X.509 certs and bundle, when using SPIFFE Workload API as cert provider ++ x509Source *workloadapi.X509Source + } + + // NewServer creates a new Server instance based on the provided arguments. +@@ -205,6 +211,16 @@ + for _, fn := range initFuncs { + fn(s) + } ++ ++ if strings.EqualFold(features.PilotCertProvider.Get(), SpiffeCAProvider) { ++ ctx := context.Background() ++ x509Source, err := workloadapi.NewX509Source(ctx) ++ if err != nil { ++ return nil, fmt.Errorf("failed creating spiffe X.509 source: %v", err) ++ } ++ s.x509Source = x509Source ++ } ++ + // Initialize workload Trust Bundle before XDS Server + e.TrustBundle = s.workloadTrustBundle + s.XDSServer = xds.NewDiscoveryServer(e, args.Plugins, args.PodName, args.Namespace) +@@ -927,6 +943,29 @@ + + // initIstiodCerts creates Istiod certificates and also sets up watches to them. 
+ func (s *Server) initIstiodCerts(args *PilotArgs, host string) error { ++ ++ pilotCertProvider := features.PilotCertProvider.Get() ++ log.Infof("pilotCertProvider: %s", pilotCertProvider) ++ ++ if strings.EqualFold(pilotCertProvider, SpiffeCAProvider) { ++ ++ chain, key, err := s.getCertAndKeyBytes() ++ if err != nil { ++ return err ++ } ++ ++ bundleBytes, err := s.getBundleBytes() ++ if err != nil { ++ return err ++ } ++ ++ s.setIstioCertBundleAndNotify(chain, key, bundleBytes) ++ ++ s.watchIstioCertUpdates() ++ ++ return nil ++ } ++ + if err := s.maybeInitDNSCerts(args, host); err != nil { + return fmt.Errorf("error initializing DNS certs: %v", err) + } +@@ -939,6 +978,60 @@ + return nil + } + ++func (s *Server) watchIstioCertUpdates() { ++ go func() { ++ updatedChan := s.x509Source.Updated() ++ for { ++ <-updatedChan ++ chain, key, err := s.getCertAndKeyBytes() ++ if err != nil { ++ log.Errorf("error watching SPIFFE updates: %v", err) ++ continue ++ } ++ ++ bundleBytes, err := s.getBundleBytes() ++ if err != nil { ++ log.Errorf("error watching SPIFFE updates: %v", err) ++ continue ++ } ++ ++ s.setIstioCertBundleAndNotify(chain, key, bundleBytes) ++ } ++ }() ++} ++ ++func (s *Server) getCertAndKeyBytes() ([]byte, []byte, error) { ++ svid, err := s.x509Source.GetX509SVID() ++ if err != nil { ++ return nil, nil, fmt.Errorf("failed fetching X.509 SVID: %v", err) ++ } ++ ++ chain, key, err := svid.Marshal() ++ if err != nil { ++ return nil, nil, fmt.Errorf("unable to marshal X.509 SVID: %v", err) ++ } ++ ++ return chain, key, nil ++} ++ ++func (s *Server) getBundleBytes() ([]byte, error) { ++ trustDomain, err := spiffeid.TrustDomainFromString(spiffe.GetTrustDomain()) ++ if err != nil { ++ return nil, fmt.Errorf("error trying to parse trust domain %q reason: %v", spiffe.GetTrustDomain(), err) ++ } ++ ++ bundle, err := s.x509Source.GetX509BundleForTrustDomain(trustDomain) ++ if err != nil { ++ return nil, fmt.Errorf("unable to find X.509 bundle for trust domain %q: %v", trustDomain, err) ++ } ++ ++ bundleBytes, err := bundle.Marshal() ++ if err != nil { ++ return nil, fmt.Errorf("unable to marshal X.509 byndle: %v", err) ++ } ++ return bundleBytes, nil ++} ++ + // shouldInitDNSCerts returns whether DNS certs need to be signed. + func (s *Server) shouldInitDNSCerts(args *PilotArgs) bool { + if hasCustomTLSCerts(args.ServerOptions.TLSOptions) { +@@ -968,7 +1061,7 @@ + + // createPeerCertVerifier creates a SPIFFE certificate verifier with the current istiod configuration. 
+ func (s *Server) createPeerCertVerifier(tlsOptions TLSOptions) (*spiffe.PeerCertVerifier, error) { +- if tlsOptions.CaCertFile == "" && s.CA == nil && features.SpiffeBundleEndpoints == "" { ++ if tlsOptions.CaCertFile == "" && s.CA == nil && features.SpiffeBundleEndpoints == "" && strings.ToLower(features.PilotCertProvider.Get()) != SpiffeCAProvider { + // Running locally without configured certs - no TLS mode + return nil, nil + } +@@ -1004,6 +1097,18 @@ + peerCertVerifier.AddMappings(certMap) + } + ++ if strings.EqualFold(features.PilotCertProvider.Get(), SpiffeCAProvider) { ++ bundleBytes, err := s.getBundleBytes() ++ if err != nil { ++ return nil, err ++ } ++ ++ err = peerCertVerifier.AddMappingFromPEM(spiffe.GetTrustDomain(), bundleBytes) ++ if err != nil { ++ return nil, fmt.Errorf("add root CAs into peerCertVerifier failed: %v", err) ++ } ++ } ++ + return peerCertVerifier, nil + } + +diff --git a/pkg/istio-agent/xds_proxy.go b/pkg/istio-agent/xds_proxy.go +--- a/pkg/istio-agent/xds_proxy.go (revision 77e71d7074d8d1141bb9f9f0abed130f80316244) ++++ b/pkg/istio-agent/xds_proxy.go (date 1636569094090) +@@ -21,6 +21,7 @@ + "fmt" + "io" + "io/ioutil" ++ "istio.io/istio/pkg/security" + "math" + "net" + "os" +@@ -684,7 +685,24 @@ + var certPool *x509.CertPool + var err error + var rootCert []byte ++ ++ if strings.EqualFold(agent.secOpts.CAProviderName, security.SpiffeCAProvider) { ++ secretItem, err := agent.secretCache.GenerateSecret(security.RootCertReqResourceName) ++ if err != nil { ++ return nil, fmt.Errorf("failed to create TLS dial option with root certificates: %v", err) ++ } ++ ++ certPool = x509.NewCertPool() ++ ok := certPool.AppendCertsFromPEM(secretItem.RootCert) ++ if !ok { ++ return nil, fmt.Errorf("failed to create TLS dial option with root certificates") ++ } ++ ++ return certPool, nil ++ } ++ + xdsCACertPath := agent.FindRootCAForXDS() ++ + if xdsCACertPath != "" { + rootCert, err = ioutil.ReadFile(xdsCACertPath) + if err != nil { +diff --git a/pkg/istio-agent/agent.go b/pkg/istio-agent/agent.go +--- a/pkg/istio-agent/agent.go (revision 77e71d7074d8d1141bb9f9f0abed130f80316244) ++++ b/pkg/istio-agent/agent.go (date 1636569094084) +@@ -17,13 +17,12 @@ + import ( + "errors" + "fmt" ++ "google.golang.org/grpc" + "io/ioutil" + "os" + "path" + "strings" + +- "google.golang.org/grpc" +- + mesh "istio.io/api/mesh/v1alpha1" + "istio.io/istio/pilot/pkg/dns" + "istio.io/istio/pilot/pkg/model" +@@ -85,7 +84,7 @@ + secOpts *security.Options + + sdsServer *sds.Server +- secretCache *cache.SecretManagerClient ++ secretCache security.SecretProvider + + // Used when proxying envoy xds via istio-agent is enabled. + xdsProxy *XdsProxy +@@ -286,7 +285,12 @@ + } + + // newSecretManager creates the SecretManager for workload secrets +-func (a *Agent) newSecretManager() (*cache.SecretManagerClient, error) { ++func (a *Agent) newSecretManager() (security.SecretProvider, error) { ++ if strings.EqualFold(a.secOpts.CAProviderName, security.SpiffeCAProvider) { ++ log.Info("Using SPIFFE identity plane") ++ return cache.NewSpiffeSecretManager(a.secOpts) ++ } ++ + // If proxy is using file mounted certs, we do not have to connect to CA. + if a.secOpts.FileMountedCerts { + log.Info("Workload is using file mounted certificates. 
Skipping connecting to CA") +diff --git a/go.mod b/go.mod +--- a/go.mod (revision 77e71d7074d8d1141bb9f9f0abed130f80316244) ++++ b/go.mod (date 1636568958444) +@@ -71,6 +71,7 @@ + github.com/spf13/cobra v1.1.3 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.7.1 ++ github.com/spiffe/go-spiffe/v2 v2.0.0-beta.10 + github.com/stretchr/testify v1.7.0 + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/yl2chen/cidranger v1.0.2 +diff --git a/pkg/security/security.go b/pkg/security/security.go +--- a/pkg/security/security.go (revision 77e71d7074d8d1141bb9f9f0abed130f80316244) ++++ b/pkg/security/security.go (date 1636568958443) +@@ -20,6 +20,7 @@ + "strings" + "time" + ++ "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "google.golang.org/grpc/metadata" + + "istio.io/pkg/env" +@@ -56,6 +57,8 @@ + // Credential fetcher type + GCE = "GoogleComputeEngine" + Mock = "Mock" // testing only ++ ++ SpiffeCAProvider = "spiffe" + ) + + // TODO: For 1.8, make sure MeshConfig is updated with those settings, +@@ -231,6 +234,13 @@ + GenerateSecret(resourceName string) (*SecretItem, error) + } + ++type SecretProvider interface { ++ SecretManager ++ Close() ++ SetUpdateCallback(func(string)) ++ UpdateConfigTrustBundle([]byte) error ++} ++ + // TokenExchanger provides common interfaces so that authentication providers could choose to implement their specific logic. + type TokenExchanger interface { + // ExchangeToken provides a common interface to exchange an existing token for a new one. +@@ -244,6 +254,9 @@ + + RootCert []byte + ++ // Trust bundles keyed by trust domain ++ TrustBundles *x509bundle.Set ++ + // ResourceName passed from envoy SDS discovery request. + // "ROOTCA" for root cert request, "default" for key/cert request. + ResourceName string +diff --git a/security/pkg/nodeagent/cache/spiffesecret.go b/security/pkg/nodeagent/cache/spiffesecret.go +new file mode 100644 +--- /dev/null (date 1636568958443) ++++ b/security/pkg/nodeagent/cache/spiffesecret.go (date 1636568958443) +@@ -0,0 +1,217 @@ ++package cache ++ ++import ( ++ "bytes" ++ "fmt" ++ "sync" ++ "time" ++ ++ "github.com/spiffe/go-spiffe/v2/spiffeid" ++ "github.com/spiffe/go-spiffe/v2/workloadapi" ++ "golang.org/x/net/context" ++ "google.golang.org/grpc/codes" ++ "google.golang.org/grpc/status" ++ "istio.io/istio/pkg/security" ++ "istio.io/pkg/log" ++) ++ ++// SpiffeSecretManager is a source of SecretItems (X.509 SVIDs and trust bundles) maintained via the ++// Workload API. Implements the ++type SpiffeSecretManager struct { ++ sync.RWMutex ++ trustDomain spiffeid.TrustDomain ++ configTrustBundle []byte ++ secretItem *security.SecretItem ++ notifyCallback func(resourceName string) ++ cancelWatcher context.CancelFunc ++ updatedCh chan struct{} ++} ++ ++// NewSpiffeSecretManager creates a new SpiffeSecretManager. It blocks until the initial update ++// has been received from the Workload API. 
++func NewSpiffeSecretManager(opt *security.Options) (*SpiffeSecretManager, error) { ++ td, err := spiffeid.TrustDomainFromString(opt.TrustDomain) ++ if err != nil { ++ return nil, fmt.Errorf("error trying to parse trust domain %q reason: %v", opt.TrustDomain, err) ++ } ++ ++ sm := &SpiffeSecretManager{ ++ trustDomain: td, ++ } ++ ++ ctx, cancel := context.WithCancel(context.Background()) ++ sm.cancelWatcher = cancel ++ sm.updatedCh = make(chan struct{}) ++ ++ go sm.watcherTask(ctx) ++ ++ err = sm.WaitUntilUpdated(ctx) ++ if err != nil { ++ return nil, fmt.Errorf("error initializing the SPIFFE secret manager") ++ } ++ ++ return sm, nil ++} ++ ++// WaitUntilUpdated waits until the secret manager is updated or the context is done, ++// in which case ctx.Err() is returned. ++func (w *SpiffeSecretManager) WaitUntilUpdated(ctx context.Context) error { ++ select { ++ case <-w.updatedCh: ++ return nil ++ case <-ctx.Done(): ++ return ctx.Err() ++ } ++} ++ ++// Updated returns a channel that is sent on whenever the secret manager is updated. ++func (w *SpiffeSecretManager) Updated() <-chan struct{} { ++ return w.updatedCh ++} ++ ++// GenerateSecret generates a SecretItem for the given resourceName (default or ROOTCA). ++func (s *SpiffeSecretManager) GenerateSecret(resourceName string) (*security.SecretItem, error) { ++ s.RLock() ++ defer s.RUnlock() ++ ++ si := s.secretItem ++ if si == nil { ++ return nil, fmt.Errorf("secret was not in cache for resource: %v", resourceName) ++ } ++ ++ if resourceName == security.RootCertReqResourceName { ++ ++ ns := &security.SecretItem{ ++ ResourceName: resourceName, ++ RootCert: si.RootCert, ++ // adding all trust bundles ++ TrustBundles: si.TrustBundles, ++ } ++ ++ cacheLog.WithLabels("ttl", time.Until(si.ExpireTime)).Info("returned workload trust anchor from cache") ++ return ns, nil ++ } ++ ++ ns := &security.SecretItem{ ++ ResourceName: resourceName, ++ CertificateChain: si.CertificateChain, ++ PrivateKey: si.PrivateKey, ++ ExpireTime: si.ExpireTime, ++ CreatedTime: si.CreatedTime, ++ } ++ cacheLog.WithLabels("ttl", time.Until(si.ExpireTime)).Info("returned workload certificate from cache") ++ return ns, nil ++} ++ ++// UpdateConfigTrustBundle updates the configTrustBundle and calls the notify callback function. ++func (s *SpiffeSecretManager) UpdateConfigTrustBundle(trustBundle []byte) error { ++ log.WithLabels("UpdateConfigTrustBundle").Info(string(trustBundle)) ++ s.Lock() ++ defer s.Unlock() ++ ++ if bytes.Equal(s.configTrustBundle, trustBundle) { ++ return nil ++ } ++ s.configTrustBundle = trustBundle ++ s.callUpdateCallback(security.RootCertReqResourceName) ++ return nil ++} ++ ++// Close closes the SPIFFE secret manager instance. ++func (s *SpiffeSecretManager) Close() { ++ if s.cancelWatcher != nil { ++ log.Info("closing SPIFFE secret manager") ++ s.cancelWatcher() ++ } ++} ++ ++// SetUpdateCallback configures the manager with a notify callback function. ++func (s *SpiffeSecretManager) SetUpdateCallback(f func(resourceName string)) { ++ s.Lock() ++ defer s.Unlock() ++ s.notifyCallback = f ++} ++ ++// OnX509ContextUpdate is run every time a new update is pushed by the SPIFFE Workload API. 
++func (s *SpiffeSecretManager) OnX509ContextUpdate(c *workloadapi.X509Context) { ++ log.Info("got new identities from the SPIFFE Workload API") ++ if len(c.SVIDs) < 1 { ++ log.Error("identities were not found on workload API response") ++ return ++ } ++ if len(c.SVIDs[0].Certificates) < 1 { ++ log.Error("leaf certificate was not found on workload API response") ++ return ++ } ++ ++ svid := c.DefaultSVID() ++ workloadChain, workloadKey, err := svid.Marshal() ++ if err != nil { ++ log.Fatalf("unable to marshal X.509 SVID: %v", err) ++ return ++ } ++ ++ bundle, ok := c.Bundles.Get(s.trustDomain) ++ if !ok { ++ log.WithLabels("trust_domain", s.trustDomain).Fatal("unable to get trust bundle for trust domain") ++ return ++ } ++ ++ root, err := bundle.Marshal() ++ if err != nil { ++ log.Fatalf("unable to marshal trust bundle: %v", err) ++ return ++ } ++ ++ certChain := concatCerts([]string{string(workloadChain)}) ++ leaf := c.SVIDs[0].Certificates[0] ++ ++ item := &security.SecretItem{ ++ CertificateChain: certChain, ++ PrivateKey: workloadKey, ++ RootCert: root, ++ TrustBundles: c.Bundles, ++ ResourceName: security.WorkloadKeyCertResourceName, ++ CreatedTime: leaf.NotBefore, ++ ExpireTime: leaf.NotAfter, ++ } ++ ++ s.Lock() ++ defer s.Unlock() ++ ++ if s.secretItem == nil || !bytes.Equal(s.secretItem.RootCert, item.RootCert) { ++ s.callUpdateCallback(security.RootCertReqResourceName) ++ } ++ if s.secretItem == nil || !bytes.Equal(s.secretItem.CertificateChain, item.CertificateChain) { ++ s.callUpdateCallback(security.WorkloadKeyCertResourceName) ++ } ++ s.secretItem = item ++ ++ select { ++ case s.updatedCh <- struct{}{}: ++ log.Info("notify message sent on updateCh") ++ default: ++ log.Info("notify message dropped") ++ } ++} ++ ++// OnX509ContextWatchError is run when the client runs into an error. 
++func (s *SpiffeSecretManager) OnX509ContextWatchError(err error) {
++	if status.Code(err) != codes.Canceled {
++		log.Infof("error calling SPIFFE Workload API: %v", err)
++	}
++}
++
++func (s *SpiffeSecretManager) callUpdateCallback(resourceName string) {
++	log.WithLabels("resource", resourceName).Info("fetched new identity from SPIFFE Workload API")
++	if s.notifyCallback != nil {
++		s.notifyCallback(resourceName)
++	}
++}
++
++func (s *SpiffeSecretManager) watcherTask(ctx context.Context) {
++	err := workloadapi.WatchX509Context(ctx, s)
++	if err != nil && status.Code(err) != codes.Canceled {
++		log.Fatalf("error watching SPIFFE workload API: %v", err)
++	}
++}
+diff --git a/security/pkg/nodeagent/sds/sdsservice.go b/security/pkg/nodeagent/sds/sdsservice.go
+--- a/security/pkg/nodeagent/sds/sdsservice.go (revision 77e71d7074d8d1141bb9f9f0abed130f80316244)
++++ b/security/pkg/nodeagent/sds/sdsservice.go (date 1636568958443)
+@@ -19,6 +19,7 @@
+ 	"context"
+ 	"fmt"
+ 	"time"
++	"google.golang.org/protobuf/types/known/anypb"
+
+ 	"github.com/cenkalti/backoff"
+ 	core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+@@ -148,7 +149,11 @@
+ 		return nil, fmt.Errorf("failed to generate secret for %v: %v", resourceName, err)
+ 	}
+
+-	res := util.MessageToAny(toEnvoySecret(secret))
++	envoySecret, err := toEnvoySecret(secret)
++	if err != nil {
++		return nil, fmt.Errorf("failed to generate secret for %v: %v", resourceName, err)
++	}
++	res := util.MessageToAny(envoySecret)
+ 	resources = append(resources, res)
+ 	}
+ 	return resources, nil
+@@ -201,13 +206,22 @@
+ }
+
+ // toEnvoySecret converts a security.SecretItem to an Envoy tls.Secret
+-func toEnvoySecret(s *security.SecretItem) *tls.Secret {
++func toEnvoySecret(s *security.SecretItem) (*tls.Secret, error) {
+ 	secret := &tls.Secret{
+ 		Name: s.ResourceName,
+ 	}
+
+ 	cfg, ok := model.SdsCertificateConfigFromResourceName(s.ResourceName)
+ 	if s.ResourceName == security.RootCertReqResourceName || (ok && cfg.IsRootCertificate()) {
++		// are there federated bundles?
++ if s.TrustBundles != nil && s.TrustBundles.Len() > 1 { ++ validatorConfig, err := buildSPIFFECertValidatorConfig(s) ++ if err != nil { ++ return nil, err ++ } ++ secret.Type = validatorConfig ++ return secret, nil ++ } + secret.Type = &tls.Secret_ValidationContext{ + ValidationContext: &tls.CertificateValidationContext{ + TrustedCa: &core.DataSource{ +@@ -234,7 +248,36 @@ + } + } + +- return secret ++ return secret, nil ++} ++ ++func buildSPIFFECertValidatorConfig(s *security.SecretItem) (*tls.Secret_ValidationContext, error) { ++ var configTrustDomains []*tls.SPIFFECertValidatorConfig_TrustDomain ++ ++ for _, bundle := range s.TrustBundles.Bundles() { ++ caBytes, err := bundle.Marshal() ++ if err != nil { ++ return nil, err ++ } ++ configTrustDomains = append(configTrustDomains, &tls.SPIFFECertValidatorConfig_TrustDomain{ ++ Name: bundle.TrustDomain().String(), ++ TrustBundle: &core.DataSource{Specifier: &core.DataSource_InlineBytes{ ++ InlineBytes: caBytes, ++ }}, ++ }) ++ } ++ ++ typedConfig, err := anypb.New(&tls.SPIFFECertValidatorConfig{TrustDomains: configTrustDomains}) ++ if err != nil { ++ return nil, err ++ } ++ ++ return &tls.Secret_ValidationContext{ValidationContext: &tls.CertificateValidationContext{ ++ CustomValidatorConfig: &core.TypedExtensionConfig{ ++ Name: "envoy.tls.cert_validator.spiffe", ++ TypedConfig: typedConfig, ++ }, ++ }}, nil + } + + func pushLog(names []string, err error) { +diff --git a/pilot/pkg/bootstrap/certcontroller.go b/pilot/pkg/bootstrap/certcontroller.go +--- a/pilot/pkg/bootstrap/certcontroller.go (revision 77e71d7074d8d1141bb9f9f0abed130f80316244) ++++ b/pilot/pkg/bootstrap/certcontroller.go (date 1636568958444) +@@ -48,6 +48,7 @@ + var ( + KubernetesCAProvider = "kubernetes" + IstiodCAProvider = "istiod" ++ SpiffeCAProvider = "spiffe" + ) + + // CertController can create certificates signed by K8S server. +@@ -199,6 +200,35 @@ + } + } + ++func (s *Server) setIstioCertBundleAndNotify(certChain []byte, key []byte, bundle []byte) { ++ keyPair, err := tls.X509KeyPair(certChain, key) ++ ++ if err != nil { ++ log.Errorf("istiod loading x509 key pairs failed: %v", err) ++ return ++ } ++ for _, c := range keyPair.Certificate { ++ x509Cert, err := x509.ParseCertificates(c) ++ if err != nil { ++ log.Errorf("x509 cert - ParseCertificates() error: %v", err) ++ return ++ } ++ for _, c := range x509Cert { ++ log.Infof("x509 cert - Issuer: %q, Subject: %q, SN: %x, NotBefore: %q, NotAfter: %q", ++ c.Issuer, c.Subject, c.SerialNumber, ++ c.NotBefore.Format(time.RFC3339), c.NotAfter.Format(time.RFC3339)) ++ } ++ } ++ ++ s.certMu.Lock() ++ s.istiodCert = &keyPair ++ s.certMu.Unlock() ++ log.Info("istiod certificates are set") ++ ++ s.istiodCertBundleWatcher.SetAndNotify(nil, nil, bundle) ++ log.Info("istiod Cert Bundle Watcher notified") ++} ++ + // initCertificateWatches sets up watches for the dns certs. + // 1. plugin cert + // 2. istiod signed certs. 
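Note on the release-1.10 patch above: when the cert provider is set to "spiffe", istiod and the istio-agent stop using the built-in CA flow and instead source their certificates from the SPIFFE Workload API (the SPIRE agent) through go-spiffe v2. The standalone sketch below is illustrative only and not part of the patch; the trust domain "example.org" and the final printout are placeholders. It exercises the same Workload API calls the patch relies on (NewX509Source, GetX509SVID plus Marshal, and GetX509BundleForTrustDomain) to obtain a PEM certificate chain, private key, and trust bundle.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/spiffe/go-spiffe/v2/spiffeid"
	"github.com/spiffe/go-spiffe/v2/workloadapi"
)

func main() {
	ctx := context.Background()

	// Connect to the SPIFFE Workload API; with no options the socket address is
	// taken from the SPIFFE_ENDPOINT_SOCKET environment variable.
	source, err := workloadapi.NewX509Source(ctx)
	if err != nil {
		log.Fatalf("failed creating spiffe X.509 source: %v", err)
	}
	defer source.Close()

	// X.509 SVID: the certificate chain and private key the workload serves with.
	svid, err := source.GetX509SVID()
	if err != nil {
		log.Fatalf("failed fetching X.509 SVID: %v", err)
	}
	chainPEM, keyPEM, err := svid.Marshal()
	if err != nil {
		log.Fatalf("unable to marshal X.509 SVID: %v", err)
	}

	// Trust bundle for the local trust domain ("example.org" is a placeholder).
	td, err := spiffeid.TrustDomainFromString("example.org")
	if err != nil {
		log.Fatalf("invalid trust domain: %v", err)
	}
	bundle, err := source.GetX509BundleForTrustDomain(td)
	if err != nil {
		log.Fatalf("unable to find X.509 bundle for trust domain %q: %v", td, err)
	}
	bundlePEM, err := bundle.Marshal()
	if err != nil {
		log.Fatalf("unable to marshal X.509 bundle: %v", err)
	}

	fmt.Printf("SVID %s: chain %d bytes, key %d bytes, bundle %d bytes\n",
		svid.ID, len(chainPEM), len(keyPEM), len(bundlePEM))
}

This mirrors how the patched istiod builds its serving certificate and peer-verification roots, and how the agent's SpiffeSecretManager feeds the same material to Envoy over SDS. The release-1.11 and release-1.12 patches that follow apply the same approach to their respective Istio branches.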
diff --git a/POC/patches/poc.release-1.11.patch b/POC/patches/poc.release-1.11.patch new file mode 100644 index 00000000..106c7482 --- /dev/null +++ b/POC/patches/poc.release-1.11.patch @@ -0,0 +1,628 @@ +diff --git a/pilot/pkg/bootstrap/server.go b/pilot/pkg/bootstrap/server.go +--- a/pilot/pkg/bootstrap/server.go (revision 8f9263961b52f11f9bfb731e0eeae096bb0acfd0) ++++ b/pilot/pkg/bootstrap/server.go (date 1636568600190) +@@ -29,6 +29,9 @@ + "sync" + "time" + ++ "github.com/spiffe/go-spiffe/v2/spiffeid" ++ "github.com/spiffe/go-spiffe/v2/workloadapi" ++ + prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + prom "github.com/prometheus/client_golang/prometheus" + "golang.org/x/net/http2" +@@ -180,6 +183,9 @@ + statusReporter *status.Reporter + // RWConfigStore is the configstore which allows updates, particularly for status. + RWConfigStore model.ConfigStoreCache ++ ++ // source of X.509 certs and bundle, when using SPIFFE Workload API as cert provider ++ x509Source *workloadapi.X509Source + } + + // NewServer creates a new Server instance based on the provided arguments. +@@ -212,6 +218,16 @@ + for _, fn := range initFuncs { + fn(s) + } ++ ++ if strings.EqualFold(features.PilotCertProvider, constants.CertProviderSpiffe) { ++ ctx := context.Background() ++ x509Source, err := workloadapi.NewX509Source(ctx) ++ if err != nil { ++ return nil, fmt.Errorf("failed creating spiffe X.509 source: %v", err) ++ } ++ s.x509Source = x509Source ++ } ++ + // Initialize workload Trust Bundle before XDS Server + e.TrustBundle = s.workloadTrustBundle + s.XDSServer = xds.NewDiscoveryServer(e, args.Plugins, args.PodName, args.Namespace) +@@ -931,6 +947,7 @@ + func (s *Server) initIstiodCerts(args *PilotArgs, host string) error { + // Skip all certificates + var err error ++ + if hasCustomTLSCerts(args.ServerOptions.TLSOptions) { + // Use the DNS certificate provided via args. + err = s.initCertificateWatches(args.ServerOptions.TLSOptions) +@@ -954,14 +971,29 @@ + if err == nil { + err = s.initIstiodCertLoader() + } ++ } else if strings.EqualFold(features.PilotCertProvider, constants.CertProviderSpiffe) { ++ chain, key, err := s.getCertAndKeyBytes() ++ if err != nil { ++ return err ++ } ++ ++ bundleBytes, err := s.getBundleBytes() ++ if err != nil { ++ return err ++ } ++ ++ s.setIstioCertBundleAndNotify(chain, key, bundleBytes) ++ s.watchIstioCertUpdates() ++ return nil + } ++ + + return err + } + + // createPeerCertVerifier creates a SPIFFE certificate verifier with the current istiod configuration. 
+ func (s *Server) createPeerCertVerifier(tlsOptions TLSOptions) (*spiffe.PeerCertVerifier, error) { +- if tlsOptions.CaCertFile == "" && s.CA == nil && features.SpiffeBundleEndpoints == "" && !s.isDisableCa() { ++ if tlsOptions.CaCertFile == "" && s.CA == nil && features.SpiffeBundleEndpoints == "" && !s.isDisableCa() && !strings.EqualFold(features.PilotCertProvider, constants.CertProviderSpiffe) { + // Running locally without configured certs - no TLS mode + return nil, nil + } +@@ -997,6 +1029,18 @@ + peerCertVerifier.AddMappings(certMap) + } + ++ if strings.EqualFold(features.PilotCertProvider, constants.CertProviderSpiffe) { ++ bundleBytes, err := s.getBundleBytes() ++ if err != nil { ++ return nil, err ++ } ++ ++ err = peerCertVerifier.AddMappingFromPEM(spiffe.GetTrustDomain(), bundleBytes) ++ if err != nil { ++ return nil, fmt.Errorf("add root CAs into peerCertVerifier failed: %v", err) ++ } ++ } ++ + return peerCertVerifier, nil + } + +@@ -1063,6 +1107,60 @@ + return nil + } + ++func (s *Server) watchIstioCertUpdates() { ++ go func() { ++ updatedChan := s.x509Source.Updated() ++ for { ++ <-updatedChan ++ chain, key, err := s.getCertAndKeyBytes() ++ if err != nil { ++ log.Errorf("error watching SPIFFE updates: %v", err) ++ continue ++ } ++ ++ bundleBytes, err := s.getBundleBytes() ++ if err != nil { ++ log.Errorf("error watching SPIFFE updates: %v", err) ++ continue ++ } ++ ++ s.setIstioCertBundleAndNotify(chain, key, bundleBytes) ++ } ++ }() ++} ++ ++func (s *Server) getCertAndKeyBytes() ([]byte, []byte, error) { ++ svid, err := s.x509Source.GetX509SVID() ++ if err != nil { ++ return nil, nil, fmt.Errorf("failed fetching X.509 SVID: %v", err) ++ } ++ ++ chain, key, err := svid.Marshal() ++ if err != nil { ++ return nil, nil, fmt.Errorf("unable to marshal X.509 SVID: %v", err) ++ } ++ ++ return chain, key, nil ++} ++ ++func (s *Server) getBundleBytes() ([]byte, error) { ++ trustDomain, err := spiffeid.TrustDomainFromString(spiffe.GetTrustDomain()) ++ if err != nil { ++ return nil, fmt.Errorf("error trying to parse trust domain %q reason: %v", spiffe.GetTrustDomain(), err) ++ } ++ ++ bundle, err := s.x509Source.GetX509BundleForTrustDomain(trustDomain) ++ if err != nil { ++ return nil, fmt.Errorf("unable to find X.509 bundle for trust domain %q: %v", trustDomain, err) ++ } ++ ++ bundleBytes, err := bundle.Marshal() ++ if err != nil { ++ return nil, fmt.Errorf("unable to marshal X.509 byndle: %v", err) ++ } ++ return bundleBytes, nil ++} ++ + // StartCA starts the CA or RA server if configured. 
+ func (s *Server) startCA(caOpts *caOptions) { + if s.CA == nil && s.RA == nil { +diff --git a/pkg/istio-agent/xds_proxy.go b/pkg/istio-agent/xds_proxy.go +--- a/pkg/istio-agent/xds_proxy.go (revision 8f9263961b52f11f9bfb731e0eeae096bb0acfd0) ++++ b/pkg/istio-agent/xds_proxy.go (date 1636568553844) +@@ -22,6 +22,7 @@ + "encoding/json" + "fmt" + "io/ioutil" ++ "istio.io/istio/pkg/security" + "math" + "net" + "net/http" +@@ -701,6 +702,21 @@ + var certPool *x509.CertPool + var rootCert []byte + ++ if strings.EqualFold(agent.secOpts.CAProviderName, constants.CertProviderSpiffe) { ++ secretItem, err := agent.secretCache.GenerateSecret(security.RootCertReqResourceName) ++ if err != nil { ++ return nil, fmt.Errorf("failed to create TLS dial option with root certificates: %v", err) ++ } ++ ++ certPool = x509.NewCertPool() ++ ok := certPool.AppendCertsFromPEM(secretItem.RootCert) ++ if !ok { ++ return nil, fmt.Errorf("failed to create TLS dial option with root certificates") ++ } ++ ++ return certPool, nil ++ } ++ + xdsCACertPath, err := agent.FindRootCAForXDS() + if err != nil { + return nil, fmt.Errorf("failed to find root CA cert for XDS: %v", err) +diff --git a/pkg/istio-agent/agent.go b/pkg/istio-agent/agent.go +--- a/pkg/istio-agent/agent.go (revision 8f9263961b52f11f9bfb731e0eeae096bb0acfd0) ++++ b/pkg/istio-agent/agent.go (date 1636568553837) +@@ -110,7 +110,7 @@ + envoyWaitCh chan error + + sdsServer *sds.Server +- secretCache *cache.SecretManagerClient ++ secretCache security.SecretProvider + + // Used when proxying envoy xds via istio-agent is enabled. + xdsProxy *XdsProxy +@@ -545,6 +545,9 @@ + if a.cfg.XDSRootCerts == security.SystemRootCerts { + // Special case input for root cert configuration to use system root certificates + return "", nil ++ } else if strings.EqualFold(a.secOpts.CAProviderName, constants.CertProviderSpiffe) { ++ // The root cert is provided by the SPIFFE secret manager ++ return "", nil + } else if a.cfg.XDSRootCerts != "" { + // Using specific platform certs or custom roots + rootCAPath = a.cfg.XDSRootCerts +@@ -625,7 +628,12 @@ + } + + // newSecretManager creates the SecretManager for workload secrets +-func (a *Agent) newSecretManager() (*cache.SecretManagerClient, error) { ++func (a *Agent) newSecretManager() (security.SecretProvider, error) { ++ if strings.EqualFold(a.secOpts.CAProviderName, constants.CertProviderSpiffe) { ++ log.Info("Using SPIFFE identity plane") ++ return cache.NewSpiffeSecretManager(a.secOpts) ++ } ++ + // If proxy is using file mounted certs, we do not have to connect to CA. + if a.secOpts.FileMountedCerts { + log.Info("Workload is using file mounted certificates. Skipping connecting to CA") +diff --git a/pkg/config/constants/constants.go b/pkg/config/constants/constants.go +--- a/pkg/config/constants/constants.go (revision 8f9263961b52f11f9bfb731e0eeae096bb0acfd0) ++++ b/pkg/config/constants/constants.go (date 1636568293867) +@@ -125,4 +125,6 @@ + // CertProviderNone does not create any certificates for the control plane. It is assumed that some external + // load balancer, such as an Istio Gateway, is terminating the TLS. 
+ CertProviderNone = "none" ++ // SpiffeCertProvider uses the SPIFFE Workload API to fetch certificates ++ CertProviderSpiffe = "spiffe" + ) +diff --git a/go.mod b/go.mod +--- a/go.mod (revision 8f9263961b52f11f9bfb731e0eeae096bb0acfd0) ++++ b/go.mod (date 1636568293868) +@@ -70,6 +70,7 @@ + github.com/spf13/cobra v1.2.1 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.8.1 ++ github.com/spiffe/go-spiffe/v2 v2.0.0-beta.10 + github.com/stretchr/testify v1.7.0 + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/yl2chen/cidranger v1.0.2 +diff --git a/pkg/security/security.go b/pkg/security/security.go +--- a/pkg/security/security.go (revision 8f9263961b52f11f9bfb731e0eeae096bb0acfd0) ++++ b/pkg/security/security.go (date 1636568293868) +@@ -21,6 +21,7 @@ + "strings" + "time" + ++ "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "google.golang.org/grpc/metadata" + + "istio.io/pkg/env" +@@ -245,6 +246,13 @@ + GenerateSecret(resourceName string) (*SecretItem, error) + } + ++type SecretProvider interface { ++ SecretManager ++ Close() ++ SetUpdateCallback(func(string)) ++ UpdateConfigTrustBundle([]byte) error ++} ++ + // TokenExchanger provides common interfaces so that authentication providers could choose to implement their specific logic. + type TokenExchanger interface { + // ExchangeToken provides a common interface to exchange an existing token for a new one. +@@ -258,6 +266,9 @@ + + RootCert []byte + ++ // Trust bundles keyed by trust domain ++ TrustBundles *x509bundle.Set ++ + // ResourceName passed from envoy SDS discovery request. + // "ROOTCA" for root cert request, "default" for key/cert request. + ResourceName string +diff --git a/security/pkg/nodeagent/sds/sdsservice.go b/security/pkg/nodeagent/sds/sdsservice.go +--- a/security/pkg/nodeagent/sds/sdsservice.go (revision 8f9263961b52f11f9bfb731e0eeae096bb0acfd0) ++++ b/security/pkg/nodeagent/sds/sdsservice.go (date 1636568293868) +@@ -18,6 +18,7 @@ + import ( + "context" + "fmt" ++ "google.golang.org/protobuf/types/known/anypb" + "time" + + "github.com/cenkalti/backoff" +@@ -144,7 +145,11 @@ + return nil, fmt.Errorf("failed to generate secret for %v: %v", resourceName, err) + } + +- res := util.MessageToAny(toEnvoySecret(secret)) ++ envoySecret, err := toEnvoySecret(secret) ++ if err != nil { ++ return nil, fmt.Errorf("failed to generate secret for %v: %v", resourceName, err) ++ } ++ res := util.MessageToAny(envoySecret) + resources = append(resources, &discovery.Resource{ + Name: resourceName, + Resource: res, +@@ -199,13 +204,22 @@ + } + + // toEnvoySecret converts a security.SecretItem to an Envoy tls.Secret +-func toEnvoySecret(s *security.SecretItem) *tls.Secret { ++func toEnvoySecret(s *security.SecretItem) (*tls.Secret, error) { + secret := &tls.Secret{ + Name: s.ResourceName, + } + + cfg, ok := model.SdsCertificateConfigFromResourceName(s.ResourceName) + if s.ResourceName == security.RootCertReqResourceName || (ok && cfg.IsRootCertificate()) { ++ // are there federated bundles? 
++ if s.TrustBundles != nil && s.TrustBundles.Len() > 1 { ++ validatorConfig, err := buildSPIFFECertValidatorConfig(s) ++ if err != nil { ++ return nil, err ++ } ++ secret.Type = validatorConfig ++ return secret, nil ++ } + secret.Type = &tls.Secret_ValidationContext{ + ValidationContext: &tls.CertificateValidationContext{ + TrustedCa: &core.DataSource{ +@@ -232,7 +246,36 @@ + } + } + +- return secret ++ return secret, nil ++} ++ ++func buildSPIFFECertValidatorConfig(s *security.SecretItem) (*tls.Secret_ValidationContext, error) { ++ var configTrustDomains []*tls.SPIFFECertValidatorConfig_TrustDomain ++ ++ for _, bundle := range s.TrustBundles.Bundles() { ++ caBytes, err := bundle.Marshal() ++ if err != nil { ++ return nil, err ++ } ++ configTrustDomains = append(configTrustDomains, &tls.SPIFFECertValidatorConfig_TrustDomain{ ++ Name: bundle.TrustDomain().String(), ++ TrustBundle: &core.DataSource{Specifier: &core.DataSource_InlineBytes{ ++ InlineBytes: caBytes, ++ }}, ++ }) ++ } ++ ++ typedConfig, err := anypb.New(&tls.SPIFFECertValidatorConfig{TrustDomains: configTrustDomains}) ++ if err != nil { ++ return nil, err ++ } ++ ++ return &tls.Secret_ValidationContext{ValidationContext: &tls.CertificateValidationContext{ ++ CustomValidatorConfig: &core.TypedExtensionConfig{ ++ Name: "envoy.tls.cert_validator.spiffe", ++ TypedConfig: typedConfig, ++ }, ++ }}, nil + } + + func pushLog(names []string) model.XdsLogDetails { +diff --git a/pilot/pkg/bootstrap/certcontroller.go b/pilot/pkg/bootstrap/certcontroller.go +--- a/pilot/pkg/bootstrap/certcontroller.go (revision 8f9263961b52f11f9bfb731e0eeae096bb0acfd0) ++++ b/pilot/pkg/bootstrap/certcontroller.go (date 1636568293868) +@@ -290,3 +290,32 @@ + s.certMu.Unlock() + return nil + } ++ ++func (s *Server) setIstioCertBundleAndNotify(certChain []byte, key []byte, bundle []byte) { ++ keyPair, err := tls.X509KeyPair(certChain, key) ++ ++ if err != nil { ++ log.Errorf("istiod loading x509 key pairs failed: %v", err) ++ return ++ } ++ for _, c := range keyPair.Certificate { ++ x509Cert, err := x509.ParseCertificates(c) ++ if err != nil { ++ log.Errorf("x509 cert - ParseCertificates() error: %v", err) ++ return ++ } ++ for _, c := range x509Cert { ++ log.Infof("x509 cert - Issuer: %q, Subject: %q, SN: %x, NotBefore: %q, NotAfter: %q", ++ c.Issuer, c.Subject, c.SerialNumber, ++ c.NotBefore.Format(time.RFC3339), c.NotAfter.Format(time.RFC3339)) ++ } ++ } ++ ++ s.certMu.Lock() ++ s.istiodCert = &keyPair ++ s.certMu.Unlock() ++ log.Info("istiod certificates are set") ++ ++ s.istiodCertBundleWatcher.SetAndNotify(nil, nil, bundle) ++ log.Info("istiod Cert Bundle Watcher notified") ++} +diff --git a/security/pkg/nodeagent/cache/spiffesecret.go b/security/pkg/nodeagent/cache/spiffesecret.go +new file mode 100644 +--- /dev/null (date 1636568293868) ++++ b/security/pkg/nodeagent/cache/spiffesecret.go (date 1636568293868) +@@ -0,0 +1,217 @@ ++package cache ++ ++import ( ++ "bytes" ++ "fmt" ++ "sync" ++ "time" ++ ++ "github.com/spiffe/go-spiffe/v2/spiffeid" ++ "github.com/spiffe/go-spiffe/v2/workloadapi" ++ "golang.org/x/net/context" ++ "google.golang.org/grpc/codes" ++ "google.golang.org/grpc/status" ++ "istio.io/istio/pkg/security" ++ "istio.io/pkg/log" ++) ++ ++// SpiffeSecretManager is a source of SecretItems (X.509 SVIDs and trust bundles) maintained via the ++// Workload API. 
Implements the ++type SpiffeSecretManager struct { ++ sync.RWMutex ++ trustDomain spiffeid.TrustDomain ++ configTrustBundle []byte ++ secretItem *security.SecretItem ++ notifyCallback func(resourceName string) ++ cancelWatcher context.CancelFunc ++ updatedCh chan struct{} ++} ++ ++// NewSpiffeSecretManager creates a new SpiffeSecretManager. It blocks until the initial update ++// has been received from the Workload API. ++func NewSpiffeSecretManager(opt *security.Options) (*SpiffeSecretManager, error) { ++ td, err := spiffeid.TrustDomainFromString(opt.TrustDomain) ++ if err != nil { ++ return nil, fmt.Errorf("error trying to parse trust domain %q reason: %v", opt.TrustDomain, err) ++ } ++ ++ sm := &SpiffeSecretManager{ ++ trustDomain: td, ++ } ++ ++ ctx, cancel := context.WithCancel(context.Background()) ++ sm.cancelWatcher = cancel ++ sm.updatedCh = make(chan struct{}) ++ ++ go sm.watcherTask(ctx) ++ ++ err = sm.WaitUntilUpdated(ctx) ++ if err != nil { ++ return nil, fmt.Errorf("error initializing the SPIFFE secret manager") ++ } ++ ++ return sm, nil ++} ++ ++// WaitUntilUpdated waits until the secret manager is updated or the context is done, ++// in which case ctx.Err() is returned. ++func (w *SpiffeSecretManager) WaitUntilUpdated(ctx context.Context) error { ++ select { ++ case <-w.updatedCh: ++ return nil ++ case <-ctx.Done(): ++ return ctx.Err() ++ } ++} ++ ++// Updated returns a channel that is sent on whenever the secret manager is updated. ++func (w *SpiffeSecretManager) Updated() <-chan struct{} { ++ return w.updatedCh ++} ++ ++// GenerateSecret generates a SecretItem for the given resourceName (default or ROOTCA). ++func (s *SpiffeSecretManager) GenerateSecret(resourceName string) (*security.SecretItem, error) { ++ s.RLock() ++ defer s.RUnlock() ++ ++ si := s.secretItem ++ if si == nil { ++ return nil, fmt.Errorf("secret was not in cache for resource: %v", resourceName) ++ } ++ ++ if resourceName == security.RootCertReqResourceName { ++ ++ ns := &security.SecretItem{ ++ ResourceName: resourceName, ++ RootCert: si.RootCert, ++ // adding all trust bundles ++ TrustBundles: si.TrustBundles, ++ } ++ ++ cacheLog.WithLabels("ttl", time.Until(si.ExpireTime)).Info("returned workload trust anchor from cache") ++ return ns, nil ++ } ++ ++ ns := &security.SecretItem{ ++ ResourceName: resourceName, ++ CertificateChain: si.CertificateChain, ++ PrivateKey: si.PrivateKey, ++ ExpireTime: si.ExpireTime, ++ CreatedTime: si.CreatedTime, ++ } ++ cacheLog.WithLabels("ttl", time.Until(si.ExpireTime)).Info("returned workload certificate from cache") ++ return ns, nil ++} ++ ++// UpdateConfigTrustBundle updates the configTrustBundle and calls the notify callback function. ++func (s *SpiffeSecretManager) UpdateConfigTrustBundle(trustBundle []byte) error { ++ log.WithLabels("UpdateConfigTrustBundle").Info(string(trustBundle)) ++ s.Lock() ++ defer s.Unlock() ++ ++ if bytes.Equal(s.configTrustBundle, trustBundle) { ++ return nil ++ } ++ s.configTrustBundle = trustBundle ++ s.callUpdateCallback(security.RootCertReqResourceName) ++ return nil ++} ++ ++// Close closes the SPIFFE secret manager instance. ++func (s *SpiffeSecretManager) Close() { ++ if s.cancelWatcher != nil { ++ log.Info("closing SPIFFE secret manager") ++ s.cancelWatcher() ++ } ++} ++ ++// SetUpdateCallback configures the manager with a notify callback function. 
++func (s *SpiffeSecretManager) SetUpdateCallback(f func(resourceName string)) { ++ s.Lock() ++ defer s.Unlock() ++ s.notifyCallback = f ++} ++ ++// OnX509ContextUpdate is run every time a new update is pushed by the SPIFFE Workload API. ++func (s *SpiffeSecretManager) OnX509ContextUpdate(c *workloadapi.X509Context) { ++ log.Info("got new identities from the SPIFFE Workload API") ++ if len(c.SVIDs) < 1 { ++ log.Error("identities were not found on workload API response") ++ return ++ } ++ if len(c.SVIDs[0].Certificates) < 1 { ++ log.Error("leaf certificate was not found on workload API response") ++ return ++ } ++ ++ svid := c.DefaultSVID() ++ workloadChain, workloadKey, err := svid.Marshal() ++ if err != nil { ++ log.Fatalf("unable to marshal X.509 SVID: %v", err) ++ return ++ } ++ ++ bundle, ok := c.Bundles.Get(s.trustDomain) ++ if !ok { ++ log.WithLabels("trust_domain", s.trustDomain).Fatal("unable to get trust bundle for trust domain") ++ return ++ } ++ ++ root, err := bundle.Marshal() ++ if err != nil { ++ log.Fatalf("unable to marshal trust bundle: %v", err) ++ return ++ } ++ ++ certChain := concatCerts([]string{string(workloadChain)}) ++ leaf := c.SVIDs[0].Certificates[0] ++ ++ item := &security.SecretItem{ ++ CertificateChain: certChain, ++ PrivateKey: workloadKey, ++ RootCert: root, ++ TrustBundles: c.Bundles, ++ ResourceName: security.WorkloadKeyCertResourceName, ++ CreatedTime: leaf.NotBefore, ++ ExpireTime: leaf.NotAfter, ++ } ++ ++ s.Lock() ++ defer s.Unlock() ++ ++ if s.secretItem == nil || !bytes.Equal(s.secretItem.RootCert, item.RootCert) { ++ s.callUpdateCallback(security.RootCertReqResourceName) ++ } ++ if s.secretItem == nil || !bytes.Equal(s.secretItem.CertificateChain, item.CertificateChain) { ++ s.callUpdateCallback(security.WorkloadKeyCertResourceName) ++ } ++ s.secretItem = item ++ ++ select { ++ case s.updatedCh <- struct{}{}: ++ log.Info("notify message sent on updateCh") ++ default: ++ log.Info("notify message dropped") ++ } ++} ++ ++// OnX509ContextWatchError is run when the client runs into an error. 
++func (s *SpiffeSecretManager) OnX509ContextWatchError(err error) { ++ if status.Code(err) != codes.Canceled { ++ log.Infof("error calling SPIFE Workload API: %v", err) ++ } ++} ++ ++func (s *SpiffeSecretManager) callUpdateCallback(resourceName string) { ++ log.WithLabels("resource", resourceName).Info("fetched new identity from SPIFFE Workload API") ++ if s.notifyCallback != nil { ++ s.notifyCallback(resourceName) ++ } ++} ++ ++func (s *SpiffeSecretManager) watcherTask(ctx context.Context) { ++ err := workloadapi.WatchX509Context(ctx, s) ++ if err != nil && status.Code(err) != codes.Canceled { ++ log.Fatalf("error watching SPIFFE workload API: %v", err) ++ } ++} diff --git a/POC/patches/poc.release-1.12.patch b/POC/patches/poc.release-1.12.patch new file mode 100644 index 00000000..d0753424 --- /dev/null +++ b/POC/patches/poc.release-1.12.patch @@ -0,0 +1,621 @@ +diff --git a/pilot/pkg/bootstrap/server.go b/pilot/pkg/bootstrap/server.go +--- a/pilot/pkg/bootstrap/server.go (revision c999d8a6bb03b6c814e9169ab9676d19e261a8fc) ++++ b/pilot/pkg/bootstrap/server.go (date 1636569704605) +@@ -28,6 +28,9 @@ + "sync" + "time" + ++ "github.com/spiffe/go-spiffe/v2/spiffeid" ++ "github.com/spiffe/go-spiffe/v2/workloadapi" ++ + "github.com/fsnotify/fsnotify" + prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + prom "github.com/prometheus/client_golang/prometheus" +@@ -184,6 +187,9 @@ + statusReporter *status.Reporter + // RWConfigStore is the configstore which allows updates, particularly for status. + RWConfigStore model.ConfigStoreCache ++ ++ // source of X.509 certs and bundle, when using SPIFFE Workload API as cert provider ++ x509Source *workloadapi.X509Source + } + + // NewServer creates a new Server instance based on the provided arguments. +@@ -216,6 +222,16 @@ + for _, fn := range initFuncs { + fn(s) + } ++ ++ if strings.EqualFold(features.PilotCertProvider, constants.CertProviderSpiffe) { ++ ctx := context.Background() ++ x509Source, err := workloadapi.NewX509Source(ctx) ++ if err != nil { ++ return nil, fmt.Errorf("failed creating spiffe X.509 source: %v", err) ++ } ++ s.x509Source = x509Source ++ } ++ + // Initialize workload Trust Bundle before XDS Server + e.TrustBundle = s.workloadTrustBundle + s.XDSServer = xds.NewDiscoveryServer(e, args.Plugins, args.PodName, args.Namespace, args.RegistryOptions.KubeOptions.ClusterAliases) +@@ -991,6 +1007,20 @@ + if err == nil { + err = s.initIstiodCertLoader() + } ++ } else if strings.EqualFold(features.PilotCertProvider, constants.CertProviderSpiffe) { ++ chain, key, err := s.getCertAndKeyBytes() ++ if err != nil { ++ return err ++ } ++ ++ bundleBytes, err := s.getBundleBytes() ++ if err != nil { ++ return err ++ } ++ ++ s.setIstioCertBundleAndNotify(chain, key, bundleBytes) ++ s.watchIstioCertUpdates() ++ return nil + } + + return err +@@ -998,7 +1028,7 @@ + + // createPeerCertVerifier creates a SPIFFE certificate verifier with the current istiod configuration. 
+ func (s *Server) createPeerCertVerifier(tlsOptions TLSOptions) (*spiffe.PeerCertVerifier, error) { +- if tlsOptions.CaCertFile == "" && s.CA == nil && features.SpiffeBundleEndpoints == "" && !s.isDisableCa() { ++ if tlsOptions.CaCertFile == "" && s.CA == nil && features.SpiffeBundleEndpoints == "" && !s.isDisableCa() && strings.ToLower(features.PilotCertProvider) != constants.CertProviderSpiffe { + // Running locally without configured certs - no TLS mode + return nil, nil + } +@@ -1034,6 +1064,18 @@ + peerCertVerifier.AddMappings(certMap) + } + ++ if strings.EqualFold(features.PilotCertProvider, constants.CertProviderSpiffe) { ++ bundleBytes, err := s.getBundleBytes() ++ if err != nil { ++ return nil, err ++ } ++ ++ err = peerCertVerifier.AddMappingFromPEM(spiffe.GetTrustDomain(), bundleBytes) ++ if err != nil { ++ return nil, fmt.Errorf("add root CAs into peerCertVerifier failed: %v", err) ++ } ++ } ++ + return peerCertVerifier, nil + } + +@@ -1251,3 +1293,57 @@ + func (s *Server) isDisableCa() bool { + return features.PilotCertProvider == constants.CertProviderKubernetes && s.RA != nil + } ++ ++func (s *Server) watchIstioCertUpdates() { ++ go func() { ++ updatedChan := s.x509Source.Updated() ++ for { ++ <-updatedChan ++ chain, key, err := s.getCertAndKeyBytes() ++ if err != nil { ++ log.Errorf("error watching SPIFFE updates: %v", err) ++ continue ++ } ++ ++ bundleBytes, err := s.getBundleBytes() ++ if err != nil { ++ log.Errorf("error watching SPIFFE updates: %v", err) ++ continue ++ } ++ ++ s.setIstioCertBundleAndNotify(chain, key, bundleBytes) ++ } ++ }() ++} ++ ++func (s *Server) getCertAndKeyBytes() ([]byte, []byte, error) { ++ svid, err := s.x509Source.GetX509SVID() ++ if err != nil { ++ return nil, nil, fmt.Errorf("failed fetching X.509 SVID: %v", err) ++ } ++ ++ chain, key, err := svid.Marshal() ++ if err != nil { ++ return nil, nil, fmt.Errorf("unable to marshal X.509 SVID: %v", err) ++ } ++ ++ return chain, key, nil ++} ++ ++func (s *Server) getBundleBytes() ([]byte, error) { ++ trustDomain, err := spiffeid.TrustDomainFromString(spiffe.GetTrustDomain()) ++ if err != nil { ++ return nil, fmt.Errorf("error trying to parse trust domain %q reason: %v", spiffe.GetTrustDomain(), err) ++ } ++ ++ bundle, err := s.x509Source.GetX509BundleForTrustDomain(trustDomain) ++ if err != nil { ++ return nil, fmt.Errorf("unable to find X.509 bundle for trust domain %q: %v", trustDomain, err) ++ } ++ ++ bundleBytes, err := bundle.Marshal() ++ if err != nil { ++ return nil, fmt.Errorf("unable to marshal X.509 byndle: %v", err) ++ } ++ return bundleBytes, nil ++} +diff --git a/pkg/istio-agent/xds_proxy.go b/pkg/istio-agent/xds_proxy.go +--- a/pkg/istio-agent/xds_proxy.go (revision c999d8a6bb03b6c814e9169ab9676d19e261a8fc) ++++ b/pkg/istio-agent/xds_proxy.go (date 1636569704595) +@@ -20,6 +20,7 @@ + "crypto/x509" + "encoding/json" + "fmt" ++ "istio.io/istio/pkg/security" + "math" + "net" + "net/http" +@@ -704,6 +705,21 @@ + var certPool *x509.CertPool + var rootCert []byte + ++ if strings.EqualFold(agent.secOpts.CAProviderName, constants.CertProviderSpiffe) { ++ secretItem, err := agent.secretCache.GenerateSecret(security.RootCertReqResourceName) ++ if err != nil { ++ return nil, fmt.Errorf("failed to create TLS dial option with root certificates: %v", err) ++ } ++ ++ certPool = x509.NewCertPool() ++ ok := certPool.AppendCertsFromPEM(secretItem.RootCert) ++ if !ok { ++ return nil, fmt.Errorf("failed to create TLS dial option with root certificates") ++ } ++ ++ return certPool, nil ++ } ++ + 
xdsCACertPath, err := agent.FindRootCAForXDS() + if err != nil { + return nil, fmt.Errorf("failed to find root CA cert for XDS: %v", err) +diff --git a/pkg/istio-agent/agent.go b/pkg/istio-agent/agent.go +--- a/pkg/istio-agent/agent.go (revision c999d8a6bb03b6c814e9169ab9676d19e261a8fc) ++++ b/pkg/istio-agent/agent.go (date 1636569972515) +@@ -109,7 +109,7 @@ + envoyWaitCh chan error + + sdsServer *sds.Server +- secretCache *cache.SecretManagerClient ++ secretCache security.SecretProvider + + // Used when proxying envoy xds via istio-agent is enabled. + xdsProxy *XdsProxy +@@ -552,6 +552,9 @@ + if a.cfg.XDSRootCerts == security.SystemRootCerts { + // Special case input for root cert configuration to use system root certificates + return "", nil ++ } else if strings.EqualFold(a.secOpts.CAProviderName, constants.CertProviderSpiffe) { ++ // The root cert is provided by the SPIFFE secret manager ++ return "", nil + } else if a.cfg.XDSRootCerts != "" { + // Using specific platform certs or custom roots + rootCAPath = a.cfg.XDSRootCerts +@@ -632,7 +635,12 @@ + } + + // newSecretManager creates the SecretManager for workload secrets +-func (a *Agent) newSecretManager() (*cache.SecretManagerClient, error) { ++func (a *Agent) newSecretManager() (security.SecretProvider, error) { ++ if strings.EqualFold(a.secOpts.CAProviderName, constants.CertProviderSpiffe) { ++ log.Info("Using SPIFFE identity plane") ++ return cache.NewSpiffeSecretManager(a.secOpts) ++ } ++ + // If proxy is using file mounted certs, we do not have to connect to CA. + if a.secOpts.FileMountedCerts { + log.Info("Workload is using file mounted certificates. Skipping connecting to CA") +diff --git a/pkg/config/constants/constants.go b/pkg/config/constants/constants.go +--- a/pkg/config/constants/constants.go (revision c999d8a6bb03b6c814e9169ab9676d19e261a8fc) ++++ b/pkg/config/constants/constants.go (date 1636569704597) +@@ -137,4 +137,6 @@ + // CertProviderNone does not create any certificates for the control plane. It is assumed that some external + // load balancer, such as an Istio Gateway, is terminating the TLS. 
+ CertProviderNone = "none" ++ // CertProviderSpiffe uses the SPIFFE Workload API to fetch certificates ++ CertProviderSpiffe = "spiffe" + ) +diff --git a/go.mod b/go.mod +--- a/go.mod (revision c999d8a6bb03b6c814e9169ab9676d19e261a8fc) ++++ b/go.mod (date 1636569434653) +@@ -68,6 +68,7 @@ + github.com/spf13/cobra v1.2.1 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.8.1 ++ github.com/spiffe/go-spiffe/v2 v2.0.0-beta.10 + github.com/stretchr/testify v1.7.0 + github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect +diff --git a/pilot/pkg/bootstrap/certcontroller.go b/pilot/pkg/bootstrap/certcontroller.go +--- a/pilot/pkg/bootstrap/certcontroller.go (revision c999d8a6bb03b6c814e9169ab9676d19e261a8fc) ++++ b/pilot/pkg/bootstrap/certcontroller.go (date 1636569434653) +@@ -279,3 +279,32 @@ + s.certMu.Unlock() + return nil + } ++ ++func (s *Server) setIstioCertBundleAndNotify(certChain []byte, key []byte, bundle []byte) { ++ keyPair, err := tls.X509KeyPair(certChain, key) ++ ++ if err != nil { ++ log.Errorf("istiod loading x509 key pairs failed: %v", err) ++ return ++ } ++ for _, c := range keyPair.Certificate { ++ x509Cert, err := x509.ParseCertificates(c) ++ if err != nil { ++ log.Errorf("x509 cert - ParseCertificates() error: %v", err) ++ return ++ } ++ for _, c := range x509Cert { ++ log.Infof("x509 cert - Issuer: %q, Subject: %q, SN: %x, NotBefore: %q, NotAfter: %q", ++ c.Issuer, c.Subject, c.SerialNumber, ++ c.NotBefore.Format(time.RFC3339), c.NotAfter.Format(time.RFC3339)) ++ } ++ } ++ ++ s.certMu.Lock() ++ s.istiodCert = &keyPair ++ s.certMu.Unlock() ++ log.Info("istiod certificates are set") ++ ++ s.istiodCertBundleWatcher.SetAndNotify(nil, nil, bundle) ++ log.Info("istiod Cert Bundle Watcher notified") ++} +diff --git a/pkg/security/security.go b/pkg/security/security.go +--- a/pkg/security/security.go (revision c999d8a6bb03b6c814e9169ab9676d19e261a8fc) ++++ b/pkg/security/security.go (date 1636569434653) +@@ -24,6 +24,7 @@ + + "google.golang.org/grpc/metadata" + ++ "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "istio.io/pkg/env" + istiolog "istio.io/pkg/log" + ) +@@ -285,6 +286,13 @@ + GenerateSecret(resourceName string) (*SecretItem, error) + } + ++type SecretProvider interface { ++ SecretManager ++ Close() ++ SetUpdateCallback(func(string)) ++ UpdateConfigTrustBundle([]byte) error ++} ++ + // TokenExchanger provides common interfaces so that authentication providers could choose to implement their specific logic. + type TokenExchanger interface { + // ExchangeToken provides a common interface to exchange an existing token for a new one. +@@ -298,6 +306,9 @@ + + RootCert []byte + ++ // Trust bundles keyed by trust domain ++ TrustBundles *x509bundle.Set ++ + // ResourceName passed from envoy SDS discovery request. + // "ROOTCA" for root cert request, "default" for key/cert request. 
+ ResourceName string +diff --git a/security/pkg/nodeagent/sds/sdsservice.go b/security/pkg/nodeagent/sds/sdsservice.go +--- a/security/pkg/nodeagent/sds/sdsservice.go (revision c999d8a6bb03b6c814e9169ab9676d19e261a8fc) ++++ b/security/pkg/nodeagent/sds/sdsservice.go (date 1636569434654) +@@ -18,6 +18,7 @@ + import ( + "context" + "fmt" ++ "google.golang.org/protobuf/types/known/anypb" + "time" + + "github.com/cenkalti/backoff/v4" +@@ -148,7 +149,11 @@ + return nil, fmt.Errorf("failed to generate secret for %v: %v", resourceName, err) + } + +- res := util.MessageToAny(toEnvoySecret(secret, s.rootCaPath)) ++ envoySecret, err := toEnvoySecret(secret, s.rootCaPath) ++ if err != nil { ++ return nil, fmt.Errorf("failed to generate secret for %v: %v", resourceName, err) ++ } ++ res := util.MessageToAny(envoySecret) + resources = append(resources, &discovery.Resource{ + Name: resourceName, + Resource: res, +@@ -203,7 +208,7 @@ + } + + // toEnvoySecret converts a security.SecretItem to an Envoy tls.Secret +-func toEnvoySecret(s *security.SecretItem, caRootPath string) *tls.Secret { ++func toEnvoySecret(s *security.SecretItem, caRootPath string) (*tls.Secret, error) { + secret := &tls.Secret{ + Name: s.ResourceName, + } +@@ -215,6 +220,15 @@ + cfg, ok = security.SdsCertificateConfigFromResourceName(s.ResourceName) + } + if s.ResourceName == security.RootCertReqResourceName || (ok && cfg.IsRootCertificate()) { ++ // are there federated bundles? ++ if s.TrustBundles != nil && s.TrustBundles.Len() > 1 { ++ validatorConfig, err := buildSPIFFECertValidatorConfig(s) ++ if err != nil { ++ return nil, err ++ } ++ secret.Type = validatorConfig ++ return secret, nil ++ } + secret.Type = &tls.Secret_ValidationContext{ + ValidationContext: &tls.CertificateValidationContext{ + TrustedCa: &core.DataSource{ +@@ -241,7 +255,7 @@ + } + } + +- return secret ++ return secret, nil + } + + func pushLog(names []string) model.XdsLogDetails { +@@ -251,3 +265,32 @@ + } + return model.DefaultXdsLogDetails + } ++ ++func buildSPIFFECertValidatorConfig(s *security.SecretItem) (*tls.Secret_ValidationContext, error) { ++ var configTrustDomains []*tls.SPIFFECertValidatorConfig_TrustDomain ++ ++ for _, bundle := range s.TrustBundles.Bundles() { ++ caBytes, err := bundle.Marshal() ++ if err != nil { ++ return nil, err ++ } ++ configTrustDomains = append(configTrustDomains, &tls.SPIFFECertValidatorConfig_TrustDomain{ ++ Name: bundle.TrustDomain().String(), ++ TrustBundle: &core.DataSource{Specifier: &core.DataSource_InlineBytes{ ++ InlineBytes: caBytes, ++ }}, ++ }) ++ } ++ ++ typedConfig, err := anypb.New(&tls.SPIFFECertValidatorConfig{TrustDomains: configTrustDomains}) ++ if err != nil { ++ return nil, err ++ } ++ ++ return &tls.Secret_ValidationContext{ValidationContext: &tls.CertificateValidationContext{ ++ CustomValidatorConfig: &core.TypedExtensionConfig{ ++ Name: "envoy.tls.cert_validator.spiffe", ++ TypedConfig: typedConfig, ++ }, ++ }}, nil ++} +diff --git a/security/pkg/nodeagent/cache/spiffesecret.go b/security/pkg/nodeagent/cache/spiffesecret.go +new file mode 100644 +--- /dev/null (date 1636569704608) ++++ b/security/pkg/nodeagent/cache/spiffesecret.go (date 1636569704608) +@@ -0,0 +1,217 @@ ++package cache ++ ++import ( ++ "bytes" ++ "fmt" ++ "sync" ++ "time" ++ ++ "github.com/spiffe/go-spiffe/v2/spiffeid" ++ "github.com/spiffe/go-spiffe/v2/workloadapi" ++ "golang.org/x/net/context" ++ "google.golang.org/grpc/codes" ++ "google.golang.org/grpc/status" ++ "istio.io/istio/pkg/security" ++ 
"istio.io/pkg/log" ++) ++ ++// SpiffeSecretManager is a source of SecretItems (X.509 SVIDs and trust bundles) maintained via the ++// Workload API. Implements the ++type SpiffeSecretManager struct { ++ sync.RWMutex ++ trustDomain spiffeid.TrustDomain ++ configTrustBundle []byte ++ secretItem *security.SecretItem ++ notifyCallback func(resourceName string) ++ cancelWatcher context.CancelFunc ++ updatedCh chan struct{} ++} ++ ++// NewSpiffeSecretManager creates a new SpiffeSecretManager. It blocks until the initial update ++// has been received from the Workload API. ++func NewSpiffeSecretManager(opt *security.Options) (*SpiffeSecretManager, error) { ++ td, err := spiffeid.TrustDomainFromString(opt.TrustDomain) ++ if err != nil { ++ return nil, fmt.Errorf("error trying to parse trust domain %q reason: %v", opt.TrustDomain, err) ++ } ++ ++ sm := &SpiffeSecretManager{ ++ trustDomain: td, ++ } ++ ++ ctx, cancel := context.WithCancel(context.Background()) ++ sm.cancelWatcher = cancel ++ sm.updatedCh = make(chan struct{}) ++ ++ go sm.watcherTask(ctx) ++ ++ err = sm.WaitUntilUpdated(ctx) ++ if err != nil { ++ return nil, fmt.Errorf("error initializing the SPIFFE secret manager") ++ } ++ ++ return sm, nil ++} ++ ++// WaitUntilUpdated waits until the secret manager is updated or the context is done, ++// in which case ctx.Err() is returned. ++func (w *SpiffeSecretManager) WaitUntilUpdated(ctx context.Context) error { ++ select { ++ case <-w.updatedCh: ++ return nil ++ case <-ctx.Done(): ++ return ctx.Err() ++ } ++} ++ ++// Updated returns a channel that is sent on whenever the secret manager is updated. ++func (w *SpiffeSecretManager) Updated() <-chan struct{} { ++ return w.updatedCh ++} ++ ++// GenerateSecret generates a SecretItem for the given resourceName (default or ROOTCA). ++func (s *SpiffeSecretManager) GenerateSecret(resourceName string) (*security.SecretItem, error) { ++ s.RLock() ++ defer s.RUnlock() ++ ++ si := s.secretItem ++ if si == nil { ++ return nil, fmt.Errorf("secret was not in cache for resource: %v", resourceName) ++ } ++ ++ if resourceName == security.RootCertReqResourceName { ++ ++ ns := &security.SecretItem{ ++ ResourceName: resourceName, ++ RootCert: si.RootCert, ++ // adding all trust bundles ++ TrustBundles: si.TrustBundles, ++ } ++ ++ cacheLog.WithLabels("ttl", time.Until(si.ExpireTime)).Info("returned workload trust anchor from cache") ++ return ns, nil ++ } ++ ++ ns := &security.SecretItem{ ++ ResourceName: resourceName, ++ CertificateChain: si.CertificateChain, ++ PrivateKey: si.PrivateKey, ++ ExpireTime: si.ExpireTime, ++ CreatedTime: si.CreatedTime, ++ } ++ cacheLog.WithLabels("ttl", time.Until(si.ExpireTime)).Info("returned workload certificate from cache") ++ return ns, nil ++} ++ ++// UpdateConfigTrustBundle updates the configTrustBundle and calls the notify callback function. ++func (s *SpiffeSecretManager) UpdateConfigTrustBundle(trustBundle []byte) error { ++ log.WithLabels("UpdateConfigTrustBundle").Info(string(trustBundle)) ++ s.Lock() ++ defer s.Unlock() ++ ++ if bytes.Equal(s.configTrustBundle, trustBundle) { ++ return nil ++ } ++ s.configTrustBundle = trustBundle ++ s.callUpdateCallback(security.RootCertReqResourceName) ++ return nil ++} ++ ++// Close closes the SPIFFE secret manager instance. ++func (s *SpiffeSecretManager) Close() { ++ if s.cancelWatcher != nil { ++ log.Info("closing SPIFFE secret manager") ++ s.cancelWatcher() ++ } ++} ++ ++// SetUpdateCallback configures the manager with a notify callback function. 
++func (s *SpiffeSecretManager) SetUpdateCallback(f func(resourceName string)) { ++ s.Lock() ++ defer s.Unlock() ++ s.notifyCallback = f ++} ++ ++// OnX509ContextUpdate is run every time a new update is pushed by the SPIFFE Workload API. ++func (s *SpiffeSecretManager) OnX509ContextUpdate(c *workloadapi.X509Context) { ++ log.Info("got new identities from the SPIFFE Workload API") ++ if len(c.SVIDs) < 1 { ++ log.Error("identities were not found on workload API response") ++ return ++ } ++ if len(c.SVIDs[0].Certificates) < 1 { ++ log.Error("leaf certificate was not found on workload API response") ++ return ++ } ++ ++ svid := c.DefaultSVID() ++ workloadChain, workloadKey, err := svid.Marshal() ++ if err != nil { ++ log.Fatalf("unable to marshal X.509 SVID: %v", err) ++ return ++ } ++ ++ bundle, ok := c.Bundles.Get(s.trustDomain) ++ if !ok { ++ log.WithLabels("trust_domain", s.trustDomain).Fatal("unable to get trust bundle for trust domain") ++ return ++ } ++ ++ root, err := bundle.Marshal() ++ if err != nil { ++ log.Fatalf("unable to marshal trust bundle: %v", err) ++ return ++ } ++ ++ certChain := concatCerts([]string{string(workloadChain)}) ++ leaf := c.SVIDs[0].Certificates[0] ++ ++ item := &security.SecretItem{ ++ CertificateChain: certChain, ++ PrivateKey: workloadKey, ++ RootCert: root, ++ TrustBundles: c.Bundles, ++ ResourceName: security.WorkloadKeyCertResourceName, ++ CreatedTime: leaf.NotBefore, ++ ExpireTime: leaf.NotAfter, ++ } ++ ++ s.Lock() ++ defer s.Unlock() ++ ++ if s.secretItem == nil || !bytes.Equal(s.secretItem.RootCert, item.RootCert) { ++ s.callUpdateCallback(security.RootCertReqResourceName) ++ } ++ if s.secretItem == nil || !bytes.Equal(s.secretItem.CertificateChain, item.CertificateChain) { ++ s.callUpdateCallback(security.WorkloadKeyCertResourceName) ++ } ++ s.secretItem = item ++ ++ select { ++ case s.updatedCh <- struct{}{}: ++ log.Info("notify message sent on updateCh") ++ default: ++ log.Info("notify message dropped") ++ } ++} ++ ++// OnX509ContextWatchError is run when the client runs into an error. 
++func (s *SpiffeSecretManager) OnX509ContextWatchError(err error) { ++ if status.Code(err) != codes.Canceled { ++ log.Infof("error calling SPIFE Workload API: %v", err) ++ } ++} ++ ++func (s *SpiffeSecretManager) callUpdateCallback(resourceName string) { ++ log.WithLabels("resource", resourceName).Info("fetched new identity from SPIFFE Workload API") ++ if s.notifyCallback != nil { ++ s.notifyCallback(resourceName) ++ } ++} ++ ++func (s *SpiffeSecretManager) watcherTask(ctx context.Context) { ++ err := workloadapi.WatchX509Context(ctx, s) ++ if err != nil && status.Code(err) != codes.Canceled { ++ log.Fatalf("error watching SPIFFE workload API: %v", err) ++ } ++} diff --git a/POC/patches/sds-approach.master-fix.patch b/POC/patches/sds-approach.master-fix.patch new file mode 100644 index 00000000..533895b9 --- /dev/null +++ b/POC/patches/sds-approach.master-fix.patch @@ -0,0 +1,3052 @@ +diff --git a/manifests/charts/gateways/istio-egress/templates/deployment.yaml b/manifests/charts/gateways/istio-egress/templates/deployment.yaml +index 8c71ea1890..fec4323520 100644 +--- a/manifests/charts/gateways/istio-egress/templates/deployment.yaml ++++ b/manifests/charts/gateways/istio-egress/templates/deployment.yaml +@@ -225,6 +225,8 @@ spec: + - name: ISTIO_META_CLUSTER_ID + value: "{{ $.Values.global.multiCluster.clusterName | default `Kubernetes` }}" + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + - name: istio-envoy + mountPath: /etc/istio/proxy + - name: config-volume +@@ -264,7 +266,9 @@ spec: + {{ toYaml $gateway.additionalContainers | indent 8 }} + {{- end }} + volumes: +-{{- if eq .Values.global.pilotCertProvider "istiod" }} ++ - emptyDir: ++ name: workload-identity ++ {{- if eq .Values.global.pilotCertProvider "istiod" }} + - name: istiod-ca-cert + configMap: + name: istio-ca-root-cert +diff --git a/manifests/charts/gateways/istio-ingress/templates/deployment.yaml b/manifests/charts/gateways/istio-ingress/templates/deployment.yaml +index 45d7695a40..64f0173e53 100644 +--- a/manifests/charts/gateways/istio-ingress/templates/deployment.yaml ++++ b/manifests/charts/gateways/istio-ingress/templates/deployment.yaml +@@ -225,6 +225,8 @@ spec: + - name: ISTIO_META_CLUSTER_ID + value: "{{ $.Values.global.multiCluster.clusterName | default `Kubernetes` }}" + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + - name: istio-envoy + mountPath: /etc/istio/proxy + - name: config-volume +@@ -264,6 +266,8 @@ spec: + {{ toYaml $gateway.additionalContainers | indent 8 }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.pilotCertProvider "istiod" }} + - name: istiod-ca-cert + configMap: +diff --git a/manifests/charts/gateways/istio-ingress/templates/injected-deployment.yaml b/manifests/charts/gateways/istio-ingress/templates/injected-deployment.yaml +index 13e75d0e64..b28f6f39a7 100644 +--- a/manifests/charts/gateways/istio-ingress/templates/injected-deployment.yaml ++++ b/manifests/charts/gateways/istio-ingress/templates/injected-deployment.yaml +@@ -102,6 +102,8 @@ spec: + value: {{ $val | quote }} + {{- end }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- range $gateway.secretVolumes }} + - name: {{ .name }} + mountPath: {{ .mountPath | quote }} +@@ -118,6 +120,8 @@ spec: + {{ toYaml $gateway.additionalContainers | indent 8 }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + {{- range 
$gateway.secretVolumes }} + - name: {{ .name }} + secret: +diff --git a/manifests/charts/istio-control/istio-discovery/files/gateway-injection-template.yaml b/manifests/charts/istio-control/istio-discovery/files/gateway-injection-template.yaml +index d91e17f7df..4f3dc16d55 100644 +--- a/manifests/charts/istio-control/istio-discovery/files/gateway-injection-template.yaml ++++ b/manifests/charts/istio-control/istio-discovery/files/gateway-injection-template.yaml +@@ -131,6 +131,8 @@ spec: + timeoutSeconds: 3 + failureThreshold: {{ .Values.global.proxy.readinessFailureThreshold }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + mountPath: /var/run/secrets/workload-spiffe-credentials +@@ -158,6 +160,8 @@ spec: + - name: istio-podinfo + mountPath: /etc/istio/pod + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + csi: +diff --git a/manifests/charts/istio-control/istio-discovery/files/gen-istio.yaml b/manifests/charts/istio-control/istio-discovery/files/gen-istio.yaml +index c5201dffed..11bf5bd58b 100644 +--- a/manifests/charts/istio-control/istio-discovery/files/gen-istio.yaml ++++ b/manifests/charts/istio-control/istio-discovery/files/gen-istio.yaml +@@ -616,6 +616,8 @@ data: + {{ end }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + csi: +@@ -822,6 +824,8 @@ data: + timeoutSeconds: 3 + failureThreshold: {{ .Values.global.proxy.readinessFailureThreshold }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + mountPath: /var/run/secrets/workload-spiffe-credentials +diff --git a/manifests/charts/istio-control/istio-discovery/files/grpc-agent.yaml b/manifests/charts/istio-control/istio-discovery/files/grpc-agent.yaml +index 182e39b713..824d0573b0 100644 +--- a/manifests/charts/istio-control/istio-discovery/files/grpc-agent.yaml ++++ b/manifests/charts/istio-control/istio-discovery/files/grpc-agent.yaml +@@ -177,6 +177,8 @@ spec: + {{- end }} + {{- end }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.pilotCertProvider "istiod" }} + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert +@@ -199,6 +201,8 @@ spec: + {{ end }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + # UDS channel between istioagent and gRPC client for XDS/SDS + - emptyDir: + medium: Memory +diff --git a/manifests/charts/istio-control/istio-discovery/files/injection-template.yaml b/manifests/charts/istio-control/istio-discovery/files/injection-template.yaml +index dbe7708093..cee0ecf6e6 100644 +--- a/manifests/charts/istio-control/istio-discovery/files/injection-template.yaml ++++ b/manifests/charts/istio-control/istio-discovery/files/injection-template.yaml +@@ -356,6 +356,8 @@ spec: + resources: + {{ template "resources" . 
}} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + mountPath: /var/run/secrets/workload-spiffe-credentials +@@ -398,6 +400,8 @@ spec: + {{ end }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + csi: +diff --git a/manifests/charts/istiod-remote/files/gateway-injection-template.yaml b/manifests/charts/istiod-remote/files/gateway-injection-template.yaml +index d91e17f7df..4f3dc16d55 100644 +--- a/manifests/charts/istiod-remote/files/gateway-injection-template.yaml ++++ b/manifests/charts/istiod-remote/files/gateway-injection-template.yaml +@@ -131,6 +131,8 @@ spec: + timeoutSeconds: 3 + failureThreshold: {{ .Values.global.proxy.readinessFailureThreshold }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + mountPath: /var/run/secrets/workload-spiffe-credentials +@@ -158,6 +160,8 @@ spec: + - name: istio-podinfo + mountPath: /etc/istio/pod + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + csi: +diff --git a/manifests/charts/istiod-remote/files/injection-template.yaml b/manifests/charts/istiod-remote/files/injection-template.yaml +index dbe7708093..cee0ecf6e6 100644 +--- a/manifests/charts/istiod-remote/files/injection-template.yaml ++++ b/manifests/charts/istiod-remote/files/injection-template.yaml +@@ -356,6 +356,8 @@ spec: + resources: + {{ template "resources" . 
}} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + mountPath: /var/run/secrets/workload-spiffe-credentials +@@ -398,6 +400,8 @@ spec: + {{ end }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + csi: +diff --git a/pilot/cmd/pilot-agent/options/security.go b/pilot/cmd/pilot-agent/options/security.go +index 0a15a08ed6..7801a34f8d 100644 +--- a/pilot/cmd/pilot-agent/options/security.go ++++ b/pilot/cmd/pilot-agent/options/security.go +@@ -17,7 +17,6 @@ package options + import ( + "fmt" + "os" +- "path/filepath" + "strings" + + meshconfig "istio.io/api/mesh/v1alpha1" +@@ -40,7 +39,6 @@ func NewSecurityOptions(proxyConfig *meshconfig.ProxyConfig, stsPort int, tokenM + PilotCertProvider: features.PilotCertProvider, + OutputKeyCertToDir: outputKeyCertToDir, + ProvCert: provCert, +- WorkloadUDSPath: filepath.Join(proxyConfig.ConfigPath, "SDS"), + ClusterID: clusterIDVar.Get(), + FileMountedCerts: fileMountedCertsEnv, + WorkloadNamespace: PodNamespaceVar.Get(), +diff --git a/pkg/bootstrap/testdata/all_golden.json b/pkg/bootstrap/testdata/all_golden.json +index 16b371dc57..c58cc1052d 100644 +--- a/pkg/bootstrap/testdata/all_golden.json ++++ b/pkg/bootstrap/testdata/all_golden.json +@@ -349,7 +349,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/all/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/bootstrap/testdata/auth_golden.json b/pkg/bootstrap/testdata/auth_golden.json +index d468b349eb..6f0c8f41a6 100644 +--- a/pkg/bootstrap/testdata/auth_golden.json ++++ b/pkg/bootstrap/testdata/auth_golden.json +@@ -349,7 +349,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/auth/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/bootstrap/testdata/authsds_golden.json b/pkg/bootstrap/testdata/authsds_golden.json +index 0bf9437ced..1ff64cfe76 100644 +--- a/pkg/bootstrap/testdata/authsds_golden.json ++++ b/pkg/bootstrap/testdata/authsds_golden.json +@@ -349,7 +349,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/authsds/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/bootstrap/testdata/default_golden.json b/pkg/bootstrap/testdata/default_golden.json +index 4806f87bfa..8cf41f79d1 100644 +--- a/pkg/bootstrap/testdata/default_golden.json ++++ b/pkg/bootstrap/testdata/default_golden.json +@@ -349,7 +349,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/default/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/bootstrap/testdata/metrics_no_statsd_golden.json b/pkg/bootstrap/testdata/metrics_no_statsd_golden.json +index 6a63ea0cb0..bfeef41f62 100644 +--- a/pkg/bootstrap/testdata/metrics_no_statsd_golden.json ++++ b/pkg/bootstrap/testdata/metrics_no_statsd_golden.json +@@ -349,7 +349,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/metrics_no_statsd/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/bootstrap/testdata/running_golden.json b/pkg/bootstrap/testdata/running_golden.json +index 4411247954..c4e33acb08 100644 +--- a/pkg/bootstrap/testdata/running_golden.json ++++ b/pkg/bootstrap/testdata/running_golden.json +@@ -354,7 +354,7 @@ 
+ "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/running/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/bootstrap/testdata/runningsds_golden.json b/pkg/bootstrap/testdata/runningsds_golden.json +index a40a1715b7..6b450061a2 100644 +--- a/pkg/bootstrap/testdata/runningsds_golden.json ++++ b/pkg/bootstrap/testdata/runningsds_golden.json +@@ -354,7 +354,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/runningsds/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/bootstrap/testdata/stats_inclusion_golden.json b/pkg/bootstrap/testdata/stats_inclusion_golden.json +index b55a100502..6c755d99ec 100644 +--- a/pkg/bootstrap/testdata/stats_inclusion_golden.json ++++ b/pkg/bootstrap/testdata/stats_inclusion_golden.json +@@ -442,7 +442,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/stats_inclusion/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/bootstrap/testdata/tracing_datadog_golden.json b/pkg/bootstrap/testdata/tracing_datadog_golden.json +index 5e7638c704..da5848ec9f 100644 +--- a/pkg/bootstrap/testdata/tracing_datadog_golden.json ++++ b/pkg/bootstrap/testdata/tracing_datadog_golden.json +@@ -349,7 +349,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/tracing_datadog/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/bootstrap/testdata/tracing_lightstep_golden.json b/pkg/bootstrap/testdata/tracing_lightstep_golden.json +index 5685281d31..d84e555c0a 100644 +--- a/pkg/bootstrap/testdata/tracing_lightstep_golden.json ++++ b/pkg/bootstrap/testdata/tracing_lightstep_golden.json +@@ -349,7 +349,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/tracing_lightstep/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/bootstrap/testdata/tracing_opencensusagent_golden.json b/pkg/bootstrap/testdata/tracing_opencensusagent_golden.json +index 1b267e3e81..0f6f4079a0 100644 +--- a/pkg/bootstrap/testdata/tracing_opencensusagent_golden.json ++++ b/pkg/bootstrap/testdata/tracing_opencensusagent_golden.json +@@ -349,7 +349,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/tracing_opencensusagent/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/bootstrap/testdata/tracing_stackdriver_golden.json b/pkg/bootstrap/testdata/tracing_stackdriver_golden.json +index e34b4445f2..f587c5d650 100644 +--- a/pkg/bootstrap/testdata/tracing_stackdriver_golden.json ++++ b/pkg/bootstrap/testdata/tracing_stackdriver_golden.json +@@ -349,7 +349,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/tracing_stackdriver/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/bootstrap/testdata/tracing_tls_custom_sni_golden.json b/pkg/bootstrap/testdata/tracing_tls_custom_sni_golden.json +index 7b2acde9cb..10b7b0bb18 100644 +--- a/pkg/bootstrap/testdata/tracing_tls_custom_sni_golden.json ++++ b/pkg/bootstrap/testdata/tracing_tls_custom_sni_golden.json +@@ -349,7 +349,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/tracing_tls_custom_sni/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/bootstrap/testdata/tracing_tls_golden.json b/pkg/bootstrap/testdata/tracing_tls_golden.json +index 4c0124768f..efb8e0f1d4 100644 +--- 
a/pkg/bootstrap/testdata/tracing_tls_golden.json ++++ b/pkg/bootstrap/testdata/tracing_tls_golden.json +@@ -349,7 +349,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/tracing_tls/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/bootstrap/testdata/tracing_zipkin_golden.json b/pkg/bootstrap/testdata/tracing_zipkin_golden.json +index 6fc99b7604..e8096e7560 100644 +--- a/pkg/bootstrap/testdata/tracing_zipkin_golden.json ++++ b/pkg/bootstrap/testdata/tracing_zipkin_golden.json +@@ -349,7 +349,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/tracing_zipkin/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/bootstrap/testdata/xdsproxy_golden.json b/pkg/bootstrap/testdata/xdsproxy_golden.json +index 1a1c068117..1eef91b094 100644 +--- a/pkg/bootstrap/testdata/xdsproxy_golden.json ++++ b/pkg/bootstrap/testdata/xdsproxy_golden.json +@@ -349,7 +349,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "/tmp/bootstrap/xdsproxy/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } +diff --git a/pkg/config/constants/constants.go b/pkg/config/constants/constants.go +index 77b0956925..4b536b88e7 100644 +--- a/pkg/config/constants/constants.go ++++ b/pkg/config/constants/constants.go +@@ -42,6 +42,21 @@ const ( + // ConfigPathDir config directory for storing envoy json config files. + ConfigPathDir = "./etc/istio/proxy" + ++ // WorkloadIdentityPath is the path to the folder where workload identity materials are placed ++ WorkloadIdentityPath = "./var/run/secrets/workload-identity/" ++ ++ // WorkloadIdentitySocketPath is the path to the Unix Domain Socket for SDS ++ WorkloadIdentitySocketPath = WorkloadIdentityPath + "socket" ++ ++ // WorkloadIdentityCertChainPath is path to an existing workload certificate chain file ++ WorkloadIdentityCertChainPath = WorkloadIdentityPath + "cert-chain.pem" ++ ++ // WorkloadIdentityKeyPath is path to an existing workload key file ++ WorkloadIdentityKeyPath = WorkloadIdentityPath + "key.pem" ++ ++ // WorkloadIdentityRootCertPath is path to an existing workload root certificate file ++ WorkloadIdentityRootCertPath = WorkloadIdentityPath + "root-cert.pem" ++ + // IstioDataDir is the directory to store binary data such as envoy core dump, profile, and downloaded Wasm modules. 
+ IstioDataDir = "/var/lib/istio/data" + +diff --git a/pkg/istio-agent/agent.go b/pkg/istio-agent/agent.go +index b8fc77b9a7..6fcfde9746 100644 +--- a/pkg/istio-agent/agent.go ++++ b/pkg/istio-agent/agent.go +@@ -403,13 +403,33 @@ func (a *Agent) Run(ctx context.Context) (func(), error) { + return nil, fmt.Errorf("failed to start local DNS server: %v", err) + } + +- a.secretCache, err = a.newSecretManager() +- if err != nil { +- return nil, fmt.Errorf("failed to start workload secret manager %v", err) +- } ++ socketExists := socketFileExists(constants.WorkloadIdentitySocketPath) ++ ++ if !socketExists { ++ log.Info("SDS socket not detected, creating own SDS Server") + +- a.sdsServer = sds.NewServer(a.secOpts, a.secretCache) +- a.secretCache.SetUpdateCallback(a.sdsServer.UpdateCallback) ++ if workloadCertFilesExist() { ++ log.Info("workload identity cert files detected, creating secret manager without caClient") ++ a.secOpts.RootCertFilePath = constants.WorkloadIdentityRootCertPath ++ a.secOpts.CertChainFilePath = constants.WorkloadIdentityCertChainPath ++ a.secOpts.KeyFilePath = constants.WorkloadIdentityKeyPath ++ ++ a.secretCache, err = cache.NewSecretManagerClient(nil, a.secOpts) ++ if err != nil { ++ return nil, fmt.Errorf("failed to start workload secret manager %v", err) ++ } ++ } else { ++ log.Info("workload identity cert files not found, create secret manager with caClient") ++ a.secretCache, err = a.newSecretManager() ++ if err != nil { ++ return nil, fmt.Errorf("failed to start workload secret manager %v", err) ++ } ++ } ++ a.sdsServer = sds.NewServer(a.secOpts, a.secretCache) ++ a.secretCache.SetUpdateCallback(a.sdsServer.UpdateCallback) ++ } else { ++ log.Info("SDS socket detected, don't start SDS Server") ++ } + + a.xdsProxy, err = initXdsProxy(a) + if err != nil { +@@ -627,6 +647,20 @@ func fileExists(path string) bool { + return false + } + ++func socketFileExists(path string) bool { ++ if fi, err := os.Stat(path); err == nil && !fi.Mode().IsRegular() { ++ return true ++ } ++ return false ++} ++ ++func workloadCertFilesExist() bool { ++ rootCertExists := fileExists(constants.WorkloadIdentityRootCertPath) ++ certChainExists := fileExists(constants.WorkloadIdentityCertChainPath) ++ keyExists := fileExists(constants.WorkloadIdentityKeyPath) ++ return rootCertExists && certChainExists && keyExists ++} ++ + // FindRootCAForCA Find the root CA to use when connecting to the CA (Istiod or external). 
+ func (a *Agent) FindRootCAForCA() (string, error) { + var rootCAPath string +diff --git a/pkg/istio-agent/agent_test.go b/pkg/istio-agent/agent_test.go +index 56add5d0bf..65ff9059d0 100644 +--- a/pkg/istio-agent/agent_test.go ++++ b/pkg/istio-agent/agent_test.go +@@ -20,6 +20,9 @@ import ( + "crypto/x509" + "encoding/json" + "fmt" ++ "istio.io/istio/pkg/config/constants" ++ "istio.io/istio/security/pkg/nodeagent/cache" ++ "istio.io/istio/security/pkg/nodeagent/sds" + "net" + "os" + "path" +@@ -259,6 +262,47 @@ func TestAgent(t *testing.T) { + return a + }).Check(t, security.WorkloadKeyCertResourceName, security.RootCertReqResourceName) + }) ++ t.Run("External SDS socket", func(t *testing.T) { ++ ++ dir := mktemp() ++ copyCerts(t, dir) ++ ++ secOpts := &security.Options{} ++ secOpts.RootCertFilePath = dir + "/root-cert.pem" ++ secOpts.CertChainFilePath = dir + "/cert-chain.pem" ++ secOpts.KeyFilePath = dir + "/key.pem" ++ ++ secretCache, err := cache.NewSecretManagerClient(nil, secOpts) ++ if err != nil { ++ t.Fatal(err) ++ } ++ defer secretCache.Close() ++ ++ // this SDS Server listens on the fixed socket path serving the certs copied to the temp directory, ++ // and acts as the external SDS Server that the Agent will detect at startup ++ sdsServer := sds.NewServer(secOpts, secretCache) ++ defer sdsServer.Stop() ++ ++ Setup(t).Check(t, security.WorkloadKeyCertResourceName, security.RootCertReqResourceName) ++ ++ t.Cleanup(func() { ++ _ = os.RemoveAll(dir) ++ }) ++ }) ++ t.Run("Workload certificates", func(t *testing.T) { ++ ++ dir := constants.WorkloadIdentityPath ++ if err := os.MkdirAll(dir, 0o755); err != nil { ++ t.Fatal(err) ++ } ++ copyCerts(t, dir) ++ ++ Setup(t).Check(t, security.WorkloadKeyCertResourceName, security.RootCertReqResourceName) ++ ++ t.Cleanup(func() { ++ _ = os.RemoveAll(dir) ++ }) ++ }) + t.Run("VMs", func(t *testing.T) { + // Bootstrap sets up a short lived JWT token and root certificate. The initial run will fetch + // a certificate and write it to disk. 
This will be used (by mTLS authenticator) for future +@@ -543,7 +587,7 @@ func Setup(t *testing.T, opts ...func(a AgentTest) AgentTest) *AgentTest { + } + ca := setupCa(t, resp.CaAuthenticator) + resp.Security = security.Options{ +- WorkloadUDSPath: filepath.Join(d, "SDS"), ++ WorkloadUDSPath: "./var/run/secrets/workload-identity/socket", + CAEndpoint: ca.URL, + CAProviderName: "Citadel", + TrustDomain: "cluster.local", +@@ -590,7 +634,6 @@ func Setup(t *testing.T, opts ...func(a AgentTest) AgentTest) *AgentTest { + a := NewAgent(&resp.ProxyConfig, &resp.AgentConfig, &resp.Security, envoy.ProxyConfig{TestOnly: !resp.envoyEnable}) + t.Cleanup(a.Close) + ctx, done := context.WithCancel(context.Background()) +- + wait, err := a.Run(ctx) + if err != nil { + t.Fatal(err) +diff --git a/pkg/kube/inject/testdata/inject/auth.non-default-service-account.yaml.injected b/pkg/kube/inject/testdata/inject/auth.non-default-service-account.yaml.injected +index 67714154e7..4d31db6051 100644 +--- a/pkg/kube/inject/testdata/inject/auth.non-default-service-account.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/auth.non-default-service-account.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -182,6 +184,7 @@ spec: + fsGroup: 1337 + serviceAccountName: non-default + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/auth.yaml.injected b/pkg/kube/inject/testdata/inject/auth.yaml.injected +index 4d299834d8..7057c4a22c 100644 +--- a/pkg/kube/inject/testdata/inject/auth.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/auth.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -181,6 +183,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy 
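For orientation, a minimal sketch (illustrative only, not part of the patch) of the shape these injected golden files now share once the template changes above are applied; the volume and mount names come straight from the patch, and the emptyDir backing is how the chart templates declare the volume:

      spec:
        containers:
        - name: istio-proxy
          volumeMounts:
          - name: workload-identity
            mountPath: /var/run/secrets/workload-identity
          # ...existing mounts (istiod-ca-cert, istio-data, ...) follow
        volumes:
        - name: workload-identity   # backed by an emptyDir in the chart templates
        # ...existing volumes (istio-envoy, istio-data, ...) follow

The istio-agent then probes this mount for an external SDS socket or pre-provisioned workload certificate files, as wired up in pkg/istio-agent/agent.go earlier in this patch.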
+diff --git a/pkg/kube/inject/testdata/inject/cronjob.yaml.injected b/pkg/kube/inject/testdata/inject/cronjob.yaml.injected +index 42ed659209..9ea3ef372f 100644 +--- a/pkg/kube/inject/testdata/inject/cronjob.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/cronjob.yaml.injected +@@ -12,7 +12,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + security.istio.io/tlsMode: istio +@@ -121,6 +121,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -176,6 +178,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/daemonset.yaml.injected b/pkg/kube/inject/testdata/inject/daemonset.yaml.injected +index 1a421484c7..41529899b4 100644 +--- a/pkg/kube/inject/testdata/inject/daemonset.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/daemonset.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -125,6 +125,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -179,6 +181,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/deploymentconfig-multi.yaml.injected b/pkg/kube/inject/testdata/inject/deploymentconfig-multi.yaml.injected +index 4de2b3797b..ba8eea9f32 100644 +--- a/pkg/kube/inject/testdata/inject/deploymentconfig-multi.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/deploymentconfig-multi.yaml.injected +@@ -32,7 +32,7 @@ items: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: 
'{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -140,6 +140,8 @@ items: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -194,6 +196,7 @@ items: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/deploymentconfig-with-canonical-service-label.yaml.injected b/pkg/kube/inject/testdata/inject/deploymentconfig-with-canonical-service-label.yaml.injected +index c19a216dc4..bb1a8e3b4e 100644 +--- a/pkg/kube/inject/testdata/inject/deploymentconfig-with-canonical-service-label.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/deploymentconfig-with-canonical-service-label.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -125,6 +125,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -179,6 +181,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/deploymentconfig.yaml.injected b/pkg/kube/inject/testdata/inject/deploymentconfig.yaml.injected +index 380c804799..d51a6aa050 100644 +--- a/pkg/kube/inject/testdata/inject/deploymentconfig.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/deploymentconfig.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -125,6 +125,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -179,6 +181,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git 
a/pkg/kube/inject/testdata/inject/enable-core-dump-annotation.yaml.injected b/pkg/kube/inject/testdata/inject/enable-core-dump-annotation.yaml.injected +index d936ac8284..294e1c1176 100644 +--- a/pkg/kube/inject/testdata/inject/enable-core-dump-annotation.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/enable-core-dump-annotation.yaml.injected +@@ -20,7 +20,7 @@ spec: + prometheus.io/port: "15020" + prometheus.io/scrape: "true" + sidecar.istio.io/enableCoreDump: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init","enable-core-dump"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init","enable-core-dump"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -128,6 +128,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -209,6 +211,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/enable-core-dump.yaml.injected b/pkg/kube/inject/testdata/inject/enable-core-dump.yaml.injected +index 42daf082a8..36b7d7ccd8 100644 +--- a/pkg/kube/inject/testdata/inject/enable-core-dump.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/enable-core-dump.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init","enable-core-dump"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init","enable-core-dump"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -208,6 +210,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/explicit-security-context.yaml.injected b/pkg/kube/inject/testdata/inject/explicit-security-context.yaml.injected +index c8bebc6312..af636f6696 100644 +--- a/pkg/kube/inject/testdata/inject/explicit-security-context.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/explicit-security-context.yaml.injected +@@ -16,7 +16,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: 
'{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -118,6 +118,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -172,6 +174,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/format-duration.yaml.injected b/pkg/kube/inject/testdata/inject/format-duration.yaml.injected +index 161e6a49a2..a02a1ed679 100644 +--- a/pkg/kube/inject/testdata/inject/format-duration.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/format-duration.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -181,6 +183,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/frontend.yaml.injected b/pkg/kube/inject/testdata/inject/frontend.yaml.injected +index b38b8ab764..b1a63bca51 100644 +--- a/pkg/kube/inject/testdata/inject/frontend.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/frontend.yaml.injected +@@ -33,7 +33,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -144,6 +144,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -198,6 +200,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + 
- emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/grpc-agent.yaml.injected b/pkg/kube/inject/testdata/inject/grpc-agent.yaml.injected +index 1cf5cb2688..2805a8bfdb 100644 +--- a/pkg/kube/inject/testdata/inject/grpc-agent.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/grpc-agent.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/scrape: "true" + proxy.istio.io/overrides: '{"containers":[{"name":"traffic","image":"fake.docker.io/google-samples/traffic-go-gke:1.0","resources":{},"readinessProbe":{"httpGet":{"port":80}}}]}' + sidecar.istio.io/rewriteAppHTTPProbers: "false" +- sidecar.istio.io/status: '{"initContainers":null,"containers":["traffic","istio-proxy"],"volumes":["istio-xds","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":null,"containers":["traffic","istio-proxy"],"volumes":["workload-identity","istio-xds","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: grpc +@@ -123,6 +123,8 @@ spec: + cpu: 100m + memory: 128Mi + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -134,6 +136,7 @@ spec: + - mountPath: /etc/istio/pod + name: istio-podinfo + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-xds +diff --git a/pkg/kube/inject/testdata/inject/hello-always.yaml.injected b/pkg/kube/inject/testdata/inject/hello-always.yaml.injected +index f7e068f150..a334c1ae36 100644 +--- a/pkg/kube/inject/testdata/inject/hello-always.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-always.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -128,6 +128,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -183,6 +185,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-cncf-networks.yaml.injected b/pkg/kube/inject/testdata/inject/hello-cncf-networks.yaml.injected +index 5acb34d3d9..92b9685ac3 100644 +--- a/pkg/kube/inject/testdata/inject/hello-cncf-networks.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-cncf-networks.yaml.injected +@@ -21,7 +21,7 @@ spec: + prometheus.io/port: "15020" + prometheus.io/scrape: "true" + sidecar.istio.io/interceptionMode: REDIRECT +- sidecar.istio.io/status: 
'{"initContainers":["istio-validation"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-validation"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + traffic.sidecar.istio.io/excludeInboundPorts: "15020" + traffic.sidecar.istio.io/includeInboundPorts: '*' + traffic.sidecar.istio.io/includeOutboundIPRanges: '*' +@@ -132,6 +132,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -185,6 +187,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-existing-cncf-networks-json.yaml.injected b/pkg/kube/inject/testdata/inject/hello-existing-cncf-networks-json.yaml.injected +index 7d763c2cc6..bf153e6d36 100644 +--- a/pkg/kube/inject/testdata/inject/hello-existing-cncf-networks-json.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-existing-cncf-networks-json.yaml.injected +@@ -21,7 +21,7 @@ spec: + prometheus.io/port: "15020" + prometheus.io/scrape: "true" + sidecar.istio.io/interceptionMode: REDIRECT +- sidecar.istio.io/status: '{"initContainers":["istio-validation"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-validation"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + traffic.sidecar.istio.io/excludeInboundPorts: "15020" + traffic.sidecar.istio.io/includeInboundPorts: '*' + traffic.sidecar.istio.io/includeOutboundIPRanges: '*' +@@ -132,6 +132,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -185,6 +187,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-existing-cncf-networks.yaml.injected b/pkg/kube/inject/testdata/inject/hello-existing-cncf-networks.yaml.injected +index d2e4be6645..283cff4f8d 100644 +--- a/pkg/kube/inject/testdata/inject/hello-existing-cncf-networks.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-existing-cncf-networks.yaml.injected +@@ -21,7 +21,7 @@ spec: + prometheus.io/port: "15020" + prometheus.io/scrape: "true" + sidecar.istio.io/interceptionMode: REDIRECT +- sidecar.istio.io/status: '{"initContainers":["istio-validation"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: 
'{"initContainers":["istio-validation"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + traffic.sidecar.istio.io/excludeInboundPorts: "15020" + traffic.sidecar.istio.io/includeInboundPorts: '*' + traffic.sidecar.istio.io/includeOutboundIPRanges: '*' +@@ -132,6 +132,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -185,6 +187,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-image-pull-secret.yaml.injected b/pkg/kube/inject/testdata/inject/hello-image-pull-secret.yaml.injected +index 4d68cbcd13..22e5b6cf12 100644 +--- a/pkg/kube/inject/testdata/inject/hello-image-pull-secret.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-image-pull-secret.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -183,6 +185,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-image-secrets-in-values.yaml.injected b/pkg/kube/inject/testdata/inject/hello-image-secrets-in-values.yaml.injected +index 985b233a37..b6159ce266 100644 +--- a/pkg/kube/inject/testdata/inject/hello-image-secrets-in-values.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-image-secrets-in-values.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":["barSecret"],"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":["barSecret"],"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -183,6 +185,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: 
++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-mount-mtls-certs.yaml.injected b/pkg/kube/inject/testdata/inject/hello-mount-mtls-certs.yaml.injected +index 982a842094..12d4170967 100644 +--- a/pkg/kube/inject/testdata/inject/hello-mount-mtls-certs.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-mount-mtls-certs.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert","istio-certs"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert","istio-certs"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -184,6 +186,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-mtls-not-ready.yaml.injected b/pkg/kube/inject/testdata/inject/hello-mtls-not-ready.yaml.injected +index ae62446840..db471d46af 100644 +--- a/pkg/kube/inject/testdata/inject/hello-mtls-not-ready.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-mtls-not-ready.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -181,6 +183,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-multi.yaml.injected b/pkg/kube/inject/testdata/inject/hello-multi.yaml.injected +index f2aeb14317..e37ad527c6 100644 +--- a/pkg/kube/inject/testdata/inject/hello-multi.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-multi.yaml.injected +@@ -20,7 +20,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ 
sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -129,6 +129,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -183,6 +185,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +@@ -231,7 +234,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -340,6 +343,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -394,6 +399,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-multiple-image-secrets.yaml.injected b/pkg/kube/inject/testdata/inject/hello-multiple-image-secrets.yaml.injected +index 11ef382132..9735706a74 100644 +--- a/pkg/kube/inject/testdata/inject/hello-multiple-image-secrets.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-multiple-image-secrets.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":["barSecret"],"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":["barSecret"],"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -184,6 +186,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-namespace.yaml.injected b/pkg/kube/inject/testdata/inject/hello-namespace.yaml.injected +index 42aced6663..ea94261d05 100644 +--- a/pkg/kube/inject/testdata/inject/hello-namespace.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-namespace.yaml.injected +@@ -20,7 +20,7 @@ spec: + prometheus.io/path: 
/stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -128,6 +128,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -182,6 +184,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-never.yaml.injected b/pkg/kube/inject/testdata/inject/hello-never.yaml.injected +index 8ebfde83f1..bc741d5ef1 100644 +--- a/pkg/kube/inject/testdata/inject/hello-never.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-never.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -128,6 +128,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -183,6 +185,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-no-seccontext.yaml.injected b/pkg/kube/inject/testdata/inject/hello-no-seccontext.yaml.injected +index 34948d182b..7f882baa7b 100644 +--- a/pkg/kube/inject/testdata/inject/hello-no-seccontext.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-no-seccontext.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - 
mountPath: /var/lib/istio/data +@@ -179,6 +181,7 @@ spec: + runAsNonRoot: false + runAsUser: 0 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-probes-noProxyHoldApplication-ProxyConfig.yaml.injected b/pkg/kube/inject/testdata/inject/hello-probes-noProxyHoldApplication-ProxyConfig.yaml.injected +index 04f77fb466..8ea486eec2 100644 +--- a/pkg/kube/inject/testdata/inject/hello-probes-noProxyHoldApplication-ProxyConfig.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-probes-noProxyHoldApplication-ProxyConfig.yaml.injected +@@ -18,7 +18,7 @@ spec: + prometheus.io/port: "15020" + prometheus.io/scrape: "true" + proxy.istio.io/config: '{ "holdApplicationUntilProxyStarts": false }' +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -129,6 +129,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -212,6 +214,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-probes-proxyHoldApplication-ProxyConfig.yaml.injected b/pkg/kube/inject/testdata/inject/hello-probes-proxyHoldApplication-ProxyConfig.yaml.injected +index 3beaa853bf..79ebf5c0d7 100644 +--- a/pkg/kube/inject/testdata/inject/hello-probes-proxyHoldApplication-ProxyConfig.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-probes-proxyHoldApplication-ProxyConfig.yaml.injected +@@ -18,7 +18,7 @@ spec: + prometheus.io/port: "15020" + prometheus.io/scrape: "true" + proxy.istio.io/config: '{ "holdApplicationUntilProxyStarts": true }' +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -129,6 +129,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -212,6 +214,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-probes-with-flag-set-in-annotation.yaml.injected b/pkg/kube/inject/testdata/inject/hello-probes-with-flag-set-in-annotation.yaml.injected +index 39983854dc..1cf386b422 100644 +--- a/pkg/kube/inject/testdata/inject/hello-probes-with-flag-set-in-annotation.yaml.injected 
++++ b/pkg/kube/inject/testdata/inject/hello-probes-with-flag-set-in-annotation.yaml.injected +@@ -18,7 +18,7 @@ spec: + prometheus.io/port: "15020" + prometheus.io/scrape: "true" + sidecar.istio.io/rewriteAppHTTPProbers: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -152,6 +152,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -206,6 +208,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-probes-with-flag-unset-in-annotation.yaml.injected b/pkg/kube/inject/testdata/inject/hello-probes-with-flag-unset-in-annotation.yaml.injected +index f5b9f261f8..21f38417ce 100644 +--- a/pkg/kube/inject/testdata/inject/hello-probes-with-flag-unset-in-annotation.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-probes-with-flag-unset-in-annotation.yaml.injected +@@ -18,7 +18,7 @@ spec: + prometheus.io/port: "15020" + prometheus.io/scrape: "true" + sidecar.istio.io/rewriteAppHTTPProbers: "false" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -147,6 +147,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -201,6 +203,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-probes.proxyHoldsApplication.yaml.injected b/pkg/kube/inject/testdata/inject/hello-probes.proxyHoldsApplication.yaml.injected +index 459f8cf58d..a0b359815a 100644 +--- a/pkg/kube/inject/testdata/inject/hello-probes.proxyHoldsApplication.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-probes.proxyHoldsApplication.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: 
'{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -128,6 +128,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -211,6 +213,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-probes.yaml.injected b/pkg/kube/inject/testdata/inject/hello-probes.yaml.injected +index f672231395..a6c9585ed1 100644 +--- a/pkg/kube/inject/testdata/inject/hello-probes.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-probes.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -151,6 +151,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -205,6 +207,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-proxy-override.yaml.injected b/pkg/kube/inject/testdata/inject/hello-proxy-override.yaml.injected +index b5977a21de..024a624439 100644 +--- a/pkg/kube/inject/testdata/inject/hello-proxy-override.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-proxy-override.yaml.injected +@@ -20,7 +20,7 @@ spec: + prometheus.io/port: "15020" + prometheus.io/scrape: "true" + sidecar.istio.io/proxyImage: docker.io/istio/proxy2_debug:unittest +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -128,6 +128,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -182,6 +184,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-readiness.yaml.injected 
b/pkg/kube/inject/testdata/inject/hello-readiness.yaml.injected +index db73f7e2c0..97253e25da 100644 +--- a/pkg/kube/inject/testdata/inject/hello-readiness.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-readiness.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -133,6 +133,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -187,6 +189,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-template-in-values.yaml.injected b/pkg/kube/inject/testdata/inject/hello-template-in-values.yaml.injected +index 4d299834d8..7057c4a22c 100644 +--- a/pkg/kube/inject/testdata/inject/hello-template-in-values.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-template-in-values.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -181,6 +183,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello-tproxy.yaml.injected b/pkg/kube/inject/testdata/inject/hello-tproxy.yaml.injected +index 612fc0d5b0..4336814bc5 100644 +--- a/pkg/kube/inject/testdata/inject/hello-tproxy.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello-tproxy.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: 
'{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -129,6 +129,8 @@ spec: + runAsNonRoot: false + runAsUser: 0 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -183,6 +185,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello.proxyHoldsApplication.yaml.injected b/pkg/kube/inject/testdata/inject/hello.proxyHoldsApplication.yaml.injected +index 4b588e1488..290a412b37 100644 +--- a/pkg/kube/inject/testdata/inject/hello.proxyHoldsApplication.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello.proxyHoldsApplication.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -187,6 +189,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello.yaml.cni.injected b/pkg/kube/inject/testdata/inject/hello.yaml.cni.injected +index 463bddc8cb..b8ea1b1a23 100644 +--- a/pkg/kube/inject/testdata/inject/hello.yaml.cni.injected ++++ b/pkg/kube/inject/testdata/inject/hello.yaml.cni.injected +@@ -20,7 +20,7 @@ spec: + prometheus.io/port: "15020" + prometheus.io/scrape: "true" + sidecar.istio.io/interceptionMode: REDIRECT +- sidecar.istio.io/status: '{"initContainers":["istio-validation"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-validation"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + traffic.sidecar.istio.io/excludeInboundPorts: "15020" + traffic.sidecar.istio.io/includeInboundPorts: '*' + traffic.sidecar.istio.io/includeOutboundIPRanges: '*' +@@ -134,6 +134,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -187,6 +189,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: 
istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello.yaml.injected b/pkg/kube/inject/testdata/inject/hello.yaml.injected +index 4d299834d8..7057c4a22c 100644 +--- a/pkg/kube/inject/testdata/inject/hello.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/hello.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -181,6 +183,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/hello.yaml.proxyImageName.injected b/pkg/kube/inject/testdata/inject/hello.yaml.proxyImageName.injected +index a0118b952c..96e4c7a237 100644 +--- a/pkg/kube/inject/testdata/inject/hello.yaml.proxyImageName.injected ++++ b/pkg/kube/inject/testdata/inject/hello.yaml.proxyImageName.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -181,6 +183,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/https-probes.yaml.injected b/pkg/kube/inject/testdata/inject/https-probes.yaml.injected +index d89e9800a8..472ff48fcd 100644 +--- a/pkg/kube/inject/testdata/inject/https-probes.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/https-probes.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: 
'{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -152,6 +152,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -206,6 +208,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/job.yaml.injected b/pkg/kube/inject/testdata/inject/job.yaml.injected +index 43674b26b0..2a498e765e 100644 +--- a/pkg/kube/inject/testdata/inject/job.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/job.yaml.injected +@@ -12,7 +12,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + security.istio.io/tlsMode: istio +@@ -119,6 +119,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -174,6 +176,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/kubevirtInterfaces.yaml.injected b/pkg/kube/inject/testdata/inject/kubevirtInterfaces.yaml.injected +index 3b39eb7e9a..3343932615 100644 +--- a/pkg/kube/inject/testdata/inject/kubevirtInterfaces.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/kubevirtInterfaces.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + traffic.sidecar.istio.io/kubevirtInterfaces: net1 + creationTimestamp: null + labels: +@@ -128,6 +128,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -184,6 +186,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/kubevirtInterfaces_list.yaml.injected 
b/pkg/kube/inject/testdata/inject/kubevirtInterfaces_list.yaml.injected +index eb205a6862..31df226174 100644 +--- a/pkg/kube/inject/testdata/inject/kubevirtInterfaces_list.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/kubevirtInterfaces_list.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + traffic.sidecar.istio.io/kubevirtInterfaces: net1,net2 + creationTimestamp: null + labels: +@@ -128,6 +128,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -184,6 +186,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/list-frontend.yaml.injected b/pkg/kube/inject/testdata/inject/list-frontend.yaml.injected +index f5a12440d5..0875531bb5 100644 +--- a/pkg/kube/inject/testdata/inject/list-frontend.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/list-frontend.yaml.injected +@@ -34,7 +34,7 @@ items: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -145,6 +145,8 @@ items: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -199,6 +201,7 @@ items: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/list.yaml.injected b/pkg/kube/inject/testdata/inject/list.yaml.injected +index d6669c5de7..6ff36a5497 100644 +--- a/pkg/kube/inject/testdata/inject/list.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/list.yaml.injected +@@ -22,7 +22,7 @@ items: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: 
'{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -131,6 +131,8 @@ items: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -185,6 +187,7 @@ items: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +@@ -232,7 +235,7 @@ items: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -341,6 +344,8 @@ items: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -395,6 +400,7 @@ items: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/multi-container.yaml.injected b/pkg/kube/inject/testdata/inject/multi-container.yaml.injected +index 58333f4bba..e39cd88e04 100644 +--- a/pkg/kube/inject/testdata/inject/multi-container.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/multi-container.yaml.injected +@@ -15,7 +15,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: app +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -181,6 +183,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/multi-init.yaml.injected b/pkg/kube/inject/testdata/inject/multi-init.yaml.injected +index b9bf345d19..e569f1abf6 100644 +--- a/pkg/kube/inject/testdata/inject/multi-init.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/multi-init.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: 
'{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -195,6 +197,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/multiple-templates.yaml.injected b/pkg/kube/inject/testdata/inject/multiple-templates.yaml.injected +index 8d21a6bb5c..e9cf5f3ebc 100644 +--- a/pkg/kube/inject/testdata/inject/multiple-templates.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/multiple-templates.yaml.injected +@@ -18,7 +18,7 @@ spec: + prometheus.io/port: "15020" + prometheus.io/scrape: "true" + proxy.istio.io/overrides: '{"containers":[{"name":"istio-proxy","image":"foo/bar","resources":{}}]}' +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -120,6 +120,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -174,6 +176,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/named_port.yaml.injected b/pkg/kube/inject/testdata/inject/named_port.yaml.injected +index 88840da562..d66678acb8 100644 +--- a/pkg/kube/inject/testdata/inject/named_port.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/named_port.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -133,6 +133,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -187,6 +189,7 @@ 
spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/one_container.yaml.injected b/pkg/kube/inject/testdata/inject/one_container.yaml.injected +index ea55030225..1b4fc0f20f 100644 +--- a/pkg/kube/inject/testdata/inject/one_container.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/one_container.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -137,6 +137,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -191,6 +193,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/only-proxy-container.yaml.injected b/pkg/kube/inject/testdata/inject/only-proxy-container.yaml.injected +index 6767851ca3..0b8d3565cb 100644 +--- a/pkg/kube/inject/testdata/inject/only-proxy-container.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/only-proxy-container.yaml.injected +@@ -15,7 +15,7 @@ spec: + prometheus.io/port: "15020" + prometheus.io/scrape: "true" + proxy.istio.io/overrides: '{"containers":[{"name":"istio-proxy","resources":{}}]}' +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + istio: ingressgateway +@@ -113,6 +113,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -167,6 +169,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/pod.yaml.injected b/pkg/kube/inject/testdata/inject/pod.yaml.injected +index a03fd5fcb5..cb6af057f4 100644 +--- a/pkg/kube/inject/testdata/inject/pod.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/pod.yaml.injected +@@ -7,7 +7,7 @@ metadata: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' 
++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + security.istio.io/tlsMode: istio +@@ -113,6 +113,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -167,6 +169,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/proxy-override-args.yaml.injected b/pkg/kube/inject/testdata/inject/proxy-override-args.yaml.injected +index 98ed4769a7..9663b4c222 100644 +--- a/pkg/kube/inject/testdata/inject/proxy-override-args.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/proxy-override-args.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/port: "15020" + prometheus.io/scrape: "true" + proxy.istio.io/overrides: '{"containers":[{"name":"istio-proxy","command":["envoy"],"args":["-c","my-config.yaml"],"resources":{}}]}' +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -114,6 +114,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -168,6 +170,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/proxy-override.yaml.injected b/pkg/kube/inject/testdata/inject/proxy-override.yaml.injected +index 4baba73fd9..f6261204f5 100644 +--- a/pkg/kube/inject/testdata/inject/proxy-override.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/proxy-override.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/port: "15020" + prometheus.io/scrape: "true" + proxy.istio.io/overrides: '{"containers":[{"name":"istio-proxy","resources":{"requests":{"cpu":"123m"}},"volumeMounts":[{"name":"certs","mountPath":"/etc/certs"}],"livenessProbe":{"httpGet":{"path":"/healthz/ready","port":15021},"initialDelaySeconds":10,"timeoutSeconds":3,"periodSeconds":2,"failureThreshold":30},"lifecycle":{"preStop":{"exec":{"command":["sleep","10"]}}},"terminationMessagePath":"/foo/bar","securityContext":{"readOnlyRootFilesystem":false,"allowPrivilegeEscalation":true},"tty":true}],"initContainers":[{"name":"istio-init","image":"fake/custom-image","args":["my","custom","args"],"resources":{}}]}' +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: 
'{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -135,6 +135,8 @@ spec: + terminationMessagePath: /foo/bar + tty: true + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -177,6 +179,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/ready_live.yaml.injected b/pkg/kube/inject/testdata/inject/ready_live.yaml.injected +index f672231395..a6c9585ed1 100644 +--- a/pkg/kube/inject/testdata/inject/ready_live.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/ready_live.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -151,6 +151,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -205,6 +207,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/ready_only.yaml.injected b/pkg/kube/inject/testdata/inject/ready_only.yaml.injected +index e0ea3d093e..808728bc68 100644 +--- a/pkg/kube/inject/testdata/inject/ready_only.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/ready_only.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -133,6 +133,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -187,6 +189,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/replicaset.yaml.injected b/pkg/kube/inject/testdata/inject/replicaset.yaml.injected +index 2aa768339f..0c13fb8000 100644 +--- 
a/pkg/kube/inject/testdata/inject/replicaset.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/replicaset.yaml.injected +@@ -16,7 +16,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -122,6 +122,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -176,6 +178,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/replicationcontroller.yaml.injected b/pkg/kube/inject/testdata/inject/replicationcontroller.yaml.injected +index e2d1308208..94878b39c7 100644 +--- a/pkg/kube/inject/testdata/inject/replicationcontroller.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/replicationcontroller.yaml.injected +@@ -15,7 +15,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: nginx +@@ -121,6 +121,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -175,6 +177,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/resource_annotations.yaml.injected b/pkg/kube/inject/testdata/inject/resource_annotations.yaml.injected +index 69bd61110c..c875487c03 100644 +--- a/pkg/kube/inject/testdata/inject/resource_annotations.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/resource_annotations.yaml.injected +@@ -21,7 +21,7 @@ spec: + sidecar.istio.io/proxyCPULimit: 1000m + sidecar.istio.io/proxyMemory: 1Gi + sidecar.istio.io/proxyMemoryLimit: 2Gi +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: 
resource +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -181,6 +183,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/startup_live.yaml.injected b/pkg/kube/inject/testdata/inject/startup_live.yaml.injected +index ebb0533277..409231173e 100644 +--- a/pkg/kube/inject/testdata/inject/startup_live.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/startup_live.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -151,6 +151,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -205,6 +207,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/startup_only.yaml.injected b/pkg/kube/inject/testdata/inject/startup_only.yaml.injected +index b99d964f03..e618514cc4 100644 +--- a/pkg/kube/inject/testdata/inject/startup_only.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/startup_only.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -133,6 +133,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -187,6 +189,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/startup_ready_live.yaml.injected b/pkg/kube/inject/testdata/inject/startup_ready_live.yaml.injected +index 813c5fd4cf..aaab18e46b 100644 +--- a/pkg/kube/inject/testdata/inject/startup_ready_live.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/startup_ready_live.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + 
prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -159,6 +159,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -213,6 +215,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/statefulset.yaml.injected b/pkg/kube/inject/testdata/inject/statefulset.yaml.injected +index b42813d94d..0903be8212 100644 +--- a/pkg/kube/inject/testdata/inject/statefulset.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/statefulset.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -130,6 +130,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -184,6 +186,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/status_annotations.yaml.injected b/pkg/kube/inject/testdata/inject/status_annotations.yaml.injected +index 23fb621f9d..fbc5739bfb 100644 +--- a/pkg/kube/inject/testdata/inject/status_annotations.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/status_annotations.yaml.injected +@@ -21,7 +21,7 @@ spec: + readiness.status.sidecar.istio.io/failureThreshold: "300" + readiness.status.sidecar.istio.io/initialDelaySeconds: "100" + readiness.status.sidecar.istio.io/periodSeconds: "200" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + status.sidecar.istio.io/port: "123" + creationTimestamp: null + labels: +@@ -128,6 +128,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: 
/var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -182,6 +184,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/status_annotations_zeroport.yaml.injected b/pkg/kube/inject/testdata/inject/status_annotations_zeroport.yaml.injected +index a76ee29dca..93aaf1187f 100644 +--- a/pkg/kube/inject/testdata/inject/status_annotations_zeroport.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/status_annotations_zeroport.yaml.injected +@@ -21,7 +21,7 @@ spec: + readiness.status.sidecar.istio.io/failureThreshold: "300" + readiness.status.sidecar.istio.io/initialDelaySeconds: "100" + readiness.status.sidecar.istio.io/periodSeconds: "200" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + status.sidecar.istio.io/port: "0" + creationTimestamp: null + labels: +@@ -120,6 +120,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -174,6 +176,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/status_params.yaml.injected b/pkg/kube/inject/testdata/inject/status_params.yaml.injected +index a2e885b4de..323e3767fe 100644 +--- a/pkg/kube/inject/testdata/inject/status_params.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/status_params.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: status +@@ -123,6 +123,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -177,6 +179,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/tcp-probes-disabled.yaml.injected b/pkg/kube/inject/testdata/inject/tcp-probes-disabled.yaml.injected +index 4108ec37fa..dece85a23c 100644 +--- a/pkg/kube/inject/testdata/inject/tcp-probes-disabled.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/tcp-probes-disabled.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + 
prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -133,6 +133,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -187,6 +189,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/tcp-probes.yaml.injected b/pkg/kube/inject/testdata/inject/tcp-probes.yaml.injected +index 853d31c3d4..90f55e0ef2 100644 +--- a/pkg/kube/inject/testdata/inject/tcp-probes.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/tcp-probes.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -137,6 +137,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -191,6 +193,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/traffic-annotations-empty-includes.yaml.injected b/pkg/kube/inject/testdata/inject/traffic-annotations-empty-includes.yaml.injected +index 7a2088d3bf..ca0a160328 100644 +--- a/pkg/kube/inject/testdata/inject/traffic-annotations-empty-includes.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/traffic-annotations-empty-includes.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + traffic.sidecar.istio.io/excludeInboundPorts: 4,5,6 + traffic.sidecar.istio.io/excludeOutboundIPRanges: 10.96.0.2/24,10.96.0.3/24 + traffic.sidecar.istio.io/includeInboundPorts: "" +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ 
- mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -181,6 +183,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/traffic-annotations-wildcards.yaml.injected b/pkg/kube/inject/testdata/inject/traffic-annotations-wildcards.yaml.injected +index 8ce9a2ce93..ac8fc1608f 100644 +--- a/pkg/kube/inject/testdata/inject/traffic-annotations-wildcards.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/traffic-annotations-wildcards.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + traffic.sidecar.istio.io/excludeInboundPorts: 4,5,6 + traffic.sidecar.istio.io/excludeOutboundIPRanges: 10.96.0.2/24,10.96.0.3/24 + traffic.sidecar.istio.io/includeInboundPorts: '*' +@@ -127,6 +127,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -181,6 +183,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/traffic-annotations.yaml.injected b/pkg/kube/inject/testdata/inject/traffic-annotations.yaml.injected +index ef4a0b9b28..7c970f46f1 100644 +--- a/pkg/kube/inject/testdata/inject/traffic-annotations.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/traffic-annotations.yaml.injected +@@ -24,7 +24,7 @@ spec: + FOO: bar + ISTIO_META_TLS_CLIENT_KEY: /etc/identity2/client/keys/client-key.pem + sidecar.istio.io/proxyCPU: 4000m +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + traffic.sidecar.istio.io/excludeInboundPorts: 4,5,6 + traffic.sidecar.istio.io/excludeOutboundIPRanges: 10.96.0.2/24,10.96.0.3/24 + traffic.sidecar.istio.io/excludeOutboundPorts: 7,8,9 +@@ -135,6 +135,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -192,6 +194,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/traffic-params-empty-includes.yaml.injected 
b/pkg/kube/inject/testdata/inject/traffic-params-empty-includes.yaml.injected +index 43d0a98416..2bba484418 100644 +--- a/pkg/kube/inject/testdata/inject/traffic-params-empty-includes.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/traffic-params-empty-includes.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: traffic +@@ -123,6 +123,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -177,6 +179,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/traffic-params.yaml.injected b/pkg/kube/inject/testdata/inject/traffic-params.yaml.injected +index a455ef9ba8..51ea36277e 100644 +--- a/pkg/kube/inject/testdata/inject/traffic-params.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/traffic-params.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: traffic +@@ -115,6 +115,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -169,6 +171,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/two_container.yaml.injected b/pkg/kube/inject/testdata/inject/two_container.yaml.injected +index 5d77be618f..babaedb106 100644 +--- a/pkg/kube/inject/testdata/inject/two_container.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/two_container.yaml.injected +@@ -17,7 +17,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: 
'{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert"],"imagePullSecrets":null,"revision":"default"}' + creationTimestamp: null + labels: + app: hello +@@ -142,6 +142,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -196,6 +198,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/pkg/kube/inject/testdata/inject/user-volume.yaml.injected b/pkg/kube/inject/testdata/inject/user-volume.yaml.injected +index 9b35146f93..f71bbb1ba2 100644 +--- a/pkg/kube/inject/testdata/inject/user-volume.yaml.injected ++++ b/pkg/kube/inject/testdata/inject/user-volume.yaml.injected +@@ -19,7 +19,7 @@ spec: + prometheus.io/path: /stats/prometheus + prometheus.io/port: "15020" + prometheus.io/scrape: "true" +- sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert","user-volume-1","user-volume-2"],"imagePullSecrets":null,"revision":"default"}' ++ sidecar.istio.io/status: '{"initContainers":["istio-init"],"containers":["istio-proxy"],"volumes":["workload-identity","istio-envoy","istio-data","istio-podinfo","istio-token","istiod-ca-cert","user-volume-1","user-volume-2"],"imagePullSecrets":null,"revision":"default"}' + sidecar.istio.io/userVolume: '{"user-volume-1":{"persistentVolumeClaim":{"claimName":"pvc-claim"}},"user-volume-2":{"configMap":{"name":"configmap-volume","items":[{"key":"some-key","path":"/some-path"}]}}}' + sidecar.istio.io/userVolumeMount: '{"user-volume-1":{"mountPath":"/mnt/volume-1","readOnly":true},"user-volume-2":{"mountPath":"/mnt/volume-2"}}' + creationTimestamp: null +@@ -129,6 +129,8 @@ spec: + runAsNonRoot: true + runAsUser: 1337 + volumeMounts: ++ - mountPath: /var/run/secrets/workload-identity ++ name: workload-identity + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert + - mountPath: /var/lib/istio/data +@@ -188,6 +190,7 @@ spec: + securityContext: + fsGroup: 1337 + volumes: ++ - name: workload-identity + - emptyDir: + medium: Memory + name: istio-envoy +diff --git a/security/pkg/nodeagent/caclient/credentials_test.go b/security/pkg/nodeagent/caclient/credentials_test.go +index dfddc36fda..3bbc1cf6c2 100644 +--- a/security/pkg/nodeagent/caclient/credentials_test.go ++++ b/security/pkg/nodeagent/caclient/credentials_test.go +@@ -58,7 +58,6 @@ func TestGetTokenForXDS(t *testing.T) { + PilotCertProvider: "istiod", + OutputKeyCertToDir: "", + ProvCert: "", +- WorkloadUDSPath: "./etc/istio/proxy/SDS", + ClusterID: "", + FileMountedCerts: false, + WorkloadNamespace: "", +diff --git a/security/pkg/nodeagent/sds/sdsservice_test.go b/security/pkg/nodeagent/sds/sdsservice_test.go +index 7f9346f61d..f84332eec8 100644 +--- a/security/pkg/nodeagent/sds/sdsservice_test.go ++++ b/security/pkg/nodeagent/sds/sdsservice_test.go +@@ -15,6 +15,7 @@ package sds + + import ( + "fmt" ++ "istio.io/istio/pkg/config/constants" + "net" + "strings" + "testing" +@@ -24,7 +25,6 @@ import ( + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +- "k8s.io/apimachinery/pkg/util/uuid" + + "istio.io/istio/pilot/pkg/xds" + 
"istio.io/istio/pilot/test/xdstest" +@@ -109,9 +109,7 @@ func setupSDS(t *testing.T) *TestServer { + ResourceName: ca2.RootCertReqResourceName, + }) + +- opts := &ca2.Options{ +- WorkloadUDSPath: fmt.Sprintf("/tmp/workload_gotest%s.sock", string(uuid.NewUUID())), +- } ++ opts := &ca2.Options{} + server := NewServer(opts, st) + t.Cleanup(func() { + server.Stop() +@@ -120,7 +118,7 @@ func setupSDS(t *testing.T) *TestServer { + t: t, + server: server, + store: st, +- udsPath: opts.WorkloadUDSPath, ++ udsPath: constants.WorkloadIdentitySocketPath, + } + } + +diff --git a/security/pkg/nodeagent/sds/server.go b/security/pkg/nodeagent/sds/server.go +index ac4e78a0a2..ebbb45b01c 100644 +--- a/security/pkg/nodeagent/sds/server.go ++++ b/security/pkg/nodeagent/sds/server.go +@@ -15,6 +15,7 @@ + package sds + + import ( ++ "istio.io/istio/pkg/config/constants" + "net" + "time" + +@@ -46,7 +47,7 @@ type Server struct { + func NewServer(options *security.Options, workloadSecretCache security.SecretManager) *Server { + s := &Server{stopped: atomic.NewBool(false)} + s.workloadSds = newSDSService(workloadSecretCache, options) +- s.initWorkloadSdsService(options) ++ s.initWorkloadSdsService() + sdsServiceLog.Infof("SDS server for workload certificates started, listening on %q", options.WorkloadUDSPath) + return s + } +@@ -81,12 +82,12 @@ func (s *Server) Stop() { + } + } + +-func (s *Server) initWorkloadSdsService(options *security.Options) { ++func (s *Server) initWorkloadSdsService() { + s.grpcWorkloadServer = grpc.NewServer(s.grpcServerOptions()...) + s.workloadSds.register(s.grpcWorkloadServer) + + var err error +- s.grpcWorkloadListener, err = uds.NewListener(options.WorkloadUDSPath) ++ s.grpcWorkloadListener, err = uds.NewListener(constants.WorkloadIdentitySocketPath) + if err != nil { + sdsServiceLog.Errorf("Failed to set up UDS path: %v", err) + } +@@ -102,7 +103,7 @@ func (s *Server) initWorkloadSdsService(options *security.Options) { + serverOk := true + setUpUdsOK := true + if s.grpcWorkloadListener == nil { +- if s.grpcWorkloadListener, err = uds.NewListener(options.WorkloadUDSPath); err != nil { ++ if s.grpcWorkloadListener, err = uds.NewListener(constants.WorkloadIdentitySocketPath); err != nil { + sdsServiceLog.Errorf("SDS grpc server for workload proxies failed to set up UDS: %v", err) + setUpUdsOK = false + } +diff --git a/security/pkg/nodeagent/test/setup.go b/security/pkg/nodeagent/test/setup.go +index 2f07dcdcad..06e8f7faef 100644 +--- a/security/pkg/nodeagent/test/setup.go ++++ b/security/pkg/nodeagent/test/setup.go +@@ -161,7 +161,6 @@ func (e *Env) StartProxy(t *testing.T) { + // StartSDSServer starts SDS server + func (e *Env) StartSDSServer(t *testing.T) { + serverOptions := &security.Options{ +- WorkloadUDSPath: e.ProxySetup.SDSPath(), + JWTPath: proxyTokenPath, + CAEndpoint: fmt.Sprintf("127.0.0.1:%d", e.ProxySetup.Ports().ExtraPort), + } +diff --git a/tools/packaging/common/envoy_bootstrap.json b/tools/packaging/common/envoy_bootstrap.json +index be5b65b179..bca00764d6 100644 +--- a/tools/packaging/common/envoy_bootstrap.json ++++ b/tools/packaging/common/envoy_bootstrap.json +@@ -245,7 +245,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "{{ .config.ConfigPath }}/SDS" ++ "path": "./var/run/secrets/workload-identity/socket" + } + } + } diff --git a/POC/patches/sds-approach.master.patch b/POC/patches/sds-approach.master.patch new file mode 100644 index 00000000..86817e6a --- /dev/null +++ b/POC/patches/sds-approach.master.patch @@ -0,0 +1,521 @@ +diff --git 
a/manifests/charts/gateways/istio-egress/templates/deployment.yaml b/manifests/charts/gateways/istio-egress/templates/deployment.yaml +index 8c71ea1890..fec4323520 100644 +--- a/manifests/charts/gateways/istio-egress/templates/deployment.yaml ++++ b/manifests/charts/gateways/istio-egress/templates/deployment.yaml +@@ -225,6 +225,8 @@ spec: + - name: ISTIO_META_CLUSTER_ID + value: "{{ $.Values.global.multiCluster.clusterName | default `Kubernetes` }}" + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + - name: istio-envoy + mountPath: /etc/istio/proxy + - name: config-volume +@@ -264,7 +266,9 @@ spec: + {{ toYaml $gateway.additionalContainers | indent 8 }} + {{- end }} + volumes: +-{{- if eq .Values.global.pilotCertProvider "istiod" }} ++ - emptyDir: ++ name: workload-identity ++ {{- if eq .Values.global.pilotCertProvider "istiod" }} + - name: istiod-ca-cert + configMap: + name: istio-ca-root-cert +diff --git a/manifests/charts/gateways/istio-ingress/templates/deployment.yaml b/manifests/charts/gateways/istio-ingress/templates/deployment.yaml +index 45d7695a40..64f0173e53 100644 +--- a/manifests/charts/gateways/istio-ingress/templates/deployment.yaml ++++ b/manifests/charts/gateways/istio-ingress/templates/deployment.yaml +@@ -225,6 +225,8 @@ spec: + - name: ISTIO_META_CLUSTER_ID + value: "{{ $.Values.global.multiCluster.clusterName | default `Kubernetes` }}" + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + - name: istio-envoy + mountPath: /etc/istio/proxy + - name: config-volume +@@ -264,6 +266,8 @@ spec: + {{ toYaml $gateway.additionalContainers | indent 8 }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.pilotCertProvider "istiod" }} + - name: istiod-ca-cert + configMap: +diff --git a/manifests/charts/gateways/istio-ingress/templates/injected-deployment.yaml b/manifests/charts/gateways/istio-ingress/templates/injected-deployment.yaml +index 1115d18dd5..a0522c072b 100644 +--- a/manifests/charts/gateways/istio-ingress/templates/injected-deployment.yaml ++++ b/manifests/charts/gateways/istio-ingress/templates/injected-deployment.yaml +@@ -102,6 +102,8 @@ spec: + value: {{ $val }} + {{- end }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- range $gateway.secretVolumes }} + - name: {{ .name }} + mountPath: {{ .mountPath | quote }} +@@ -118,6 +120,8 @@ spec: + {{ toYaml $gateway.additionalContainers | indent 8 }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + {{- range $gateway.secretVolumes }} + - name: {{ .name }} + secret: +diff --git a/manifests/charts/istio-control/istio-discovery/files/gateway-injection-template.yaml b/manifests/charts/istio-control/istio-discovery/files/gateway-injection-template.yaml +index d91e17f7df..4f3dc16d55 100644 +--- a/manifests/charts/istio-control/istio-discovery/files/gateway-injection-template.yaml ++++ b/manifests/charts/istio-control/istio-discovery/files/gateway-injection-template.yaml +@@ -131,6 +131,8 @@ spec: + timeoutSeconds: 3 + failureThreshold: {{ .Values.global.proxy.readinessFailureThreshold }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + mountPath: /var/run/secrets/workload-spiffe-credentials +@@ -158,6 +160,8 @@ spec: + - name: istio-podinfo + mountPath: /etc/istio/pod + volumes: ++ - 
emptyDir: ++ name: workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + csi: +diff --git a/manifests/charts/istio-control/istio-discovery/files/gen-istio.yaml b/manifests/charts/istio-control/istio-discovery/files/gen-istio.yaml +index c5201dffed..11bf5bd58b 100644 +--- a/manifests/charts/istio-control/istio-discovery/files/gen-istio.yaml ++++ b/manifests/charts/istio-control/istio-discovery/files/gen-istio.yaml +@@ -616,6 +616,8 @@ data: + {{ end }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + csi: +@@ -822,6 +824,8 @@ data: + timeoutSeconds: 3 + failureThreshold: {{ .Values.global.proxy.readinessFailureThreshold }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + mountPath: /var/run/secrets/workload-spiffe-credentials +diff --git a/manifests/charts/istio-control/istio-discovery/files/grpc-agent.yaml b/manifests/charts/istio-control/istio-discovery/files/grpc-agent.yaml +index 182e39b713..824d0573b0 100644 +--- a/manifests/charts/istio-control/istio-discovery/files/grpc-agent.yaml ++++ b/manifests/charts/istio-control/istio-discovery/files/grpc-agent.yaml +@@ -177,6 +177,8 @@ spec: + {{- end }} + {{- end }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.pilotCertProvider "istiod" }} + - mountPath: /var/run/secrets/istio + name: istiod-ca-cert +@@ -199,6 +201,8 @@ spec: + {{ end }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + # UDS channel between istioagent and gRPC client for XDS/SDS + - emptyDir: + medium: Memory +diff --git a/manifests/charts/istio-control/istio-discovery/files/injection-template.yaml b/manifests/charts/istio-control/istio-discovery/files/injection-template.yaml +index dbe7708093..cee0ecf6e6 100644 +--- a/manifests/charts/istio-control/istio-discovery/files/injection-template.yaml ++++ b/manifests/charts/istio-control/istio-discovery/files/injection-template.yaml +@@ -356,6 +356,8 @@ spec: + resources: + {{ template "resources" . 
}} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + mountPath: /var/run/secrets/workload-spiffe-credentials +@@ -398,6 +400,8 @@ spec: + {{ end }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + csi: +diff --git a/manifests/charts/istiod-remote/files/gateway-injection-template.yaml b/manifests/charts/istiod-remote/files/gateway-injection-template.yaml +index d91e17f7df..4f3dc16d55 100644 +--- a/manifests/charts/istiod-remote/files/gateway-injection-template.yaml ++++ b/manifests/charts/istiod-remote/files/gateway-injection-template.yaml +@@ -131,6 +131,8 @@ spec: + timeoutSeconds: 3 + failureThreshold: {{ .Values.global.proxy.readinessFailureThreshold }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + mountPath: /var/run/secrets/workload-spiffe-credentials +@@ -158,6 +160,8 @@ spec: + - name: istio-podinfo + mountPath: /etc/istio/pod + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + csi: +diff --git a/manifests/charts/istiod-remote/files/injection-template.yaml b/manifests/charts/istiod-remote/files/injection-template.yaml +index dbe7708093..cee0ecf6e6 100644 +--- a/manifests/charts/istiod-remote/files/injection-template.yaml ++++ b/manifests/charts/istiod-remote/files/injection-template.yaml +@@ -356,6 +356,8 @@ spec: + resources: + {{ template "resources" . }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + mountPath: /var/run/secrets/workload-spiffe-credentials +@@ -398,6 +400,8 @@ spec: + {{ end }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + csi: +diff --git a/pilot/cmd/pilot-agent/options/security.go b/pilot/cmd/pilot-agent/options/security.go +index 0a15a08ed6..7801a34f8d 100644 +--- a/pilot/cmd/pilot-agent/options/security.go ++++ b/pilot/cmd/pilot-agent/options/security.go +@@ -17,7 +17,6 @@ package options + import ( + "fmt" + "os" +- "path/filepath" + "strings" + + meshconfig "istio.io/api/mesh/v1alpha1" +@@ -40,7 +39,6 @@ func NewSecurityOptions(proxyConfig *meshconfig.ProxyConfig, stsPort int, tokenM + PilotCertProvider: features.PilotCertProvider, + OutputKeyCertToDir: outputKeyCertToDir, + ProvCert: provCert, +- WorkloadUDSPath: filepath.Join(proxyConfig.ConfigPath, "SDS"), + ClusterID: clusterIDVar.Get(), + FileMountedCerts: fileMountedCertsEnv, + WorkloadNamespace: PodNamespaceVar.Get(), +diff --git a/pkg/config/constants/constants.go b/pkg/config/constants/constants.go +index 77b0956925..4b536b88e7 100644 +--- a/pkg/config/constants/constants.go ++++ b/pkg/config/constants/constants.go +@@ -42,6 +42,21 @@ const ( + // ConfigPathDir config directory for storing envoy json config files. 
+ ConfigPathDir = "./etc/istio/proxy" + ++ // WorkloadIdentityPath is the path to the folder where workload identity materials are placed ++ WorkloadIdentityPath = "./var/run/secrets/workload-identity/" ++ ++ // WorkloadIdentitySocketPath is the path to the Unix Domain Socket for SDS ++ WorkloadIdentitySocketPath = WorkloadIdentityPath + "socket" ++ ++ // WorkloadIdentityCertChainPath is path to an existing workload certificate chain file ++ WorkloadIdentityCertChainPath = WorkloadIdentityPath + "cert-chain.pem" ++ ++ // WorkloadIdentityKeyPath is path to an existing workload key file ++ WorkloadIdentityKeyPath = WorkloadIdentityPath + "key.pem" ++ ++ // WorkloadIdentityRootCertPath is path to an existing workload root certificate file ++ WorkloadIdentityRootCertPath = WorkloadIdentityPath + "root-cert.pem" ++ + // IstioDataDir is the directory to store binary data such as envoy core dump, profile, and downloaded Wasm modules. + IstioDataDir = "/var/lib/istio/data" + +diff --git a/pkg/istio-agent/agent.go b/pkg/istio-agent/agent.go +index c4f6cad222..913a0d0827 100644 +--- a/pkg/istio-agent/agent.go ++++ b/pkg/istio-agent/agent.go +@@ -400,13 +400,34 @@ func (a *Agent) Run(ctx context.Context) (func(), error) { + return nil, fmt.Errorf("failed to start local DNS server: %v", err) + } + +- a.secretCache, err = a.newSecretManager() +- if err != nil { +- return nil, fmt.Errorf("failed to start workload secret manager %v", err) +- } ++ socketExists := socketFileExists(constants.WorkloadIdentitySocketPath) ++ ++ if !socketExists { ++ log.Info("SDS socket not detected, creating own SDS Server") ++ ++ if workloadCertFilesExist() { ++ log.Info("workload identity cert files detected, creating secret manager without caClient") ++ a.secOpts.RootCertFilePath = constants.WorkloadIdentityRootCertPath ++ a.secOpts.CertChainFilePath = constants.WorkloadIdentityCertChainPath ++ a.secOpts.KeyFilePath = constants.WorkloadIdentityKeyPath ++ ++ a.secretCache, err = cache.NewSecretManagerClient(nil, a.secOpts) ++ if err != nil { ++ return nil, fmt.Errorf("failed to start workload secret manager %v", err) ++ } ++ } else { ++ log.Info("workload identity cert files not found, create secret manager with caClient") ++ a.secretCache, err = a.newSecretManager() ++ if err != nil { ++ return nil, fmt.Errorf("failed to start workload secret manager %v", err) ++ } ++ } + +- a.sdsServer = sds.NewServer(a.secOpts, a.secretCache) +- a.secretCache.SetUpdateCallback(a.sdsServer.UpdateCallback) ++ a.sdsServer = sds.NewServer(a.secOpts, a.secretCache) ++ a.secretCache.SetUpdateCallback(a.sdsServer.UpdateCallback) ++ } else { ++ log.Info("SDS socket detected, don't start SDS Server") ++ } + + a.xdsProxy, err = initXdsProxy(a) + if err != nil { +@@ -596,6 +617,20 @@ func fileExists(path string) bool { + return false + } + ++func socketFileExists(path string) bool { ++ if fi, err := os.Stat(path); err == nil && !fi.Mode().IsRegular() { ++ return true ++ } ++ return false ++} ++ ++func workloadCertFilesExist() bool { ++ rootCertExists := fileExists(constants.WorkloadIdentityRootCertPath) ++ certChainExists := fileExists(constants.WorkloadIdentityCertChainPath) ++ keyExists := fileExists(constants.WorkloadIdentityKeyPath) ++ return rootCertExists && certChainExists && keyExists ++} ++ + // Find the root CA to use when connecting to the CA (Istiod or external). 
+ func (a *Agent) FindRootCAForCA() (string, error) { + var rootCAPath string +diff --git a/pkg/istio-agent/agent_test.go b/pkg/istio-agent/agent_test.go +index 020104ed3e..d35e500cfe 100644 +--- a/pkg/istio-agent/agent_test.go ++++ b/pkg/istio-agent/agent_test.go +@@ -20,6 +20,9 @@ import ( + "crypto/x509" + "encoding/json" + "fmt" ++ "istio.io/istio/pkg/config/constants" ++ "istio.io/istio/security/pkg/nodeagent/cache" ++ "istio.io/istio/security/pkg/nodeagent/sds" + "net" + "os" + "path" +@@ -259,6 +262,47 @@ func TestAgent(t *testing.T) { + return a + }).Check(t, security.WorkloadKeyCertResourceName, security.RootCertReqResourceName) + }) ++ t.Run("External SDS socket", func(t *testing.T) { ++ ++ dir := mktemp() ++ copyCerts(t, dir) ++ ++ secOpts := &security.Options {} ++ secOpts.RootCertFilePath = dir + "/root-cert.pem" ++ secOpts.CertChainFilePath = dir + "/cert-chain.pem" ++ secOpts.KeyFilePath = dir + "/key.pem" ++ ++ secretCache, err := cache.NewSecretManagerClient(nil, secOpts) ++ if err != nil { ++ t.Fatal(err) ++ } ++ defer secretCache.Close() ++ ++ // this SDS Server listens on the fixed socket path serving the certs copied to the temp directory, ++ // and acts as the external SDS Server that the Agent will detect at startup ++ sdsServer := sds.NewServer(secOpts, secretCache) ++ defer sdsServer.Stop() ++ ++ Setup(t).Check(t, security.WorkloadKeyCertResourceName, security.RootCertReqResourceName) ++ ++ t.Cleanup(func() { ++ _ = os.RemoveAll(dir) ++ }) ++ }) ++ t.Run("Workload certificates", func(t *testing.T) { ++ ++ dir := constants.WorkloadIdentityPath ++ if err := os.MkdirAll(dir, 0o755); err != nil { ++ t.Fatal(err) ++ } ++ copyCerts(t, dir) ++ ++ Setup(t).Check(t, security.WorkloadKeyCertResourceName, security.RootCertReqResourceName) ++ ++ t.Cleanup(func() { ++ _ = os.RemoveAll(dir) ++ }) ++ }) + t.Run("VMs", func(t *testing.T) { + // Bootstrap sets up a short lived JWT token and root certificate. The initial run will fetch + // a certificate and write it to disk. This will be used (by mTLS authenticator) for future +@@ -515,7 +559,6 @@ func Setup(t *testing.T, opts ...func(a AgentTest) AgentTest) *AgentTest { + } + ca := setupCa(t, resp.CaAuthenticator) + resp.Security = security.Options{ +- WorkloadUDSPath: filepath.Join(d, "SDS"), + CAEndpoint: ca.URL, + CAProviderName: "Citadel", + TrustDomain: "cluster.local", +@@ -593,7 +636,7 @@ func (a *AgentTest) Check(t *testing.T, expectedSDS ...string) map[string]*xds.A + sdsStreams := map[string]*xds.AdsTest{} + gotKeys := []string{} + for _, res := range xdstest.ExtractSecretResources(t, resp.Resources) { +- sds := xds.NewSdsTest(t, setupDownstreamConnectionUDS(t, a.Security.WorkloadUDSPath)). ++ sds := xds.NewSdsTest(t, setupDownstreamConnectionUDS(t, constants.WorkloadIdentitySocketPath)). + WithMetadata(meta). + WithTimeout(time.Second * 20) // CSR can be extremely slow with race detection enabled due to 2048 RSA + sds.RequestResponseAck(t, &discovery.DiscoveryRequest{ResourceNames: []string{res}}) +diff --git a/pkg/security/security.go b/pkg/security/security.go +index c1cf9b48f6..2a60e60552 100644 +--- a/pkg/security/security.go ++++ b/pkg/security/security.go +@@ -114,9 +114,6 @@ const ( + // TODO: ProxyConfig should have most of those, and be passed to all components + // (as source of truth) + type Options struct { +- // WorkloadUDSPath is the unix domain socket through which SDS server communicates with workload proxies. 
+- WorkloadUDSPath string +- + // CAEndpoint is the CA endpoint to which node agent sends CSR request. + CAEndpoint string + +diff --git a/security/pkg/nodeagent/caclient/credentials_test.go b/security/pkg/nodeagent/caclient/credentials_test.go +index dfddc36fda..3bbc1cf6c2 100644 +--- a/security/pkg/nodeagent/caclient/credentials_test.go ++++ b/security/pkg/nodeagent/caclient/credentials_test.go +@@ -58,7 +58,6 @@ func TestGetTokenForXDS(t *testing.T) { + PilotCertProvider: "istiod", + OutputKeyCertToDir: "", + ProvCert: "", +- WorkloadUDSPath: "./etc/istio/proxy/SDS", + ClusterID: "", + FileMountedCerts: false, + WorkloadNamespace: "", +diff --git a/security/pkg/nodeagent/sds/sdsservice_test.go b/security/pkg/nodeagent/sds/sdsservice_test.go +index 7f9346f61d..0e84983aa6 100644 +--- a/security/pkg/nodeagent/sds/sdsservice_test.go ++++ b/security/pkg/nodeagent/sds/sdsservice_test.go +@@ -15,6 +15,7 @@ package sds + + import ( + "fmt" ++ "istio.io/istio/pkg/config/constants" + "net" + "strings" + "testing" +@@ -109,9 +110,7 @@ func setupSDS(t *testing.T) *TestServer { + ResourceName: ca2.RootCertReqResourceName, + }) + +- opts := &ca2.Options{ +- WorkloadUDSPath: fmt.Sprintf("/tmp/workload_gotest%s.sock", string(uuid.NewUUID())), +- } ++ opts := &ca2.Options{} + server := NewServer(opts, st) + t.Cleanup(func() { + server.Stop() +@@ -120,7 +119,7 @@ func setupSDS(t *testing.T) *TestServer { + t: t, + server: server, + store: st, +- udsPath: opts.WorkloadUDSPath, ++ udsPath: constants.WorkloadIdentitySocketPath, + } + } + +diff --git a/security/pkg/nodeagent/sds/server.go b/security/pkg/nodeagent/sds/server.go +index ac4e78a0a2..0ae86bbf9a 100644 +--- a/security/pkg/nodeagent/sds/server.go ++++ b/security/pkg/nodeagent/sds/server.go +@@ -15,6 +15,7 @@ + package sds + + import ( ++ "istio.io/istio/pkg/config/constants" + "net" + "time" + +@@ -46,8 +47,8 @@ type Server struct { + func NewServer(options *security.Options, workloadSecretCache security.SecretManager) *Server { + s := &Server{stopped: atomic.NewBool(false)} + s.workloadSds = newSDSService(workloadSecretCache, options) +- s.initWorkloadSdsService(options) +- sdsServiceLog.Infof("SDS server for workload certificates started, listening on %q", options.WorkloadUDSPath) ++ s.initWorkloadSdsService() ++ sdsServiceLog.Infof("SDS server for workload certificates started, listening on %q", constants.WorkloadIdentitySocketPath) + return s + } + +@@ -81,12 +82,12 @@ func (s *Server) Stop() { + } + } + +-func (s *Server) initWorkloadSdsService(options *security.Options) { ++func (s *Server) initWorkloadSdsService() { + s.grpcWorkloadServer = grpc.NewServer(s.grpcServerOptions()...) 
+ s.workloadSds.register(s.grpcWorkloadServer) + + var err error +- s.grpcWorkloadListener, err = uds.NewListener(options.WorkloadUDSPath) ++ s.grpcWorkloadListener, err = uds.NewListener(constants.WorkloadIdentitySocketPath) + if err != nil { + sdsServiceLog.Errorf("Failed to set up UDS path: %v", err) + } +@@ -102,7 +103,7 @@ func (s *Server) initWorkloadSdsService(options *security.Options) { + serverOk := true + setUpUdsOK := true + if s.grpcWorkloadListener == nil { +- if s.grpcWorkloadListener, err = uds.NewListener(options.WorkloadUDSPath); err != nil { ++ if s.grpcWorkloadListener, err = uds.NewListener(constants.WorkloadIdentitySocketPath); err != nil { + sdsServiceLog.Errorf("SDS grpc server for workload proxies failed to set up UDS: %v", err) + setUpUdsOK = false + } +diff --git a/security/pkg/nodeagent/test/setup.go b/security/pkg/nodeagent/test/setup.go +index 2f07dcdcad..06e8f7faef 100644 +--- a/security/pkg/nodeagent/test/setup.go ++++ b/security/pkg/nodeagent/test/setup.go +@@ -161,7 +161,6 @@ func (e *Env) StartProxy(t *testing.T) { + // StartSDSServer starts SDS server + func (e *Env) StartSDSServer(t *testing.T) { + serverOptions := &security.Options{ +- WorkloadUDSPath: e.ProxySetup.SDSPath(), + JWTPath: proxyTokenPath, + CAEndpoint: fmt.Sprintf("127.0.0.1:%d", e.ProxySetup.Ports().ExtraPort), + } +diff --git a/tools/packaging/common/envoy_bootstrap.json b/tools/packaging/common/envoy_bootstrap.json +index be5b65b179..5d26f1b3dd 100644 +--- a/tools/packaging/common/envoy_bootstrap.json ++++ b/tools/packaging/common/envoy_bootstrap.json +@@ -245,7 +245,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "{{ .config.ConfigPath }}/SDS" ++ "path": "/var/run/secrets/workload-identity/socket" + } + } + } diff --git a/POC/patches/sds-approach.release-1.12.patch b/POC/patches/sds-approach.release-1.12.patch new file mode 100644 index 00000000..0f100cbb --- /dev/null +++ b/POC/patches/sds-approach.release-1.12.patch @@ -0,0 +1,530 @@ +diff --git a/manifests/charts/gateways/istio-egress/templates/deployment.yaml b/manifests/charts/gateways/istio-egress/templates/deployment.yaml +index 8c71ea1890..fec4323520 100644 +--- a/manifests/charts/gateways/istio-egress/templates/deployment.yaml ++++ b/manifests/charts/gateways/istio-egress/templates/deployment.yaml +@@ -225,6 +225,8 @@ spec: + - name: ISTIO_META_CLUSTER_ID + value: "{{ $.Values.global.multiCluster.clusterName | default `Kubernetes` }}" + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + - name: istio-envoy + mountPath: /etc/istio/proxy + - name: config-volume +@@ -264,7 +266,9 @@ spec: + {{ toYaml $gateway.additionalContainers | indent 8 }} + {{- end }} + volumes: +-{{- if eq .Values.global.pilotCertProvider "istiod" }} ++ - emptyDir: ++ name: workload-identity ++ {{- if eq .Values.global.pilotCertProvider "istiod" }} + - name: istiod-ca-cert + configMap: + name: istio-ca-root-cert +diff --git a/manifests/charts/gateways/istio-ingress/templates/deployment.yaml b/manifests/charts/gateways/istio-ingress/templates/deployment.yaml +index 45d7695a40..64f0173e53 100644 +--- a/manifests/charts/gateways/istio-ingress/templates/deployment.yaml ++++ b/manifests/charts/gateways/istio-ingress/templates/deployment.yaml +@@ -225,6 +225,8 @@ spec: + - name: ISTIO_META_CLUSTER_ID + value: "{{ $.Values.global.multiCluster.clusterName | default `Kubernetes` }}" + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + - name: istio-envoy + mountPath: 
/etc/istio/proxy + - name: config-volume +@@ -264,6 +266,8 @@ spec: + {{ toYaml $gateway.additionalContainers | indent 8 }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.pilotCertProvider "istiod" }} + - name: istiod-ca-cert + configMap: +diff --git a/manifests/charts/gateways/istio-ingress/templates/injected-deployment.yaml b/manifests/charts/gateways/istio-ingress/templates/injected-deployment.yaml +index 1115d18dd5..a0522c072b 100644 +--- a/manifests/charts/gateways/istio-ingress/templates/injected-deployment.yaml ++++ b/manifests/charts/gateways/istio-ingress/templates/injected-deployment.yaml +@@ -102,6 +102,8 @@ spec: + value: {{ $val }} + {{- end }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- range $gateway.secretVolumes }} + - name: {{ .name }} + mountPath: {{ .mountPath | quote }} +@@ -118,6 +120,8 @@ spec: + {{ toYaml $gateway.additionalContainers | indent 8 }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + {{- range $gateway.secretVolumes }} + - name: {{ .name }} + secret: +diff --git a/manifests/charts/istio-control/istio-discovery/files/gateway-injection-template.yaml b/manifests/charts/istio-control/istio-discovery/files/gateway-injection-template.yaml +index 9ce002a5bc..a7657693e8 100644 +--- a/manifests/charts/istio-control/istio-discovery/files/gateway-injection-template.yaml ++++ b/manifests/charts/istio-control/istio-discovery/files/gateway-injection-template.yaml +@@ -131,6 +131,8 @@ spec: + timeoutSeconds: 3 + failureThreshold: {{ .Values.global.proxy.readinessFailureThreshold }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + mountPath: /var/run/secrets/workload-spiffe-credentials +@@ -158,6 +160,8 @@ spec: + - name: istio-podinfo + mountPath: /etc/istio/pod + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + csi: +diff --git a/manifests/charts/istio-control/istio-discovery/files/gen-istio.yaml b/manifests/charts/istio-control/istio-discovery/files/gen-istio.yaml +index 10cb3d9e73..3804ab6b5b 100644 +--- a/manifests/charts/istio-control/istio-discovery/files/gen-istio.yaml ++++ b/manifests/charts/istio-control/istio-discovery/files/gen-istio.yaml +@@ -593,6 +593,8 @@ data: + {{- end }} + {{- end }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + mountPath: /var/run/secrets/workload-spiffe-credentials +@@ -635,6 +637,8 @@ data: + {{ end }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + csi: +diff --git a/manifests/charts/istio-control/istio-discovery/files/grpc-agent.yaml b/manifests/charts/istio-control/istio-discovery/files/grpc-agent.yaml +index 547e03e0cc..0c40313610 100644 +--- a/manifests/charts/istio-control/istio-discovery/files/grpc-agent.yaml ++++ b/manifests/charts/istio-control/istio-discovery/files/grpc-agent.yaml +@@ -174,6 +174,8 @@ spec: + {{- end }} + {{- end }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.pilotCertProvider "istiod" }} + - mountPath: 
/var/run/secrets/istio + name: istiod-ca-cert +@@ -196,6 +198,8 @@ spec: + {{ end }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + # UDS channel between istioagent and gRPC client for XDS/SDS + - emptyDir: + medium: Memory +diff --git a/manifests/charts/istio-control/istio-discovery/files/injection-template.yaml b/manifests/charts/istio-control/istio-discovery/files/injection-template.yaml +index c1fc20b3ea..aab7f5e7e3 100644 +--- a/manifests/charts/istio-control/istio-discovery/files/injection-template.yaml ++++ b/manifests/charts/istio-control/istio-discovery/files/injection-template.yaml +@@ -375,6 +375,8 @@ spec: + {{- end }} + {{- end }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + mountPath: /var/run/secrets/workload-spiffe-credentials +@@ -417,6 +419,8 @@ spec: + {{ end }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + csi: +diff --git a/manifests/charts/istiod-remote/files/gateway-injection-template.yaml b/manifests/charts/istiod-remote/files/gateway-injection-template.yaml +index 9ce002a5bc..a7657693e8 100644 +--- a/manifests/charts/istiod-remote/files/gateway-injection-template.yaml ++++ b/manifests/charts/istiod-remote/files/gateway-injection-template.yaml +@@ -131,6 +131,8 @@ spec: + timeoutSeconds: 3 + failureThreshold: {{ .Values.global.proxy.readinessFailureThreshold }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + mountPath: /var/run/secrets/workload-spiffe-credentials +@@ -158,6 +160,8 @@ spec: + - name: istio-podinfo + mountPath: /etc/istio/pod + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + csi: +diff --git a/manifests/charts/istiod-remote/files/injection-template.yaml b/manifests/charts/istiod-remote/files/injection-template.yaml +index c1fc20b3ea..aab7f5e7e3 100644 +--- a/manifests/charts/istiod-remote/files/injection-template.yaml ++++ b/manifests/charts/istiod-remote/files/injection-template.yaml +@@ -375,6 +375,8 @@ spec: + {{- end }} + {{- end }} + volumeMounts: ++ - name: workload-identity ++ mountPath: /var/run/secrets/workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + mountPath: /var/run/secrets/workload-spiffe-credentials +@@ -417,6 +419,8 @@ spec: + {{ end }} + {{- end }} + volumes: ++ - emptyDir: ++ name: workload-identity + {{- if eq .Values.global.caName "GkeWorkloadCertificate" }} + - name: gke-workload-certificate + csi: +diff --git a/pilot/cmd/pilot-agent/options/security.go b/pilot/cmd/pilot-agent/options/security.go +index 0a15a08ed6..7801a34f8d 100644 +--- a/pilot/cmd/pilot-agent/options/security.go ++++ b/pilot/cmd/pilot-agent/options/security.go +@@ -17,7 +17,6 @@ package options + import ( + "fmt" + "os" +- "path/filepath" + "strings" + + meshconfig "istio.io/api/mesh/v1alpha1" +@@ -40,7 +39,6 @@ func NewSecurityOptions(proxyConfig *meshconfig.ProxyConfig, stsPort int, tokenM + PilotCertProvider: features.PilotCertProvider, + OutputKeyCertToDir: outputKeyCertToDir, + ProvCert: provCert, +- WorkloadUDSPath: filepath.Join(proxyConfig.ConfigPath, "SDS"), + 
ClusterID: clusterIDVar.Get(), + FileMountedCerts: fileMountedCertsEnv, + WorkloadNamespace: PodNamespaceVar.Get(), +diff --git a/pkg/config/constants/constants.go b/pkg/config/constants/constants.go +index 0d0da3abb2..cfde466a9d 100644 +--- a/pkg/config/constants/constants.go ++++ b/pkg/config/constants/constants.go +@@ -42,6 +42,21 @@ const ( + // ConfigPathDir config directory for storing envoy json config files. + ConfigPathDir = "./etc/istio/proxy" + ++ // WorkloadIdentityPath is the path to the folder where workload identity materials are placed ++ WorkloadIdentityPath = "./var/run/secrets/workload-identity/" ++ ++ // WorkloadIdentitySocketPath is the path to the Unix Domain Socket for SDS ++ WorkloadIdentitySocketPath = WorkloadIdentityPath + "socket" ++ ++ // WorkloadIdentityCertChainPath is path to an existing workload certificate chain file ++ WorkloadIdentityCertChainPath = WorkloadIdentityPath + "cert-chain.pem" ++ ++ // WorkloadIdentityKeyPath is path to an existing workload key file ++ WorkloadIdentityKeyPath = WorkloadIdentityPath + "key.pem" ++ ++ // WorkloadIdentityRootCertPath is path to an existing workload root certificate file ++ WorkloadIdentityRootCertPath = WorkloadIdentityPath + "root-cert.pem" ++ + // IstioDataDir is the directory to store binary data such as envoy core dump, profile, and downloaded Wasm modules. + IstioDataDir = "/var/lib/istio/data" + +diff --git a/pkg/istio-agent/agent.go b/pkg/istio-agent/agent.go +index cf4ec0cd82..f35952ba0c 100644 +--- a/pkg/istio-agent/agent.go ++++ b/pkg/istio-agent/agent.go +@@ -408,13 +408,34 @@ func (a *Agent) Run(ctx context.Context) (func(), error) { + return nil, fmt.Errorf("failed to start local DNS server: %v", err) + } + +- a.secretCache, err = a.newSecretManager() +- if err != nil { +- return nil, fmt.Errorf("failed to start workload secret manager %v", err) +- } ++ socketExists := socketFileExists(constants.WorkloadIdentitySocketPath) ++ ++ if !socketExists { ++ log.Info("SDS socket not detected, creating own SDS Server") ++ ++ if workloadCertFilesExist() { ++ log.Info("workload identity cert files detected, creating secret manager without caClient") ++ a.secOpts.RootCertFilePath = constants.WorkloadIdentityRootCertPath ++ a.secOpts.CertChainFilePath = constants.WorkloadIdentityCertChainPath ++ a.secOpts.KeyFilePath = constants.WorkloadIdentityKeyPath ++ ++ a.secretCache, err = cache.NewSecretManagerClient(nil, a.secOpts) ++ if err != nil { ++ return nil, fmt.Errorf("failed to start workload secret manager %v", err) ++ } ++ } else { ++ log.Info("workload identity cert files not found, create secret manager with caClient") ++ a.secretCache, err = a.newSecretManager() ++ if err != nil { ++ return nil, fmt.Errorf("failed to start workload secret manager %v", err) ++ } ++ } + +- a.sdsServer = sds.NewServer(a.secOpts, a.secretCache) +- a.secretCache.SetUpdateCallback(a.sdsServer.UpdateCallback) ++ a.sdsServer = sds.NewServer(a.secOpts, a.secretCache) ++ a.secretCache.SetUpdateCallback(a.sdsServer.UpdateCallback) ++ } else { ++ log.Info("SDS socket detected, don't start SDS Server") ++ } + + a.xdsProxy, err = initXdsProxy(a) + if err != nil { +@@ -595,6 +616,20 @@ func fileExists(path string) bool { + return false + } + ++func socketFileExists(path string) bool { ++ if fi, err := os.Stat(path); err == nil && !fi.Mode().IsRegular() { ++ return true ++ } ++ return false ++} ++ ++func workloadCertFilesExist() bool { ++ rootCertExists := fileExists(constants.WorkloadIdentityRootCertPath) ++ certChainExists := 
fileExists(constants.WorkloadIdentityCertChainPath) ++ keyExists := fileExists(constants.WorkloadIdentityKeyPath) ++ return rootCertExists && certChainExists && keyExists ++} ++ + // Find the root CA to use when connecting to the CA (Istiod or external). + func (a *Agent) FindRootCAForCA() (string, error) { + var rootCAPath string +diff --git a/pkg/istio-agent/agent_test.go b/pkg/istio-agent/agent_test.go +index e3da7b91c9..d116458ddb 100644 +--- a/pkg/istio-agent/agent_test.go ++++ b/pkg/istio-agent/agent_test.go +@@ -20,6 +20,9 @@ import ( + "crypto/x509" + "encoding/json" + "fmt" ++ "istio.io/istio/pkg/config/constants" ++ "istio.io/istio/security/pkg/nodeagent/cache" ++ "istio.io/istio/security/pkg/nodeagent/sds" + "net" + "os" + "path" +@@ -259,6 +262,47 @@ func TestAgent(t *testing.T) { + return a + }).Check(t, security.WorkloadKeyCertResourceName, security.RootCertReqResourceName) + }) ++ t.Run("External SDS socket", func(t *testing.T) { ++ ++ dir := mktemp() ++ copyCerts(t, dir) ++ ++ secOpts := &security.Options {} ++ secOpts.RootCertFilePath = dir + "/root-cert.pem" ++ secOpts.CertChainFilePath = dir + "/cert-chain.pem" ++ secOpts.KeyFilePath = dir + "/key.pem" ++ ++ secretCache, err := cache.NewSecretManagerClient(nil, secOpts) ++ if err != nil { ++ t.Fatal(err) ++ } ++ defer secretCache.Close() ++ ++ // this SDS Server listens on the fixed socket path serving the certs copied to the temp directory, ++ // and acts as the external SDS Server that the Agent will detect at startup ++ sdsServer := sds.NewServer(secOpts, secretCache) ++ defer sdsServer.Stop() ++ ++ Setup(t).Check(t, security.WorkloadKeyCertResourceName, security.RootCertReqResourceName) ++ ++ t.Cleanup(func() { ++ _ = os.RemoveAll(dir) ++ }) ++ }) ++ t.Run("Workload certificates", func(t *testing.T) { ++ ++ dir := constants.WorkloadIdentityPath ++ if err := os.MkdirAll(dir, 0o755); err != nil { ++ t.Fatal(err) ++ } ++ copyCerts(t, dir) ++ ++ Setup(t).Check(t, security.WorkloadKeyCertResourceName, security.RootCertReqResourceName) ++ ++ t.Cleanup(func() { ++ _ = os.RemoveAll(dir) ++ }) ++ }) + t.Run("VMs", func(t *testing.T) { + // Bootstrap sets up a short lived JWT token and root certificate. The initial run will fetch + // a certificate and write it to disk. This will be used (by mTLS authenticator) for future +@@ -515,7 +559,6 @@ func Setup(t *testing.T, opts ...func(a AgentTest) AgentTest) *AgentTest { + } + ca := setupCa(t, resp.CaAuthenticator) + resp.Security = security.Options{ +- WorkloadUDSPath: filepath.Join(d, "SDS"), + CAEndpoint: ca.URL, + CAProviderName: "Citadel", + TrustDomain: "cluster.local", +@@ -593,7 +636,7 @@ func (a *AgentTest) Check(t *testing.T, expectedSDS ...string) map[string]*xds.A + sdsStreams := map[string]*xds.AdsTest{} + gotKeys := []string{} + for _, res := range xdstest.ExtractSecretResources(t, resp.Resources) { +- sds := xds.NewSdsTest(t, setupDownstreamConnectionUDS(t, a.Security.WorkloadUDSPath)). ++ sds := xds.NewSdsTest(t, setupDownstreamConnectionUDS(t, constants.WorkloadIdentitySocketPath)). + WithMetadata(meta). 
+ WithTimeout(time.Second * 20) // CSR can be extremely slow with race detection enabled due to 2048 RSA + sds.RequestResponseAck(t, &discovery.DiscoveryRequest{ResourceNames: []string{res}}) +diff --git a/pkg/security/security.go b/pkg/security/security.go +index c1cf9b48f6..2a60e60552 100644 +--- a/pkg/security/security.go ++++ b/pkg/security/security.go +@@ -114,9 +114,6 @@ const ( + // TODO: ProxyConfig should have most of those, and be passed to all components + // (as source of truth) + type Options struct { +- // WorkloadUDSPath is the unix domain socket through which SDS server communicates with workload proxies. +- WorkloadUDSPath string +- + // CAEndpoint is the CA endpoint to which node agent sends CSR request. + CAEndpoint string + +diff --git a/security/pkg/nodeagent/caclient/credentials_test.go b/security/pkg/nodeagent/caclient/credentials_test.go +index dfddc36fda..3bbc1cf6c2 100644 +--- a/security/pkg/nodeagent/caclient/credentials_test.go ++++ b/security/pkg/nodeagent/caclient/credentials_test.go +@@ -58,7 +58,6 @@ func TestGetTokenForXDS(t *testing.T) { + PilotCertProvider: "istiod", + OutputKeyCertToDir: "", + ProvCert: "", +- WorkloadUDSPath: "./etc/istio/proxy/SDS", + ClusterID: "", + FileMountedCerts: false, + WorkloadNamespace: "", +diff --git a/security/pkg/nodeagent/sds/sdsservice_test.go b/security/pkg/nodeagent/sds/sdsservice_test.go +index f14f02e4a0..1d97005bc9 100644 +--- a/security/pkg/nodeagent/sds/sdsservice_test.go ++++ b/security/pkg/nodeagent/sds/sdsservice_test.go +@@ -15,6 +15,7 @@ package sds + + import ( + "fmt" ++ "istio.io/istio/pkg/config/constants" + "net" + "strings" + "testing" +@@ -23,8 +24,6 @@ import ( + "github.com/google/go-cmp/cmp" + "golang.org/x/net/context" + "google.golang.org/grpc" +- "k8s.io/apimachinery/pkg/util/uuid" +- + "istio.io/istio/pilot/pkg/xds" + "istio.io/istio/pilot/test/xdstest" + ca2 "istio.io/istio/pkg/security" +@@ -108,9 +107,7 @@ func setupSDS(t *testing.T) *TestServer { + ResourceName: ca2.RootCertReqResourceName, + }) + +- opts := &ca2.Options{ +- WorkloadUDSPath: fmt.Sprintf("/tmp/workload_gotest%s.sock", string(uuid.NewUUID())), +- } ++ opts := &ca2.Options{} + server := NewServer(opts, st) + t.Cleanup(func() { + server.Stop() +@@ -119,7 +116,7 @@ func setupSDS(t *testing.T) *TestServer { + t: t, + server: server, + store: st, +- udsPath: opts.WorkloadUDSPath, ++ udsPath: constants.WorkloadIdentitySocketPath, + } + } + +diff --git a/security/pkg/nodeagent/sds/server.go b/security/pkg/nodeagent/sds/server.go +index ac4e78a0a2..0ae86bbf9a 100644 +--- a/security/pkg/nodeagent/sds/server.go ++++ b/security/pkg/nodeagent/sds/server.go +@@ -15,6 +15,7 @@ + package sds + + import ( ++ "istio.io/istio/pkg/config/constants" + "net" + "time" + +@@ -46,8 +47,8 @@ type Server struct { + func NewServer(options *security.Options, workloadSecretCache security.SecretManager) *Server { + s := &Server{stopped: atomic.NewBool(false)} + s.workloadSds = newSDSService(workloadSecretCache, options) +- s.initWorkloadSdsService(options) +- sdsServiceLog.Infof("SDS server for workload certificates started, listening on %q", options.WorkloadUDSPath) ++ s.initWorkloadSdsService() ++ sdsServiceLog.Infof("SDS server for workload certificates started, listening on %q", constants.WorkloadIdentitySocketPath) + return s + } + +@@ -81,12 +82,12 @@ func (s *Server) Stop() { + } + } + +-func (s *Server) initWorkloadSdsService(options *security.Options) { ++func (s *Server) initWorkloadSdsService() { + s.grpcWorkloadServer = 
grpc.NewServer(s.grpcServerOptions()...) + s.workloadSds.register(s.grpcWorkloadServer) + + var err error +- s.grpcWorkloadListener, err = uds.NewListener(options.WorkloadUDSPath) ++ s.grpcWorkloadListener, err = uds.NewListener(constants.WorkloadIdentitySocketPath) + if err != nil { + sdsServiceLog.Errorf("Failed to set up UDS path: %v", err) + } +@@ -102,7 +103,7 @@ func (s *Server) initWorkloadSdsService(options *security.Options) { + serverOk := true + setUpUdsOK := true + if s.grpcWorkloadListener == nil { +- if s.grpcWorkloadListener, err = uds.NewListener(options.WorkloadUDSPath); err != nil { ++ if s.grpcWorkloadListener, err = uds.NewListener(constants.WorkloadIdentitySocketPath); err != nil { + sdsServiceLog.Errorf("SDS grpc server for workload proxies failed to set up UDS: %v", err) + setUpUdsOK = false + } +diff --git a/security/pkg/nodeagent/test/setup.go b/security/pkg/nodeagent/test/setup.go +index d2c32f0482..d7070381e5 100644 +--- a/security/pkg/nodeagent/test/setup.go ++++ b/security/pkg/nodeagent/test/setup.go +@@ -160,7 +160,6 @@ func (e *Env) StartProxy(t *testing.T) { + // StartSDSServer starts SDS server + func (e *Env) StartSDSServer(t *testing.T) { + serverOptions := &security.Options{ +- WorkloadUDSPath: e.ProxySetup.SDSPath(), + JWTPath: proxyTokenPath, + CAEndpoint: fmt.Sprintf("127.0.0.1:%d", e.ProxySetup.Ports().ExtraPort), + } +diff --git a/tools/packaging/common/envoy_bootstrap.json b/tools/packaging/common/envoy_bootstrap.json +index 293f259e67..b76bc27337 100644 +--- a/tools/packaging/common/envoy_bootstrap.json ++++ b/tools/packaging/common/envoy_bootstrap.json +@@ -245,7 +245,7 @@ + "endpoint": { + "address":{ + "pipe": { +- "path": "{{ .config.ConfigPath }}/SDS" ++ "path": "/var/run/secrets/workload-identity/socket" + } + } + } diff --git a/POC/spire/agent-account.yaml b/POC/spire/agent-account.yaml new file mode 100644 index 00000000..9091404c --- /dev/null +++ b/POC/spire/agent-account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-agent + namespace: spire diff --git a/POC/spire/agent-cluster-role.yaml b/POC/spire/agent-cluster-role.yaml new file mode 100644 index 00000000..8bfe36c5 --- /dev/null +++ b/POC/spire/agent-cluster-role.yaml @@ -0,0 +1,24 @@ +# Required cluster role to allow spire-agent to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role +rules: +- apiGroups: [""] + resources: ["pods","nodes","nodes/proxy"] + verbs: ["get"] + +--- +# Binds above cluster role to spire-agent service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role-binding +subjects: +- kind: ServiceAccount + name: spire-agent + namespace: spire +roleRef: + kind: ClusterRole + name: spire-agent-cluster-role + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/POC/spire/agent-configmap.yaml b/POC/spire/agent-configmap.yaml new file mode 100644 index 00000000..4b7c4f37 --- /dev/null +++ b/POC/spire/agent-configmap.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-agent + namespace: spire +data: + agent.conf: | + agent { + data_dir = "/run/spire" + log_level = "DEBUG" + server_address = "spire-server" + server_port = "8081" + socket_path = "/run/spire/sockets/agent.sock" + trust_bundle_path = "/run/spire/bundle/root-cert.pem" + trust_domain = "example.org" + } + + plugins { + NodeAttestor "k8s_psat" { + plugin_data { + # NOTE: Change this to 
your cluster name + cluster = "demo-cluster" + } + } + + KeyManager "memory" { + plugin_data { + } + } + + WorkloadAttestor "k8s" { + plugin_data { + # Defaults to the secure kubelet port by default. + # Minikube does not have a cert in the cluster CA bundle that + # can authenticate the kubelet cert, so skip validation. + skip_kubelet_verification = true + } + } + + WorkloadAttestor "unix" { + plugin_data { + } + } + } + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8080" + live_path = "/live" + ready_path = "/ready" + } diff --git a/POC/spire/agent-daemonset.yaml b/POC/spire/agent-daemonset.yaml new file mode 100644 index 00000000..222153b8 --- /dev/null +++ b/POC/spire/agent-daemonset.yaml @@ -0,0 +1,135 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: spire-agent + namespace: spire + labels: + app: spire-agent +spec: + selector: + matchLabels: + app: spire-agent + template: + metadata: + namespace: spire + labels: + app: spire-agent + spec: + hostPID: true + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: spire-agent + initContainers: + - name: init + # This is a small image with wait-for-it, choose whatever image + # you prefer that waits for a service to be up. This image is built + # from https://github.com/lqhl/wait-for-it + image: gcr.io/spiffe-io/wait-for-it + args: ["-t", "30", "spire-server:8081"] + containers: + - name: spire-agent + image: gcr.io/spiffe-io/spire-agent:1.1.1 + args: ["-config", "/run/spire/config/agent.conf"] + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + - name: spire-bundle + mountPath: /run/spire/bundle + - name: spire-agent-socket-dir + mountPath: /run/spire/sockets + - name: spire-token + mountPath: /var/run/secrets/tokens + livenessProbe: + httpGet: + path: /live + port: 8080 + failureThreshold: 2 + initialDelaySeconds: 15 + periodSeconds: 60 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + # This is the container which runs the SPIFFE CSI driver. + - name: spiffe-csi-driver + image: ghcr.io/spiffe/spiffe-csi-driver:0.1.0 + imagePullPolicy: IfNotPresent + args: [ + "-node-id", "CSI_NODE", + "-workload-api-socket-dir", "/spire-agent-socket", + "-csi-socket-path", "/spiffe-csi/csi.sock", + ] + volumeMounts: + # The volume containing the SPIRE agent socket. The SPIFFE CSI + # driver will mount this directory into containers. + - mountPath: /spire-agent-socket + name: spire-agent-socket-dir + readOnly: true + # The volume that will contain the CSI driver socket shared + # with the kubelet and the driver registrar. + - mountPath: /spiffe-csi + name: spiffe-csi-socket-dir + # The volume containing mount points for containers. + - mountPath: /var/lib/kubelet/pods + mountPropagation: Bidirectional + name: mountpoint-dir + securityContext: + privileged: true + # This container runs the CSI Node Driver Registrar which takes care + # of all the little details required to register a CSI driver with + # the kubelet. 
+ - name: node-driver-registrar + image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0 + imagePullPolicy: IfNotPresent + args: [ + "-csi-address", "/spiffe-csi/csi.sock", + "-kubelet-registration-path", "/var/lib/kubelet/plugins/csi.spiffe.io/csi.sock", + ] + volumeMounts: + # The registrar needs access to the SPIFFE CSI driver socket + - mountPath: /spiffe-csi + name: spiffe-csi-socket-dir + # The registrar needs access to the Kubelet plugin registration + # directory + - name: kubelet-plugin-registration-dir + mountPath: /registration + volumes: + - name: spire-config + configMap: + name: spire-agent + - name: spire-bundle + configMap: + name: trust-bundle + - name: spire-token + projected: + sources: + - serviceAccountToken: + path: spire-agent + expirationSeconds: 7200 + audience: spire-server + # This volume is used to share the workload api socket between the + # CSI driver and SPIRE agent + - name: spire-agent-socket-dir + hostPath: + path: /run/spire/socket-dir + type: DirectoryOrCreate + # This volume is where the socket for kubelet->driver communication lives + - name: spiffe-csi-socket-dir + hostPath: + path: /var/lib/kubelet/plugins/csi.spiffe.io + type: DirectoryOrCreate + # This volume is where the SPIFFE CSI driver mounts volumes + - name: mountpoint-dir + hostPath: + path: /var/lib/kubelet/pods + type: Directory + # This volume is where the node-driver-registrar registers the plugin + # with kubelet + - name: kubelet-plugin-registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory diff --git a/POC/spire/cleanup-spire.sh b/POC/spire/cleanup-spire.sh new file mode 100755 index 00000000..17f6cd9f --- /dev/null +++ b/POC/spire/cleanup-spire.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +kubectl delete CustomResourceDefinition spiffeids.spiffeid.spiffe.io +kubectl delete -f k8s-workload-registrar-crd-configmap.yaml -f k8s-workload-registrar-crd-cluster-role.yaml +kubectl delete clusterrole spire-server-trust-role spire-agent-cluster-role +kubectl delete clusterrolebinding spire-server-trust-role-binding spire-agent-cluster-role-binding +kubectl delete namespace spire diff --git a/POC/spire/deploy-spire.sh b/POC/spire/deploy-spire.sh new file mode 100755 index 00000000..eeebe100 --- /dev/null +++ b/POC/spire/deploy-spire.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +set -e + +# Create the k8s-workload-registrar crd, configmap and associated role bindingsspace +kubectl apply \ + -f k8s-workload-registrar-crd-cluster-role.yaml \ + -f k8s-workload-registrar-crd-configmap.yaml \ + -f spiffeid.spiffe.io_spiffeids.yaml + +# Create the server’s service account, configmap and associated role bindings +kubectl apply \ + -f server-account.yaml \ + -f spire-bundle-configmap.yaml \ + -f server-cluster-role.yaml + +# Deploy the server configmap and statefulset +kubectl apply \ + -f server-configmap.yaml \ + -f server-statefulset.yaml \ + -f server-service.yaml + +# Configuring and deploying the SPIRE Agent +kubectl apply \ + -f agent-account.yaml \ + -f agent-cluster-role.yaml + +sleep 2 + +kubectl apply \ + -f agent-configmap.yaml \ + -f agent-daemonset.yaml + +# Applying SPIFFE CSI Driver configuration +kubectl apply -f spiffe-csi-driver.yaml diff --git a/POC/spire/k8s-workload-registrar-crd-cluster-role.yaml b/POC/spire/k8s-workload-registrar-crd-cluster-role.yaml new file mode 100644 index 00000000..f4982cfa --- /dev/null +++ b/POC/spire/k8s-workload-registrar-crd-cluster-role.yaml @@ -0,0 +1,33 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding 
+metadata: + name: k8s-workload-registrar-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: k8s-workload-registrar-role +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: k8s-workload-registrar-role +rules: +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["get", "create"] +- apiGroups: [""] + resources: ["endpoints", "nodes", "pods"] + verbs: ["get", "list", "watch"] +- apiGroups: ["spiffeid.spiffe.io"] + resources: ["spiffeids"] + verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] +- apiGroups: ["spiffeid.spiffe.io"] + resources: ["spiffeids/status"] + verbs: ["get", "patch", "update"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["validatingwebhookconfigurations"] + verbs: ["get", "list", "update", "watch"] diff --git a/POC/spire/k8s-workload-registrar-crd-configmap.yaml b/POC/spire/k8s-workload-registrar-crd-configmap.yaml new file mode 100644 index 00000000..c3a86345 --- /dev/null +++ b/POC/spire/k8s-workload-registrar-crd-configmap.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8s-workload-registrar + namespace: spire +data: + k8s-workload-registrar.conf: | + trust_domain = "example.org" + server_socket_path = "/run/spire/sockets/server.sock" + cluster = "demo-cluster" + mode = "crd" + metrics_bind_addr = "0" diff --git a/POC/spire/kustomization.yaml b/POC/spire/kustomization.yaml new file mode 100644 index 00000000..0a491013 --- /dev/null +++ b/POC/spire/kustomization.yaml @@ -0,0 +1,18 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: spire + +resources: +- spire-namespace.yaml +- agent-account.yaml +- agent-cluster-role.yaml +- agent-configmap.yaml +- agent-daemonset.yaml +- server-account.yaml +- server-cluster-role.yaml +- server-configmap.yaml +- server-service.yaml +- server-statefulset.yaml +- spire-bundle-configmap.yaml + diff --git a/POC/spire/server-account.yaml b/POC/spire/server-account.yaml new file mode 100644 index 00000000..51ad4c5e --- /dev/null +++ b/POC/spire/server-account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-server + namespace: spire diff --git a/POC/spire/server-cluster-role.yaml b/POC/spire/server-cluster-role.yaml new file mode 100644 index 00000000..6f4cbd64 --- /dev/null +++ b/POC/spire/server-cluster-role.yaml @@ -0,0 +1,28 @@ +# ClusterRole to allow spire-server node attestor to query Token Review API +# and to be able to push certificate bundles to a configmap +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-trust-role +rules: +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["patch", "get", "list"] + +--- +# Binds above cluster role to spire-server service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-trust-role-binding +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +roleRef: + kind: ClusterRole + name: spire-server-trust-role + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/POC/spire/server-configmap.yaml b/POC/spire/server-configmap.yaml new file mode 100644 index 00000000..d3cd7005 --- /dev/null +++ b/POC/spire/server-configmap.yaml @@ -0,0 +1,66 @@ +apiVersion: v1 +kind: 
ConfigMap +metadata: + name: spire-server + namespace: spire +data: + server.conf: | + server { + bind_address = "0.0.0.0" + bind_port = "8081" + socket_path = "/run/spire/sockets/server.sock" + trust_domain = "example.org" + data_dir = "/run/spire/data" + log_level = "DEBUG" + ca_key_type = "rsa-2048" + + default_svid_ttl = "1h" + ca_subject = { + country = ["US"], + organization = ["SPIFFE"], + common_name = "", + } + } + + plugins { + DataStore "sql" { + plugin_data { + database_type = "sqlite3" + connection_string = "/run/spire/data/datastore.sqlite3" + } + } + + NodeAttestor "k8s_psat" { + plugin_data { + clusters = { + # NOTE: Change this to your cluster name + "demo-cluster" = { + use_token_review_api_validation = true + service_account_allow_list = ["spire:spire-agent"] + } + } + } + } + + KeyManager "disk" { + plugin_data { + keys_path = "/run/spire/data/keys.json" + } + } + + Notifier "k8sbundle" { + plugin_data { + namespace = "spire" + config_map = "trust-bundle" + config_map_key = "root-cert.pem" + } + } + } + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8080" + live_path = "/live" + ready_path = "/ready" + } diff --git a/POC/spire/server-service.yaml b/POC/spire/server-service.yaml new file mode 100644 index 00000000..fa4df2e9 --- /dev/null +++ b/POC/spire/server-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: spire-server + namespace: spire +spec: + type: NodePort + ports: + - name: grpc + port: 8081 + targetPort: 8081 + protocol: TCP + selector: + app: spire-server diff --git a/POC/spire/server-statefulset.yaml b/POC/spire/server-statefulset.yaml new file mode 100644 index 00000000..8af7d050 --- /dev/null +++ b/POC/spire/server-statefulset.yaml @@ -0,0 +1,90 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: spire-server + namespace: spire + labels: + app: spire-server +spec: + replicas: 1 + selector: + matchLabels: + app: spire-server + serviceName: spire-server + template: + metadata: + namespace: spire + labels: + app: spire-server + spec: + serviceAccountName: spire-server + shareProcessNamespace: true + containers: + - name: spire-server + image: gcr.io/spiffe-io/spire-server:1.1.1 + args: + - -config + - /run/spire/config/server.conf + livenessProbe: + httpGet: + path: /live + port: 8080 + failureThreshold: 2 + initialDelaySeconds: 15 + periodSeconds: 60 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + ports: + - containerPort: 8081 + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + - name: spire-data + mountPath: /run/spire/data + readOnly: false + - name: spire-registration-socket + mountPath: /run/spire/sockets + readOnly: false + - name: k8s-workload-registrar + image: gcr.io/spiffe-io/k8s-workload-registrar:1.1.1 + args: + - -config + - /run/spire/config/k8s-workload-registrar.conf + ports: + - containerPort: 9443 + name: webhook + protocol: TCP + volumeMounts: + - mountPath: /run/spire/config + name: k8s-workload-registrar-config + readOnly: true + - name: spire-registration-socket + mountPath: /run/spire/sockets + readOnly: true + volumes: + - name: spire-config + configMap: + name: spire-server + - name: k8s-workload-registrar-config + configMap: + name: k8s-workload-registrar + - name: spire-registration-socket + hostPath: + path: /run/spire/server-sockets + type: DirectoryOrCreate + volumeClaimTemplates: + - metadata: + name: spire-data + namespace: spire + spec: + 
accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/POC/spire/spiffe-csi-driver.yaml b/POC/spire/spiffe-csi-driver.yaml new file mode 100644 index 00000000..e9d07bc5 --- /dev/null +++ b/POC/spire/spiffe-csi-driver.yaml @@ -0,0 +1,20 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: "csi.spiffe.io" +spec: + # Only ephemeral, inline volumes are supported. There is no need for a + # controller to provision and attach volumes. + attachRequired: false + + # Request the pod information which the CSI driver uses to verify that an + # ephemeral mount was requested. + podInfoOnMount: true + + # Don't change ownership on the contents of the mount since the Workload API + # Unix Domain Socket is typically open to all (i.e. 0777). + fsGroupPolicy: None + + # Declare support for ephemeral volumes only. + volumeLifecycleModes: + - Ephemeral diff --git a/POC/spire/spiffeid.spiffe.io_spiffeids.yaml b/POC/spire/spiffeid.spiffe.io_spiffeids.yaml new file mode 100644 index 00000000..94a4fb30 --- /dev/null +++ b/POC/spire/spiffeid.spiffe.io_spiffeids.yaml @@ -0,0 +1,104 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.4 + name: spiffeids.spiffeid.spiffe.io +spec: + group: spiffeid.spiffe.io + names: + kind: SpiffeID + listKind: SpiffeIDList + plural: spiffeids + singular: spiffeid + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: SpiffeID is the Schema for the spiffeid API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: SpiffeIDSpec defines the desired state of SpiffeID + properties: + dnsNames: + items: + type: string + type: array + parentId: + type: string + selector: + properties: + arbitrary: + description: Arbitrary selectors + items: + type: string + type: array + containerImage: + description: Container image to match for this spiffe ID + type: string + containerName: + description: Container name to match for this spiffe ID + type: string + namespace: + description: Namespace to match for this spiffe ID + type: string + nodeName: + description: Node name to match for this spiffe ID + type: string + podLabel: + additionalProperties: + type: string + description: Pod label name/value to match for this spiffe ID + type: object + podName: + description: Pod name to match for this spiffe ID + type: string + podUid: + description: Pod UID to match for this spiffe ID + type: string + serviceAccount: + description: ServiceAccount to match for this spiffe ID + type: string + type: object + spiffeId: + type: string + required: + - parentId + - selector + - spiffeId + type: object + status: + description: SpiffeIDStatus defines the observed state of SpiffeID + properties: + entryId: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + type: object + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + \ No newline at end of file diff --git a/POC/spire/spire-bundle-configmap.yaml b/POC/spire/spire-bundle-configmap.yaml new file mode 100644 index 00000000..c9d93983 --- /dev/null +++ b/POC/spire/spire-bundle-configmap.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: trust-bundle + namespace: spire + annotations: + synator/sync: 'yes' + synator/include-namespaces: 'istio-system,default' diff --git a/POC/utils.sh b/POC/utils.sh new file mode 100644 index 00000000..f8171585 --- /dev/null +++ b/POC/utils.sh @@ -0,0 +1,77 @@ +#!/bin/bash + +# Generate Secrets + +# Mint SVIDs for workloads +./spire-server x509 mint -spiffeID spiffe://cluster.local/ns/default/sa/details -ttl 8760h -write /tmp/details +./spire-server x509 mint -spiffeID spiffe://cluster.local/ns/default/sa/productpage -ttl 8760h -write /tmp/productpage +./spire-server x509 mint -spiffeID spiffe://cluster.local/ns/default/sa/ratings -ttl 8760h -write /tmp/ratings +./spire-server x509 mint -spiffeID spiffe://cluster.local/ns/default/sa/reviews -ttl 8760h -write /tmp/reviews + +cat /tmp/details/svid.pem /tmp/details/bundle.pem > /tmp/details/chain.pem +cat /tmp/productpage/svid.pem /tmp/productpage/bundle.pem > /tmp/productpage/chain.pem +cat /tmp/ratings/svid.pem /tmp/ratings/bundle.pem > /tmp/ratings/chain.pem +cat /tmp/reviews/svid.pem /tmp/reviews/bundle.pem > /tmp/reviews/chain.pem + +# Mint SVID for Istio IngressGateway +./spire-server x509 mint -spiffeID spiffe://cluster.local/ns/istio-system/sa/istio-ingressgateway-service-account -ttl 8760h -write /tmp/ingress +cat /tmp/ingress/svid.pem /tmp/ingress/bundle.pem > /tmp/ingress/chain.pem + +# Convert to base64 + +# Convert `pem` file to base64 and copy to clipboard, then paste it in the corresponding place in bookinfo/secrets + +cat chain.pem | base64 | pbcopy + 
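The `pbcopy` pipe above is macOS-specific and handles one file at a time. As a sketch (not part of the original script), the same encoding step for every identity minted above could look like the loop below on Linux, where GNU `base64 -w 0` keeps the output on a single line; the `/tmp/<name>` paths come from the mint commands above, while the `.b64` output files are purely illustrative:

```bash
# Sketch: base64-encode each minted chain and key for pasting into bookinfo/secrets.
# Assumes `x509 mint -write` placed svid.pem, key.pem and bundle.pem under each /tmp/<name>
# directory and that chain.pem was built by the cat commands above.
for name in details productpage ratings reviews ingress; do
  base64 -w 0 < "/tmp/${name}/chain.pem" > "/tmp/${name}/chain.b64"
  base64 -w 0 < "/tmp/${name}/key.pem"   > "/tmp/${name}/key.b64"
done
```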
+ +# Debug proxy using Envoy admin interface + +# Log into pod in container istio-proxy +kubectl exec --stdin --tty $POD -c istio-proxy -- /bin/bash + +kubectl exec --stdin --tty $POD -c istio-proxy -- curl localhost:15000/config_dump + +# Check Envoy proxy secrets: +curl localhost:15000/certs + +# Check Envoy proxy configuration: +curl localhost:15000/config_dump +kubectl exec --stdin --tty $POD -c istio-proxy -- curl localhost:15000/config_dump > config.json + +# Change logging config to debug: +curl -X POST localhost:15000/logging?level=debug + + +# Port forward to the first istio-ingressgateway pod +alias igpf='kubectl -n istio-system port-forward $(kubectl -n istio-system get pods -listio=ingressgateway -o=jsonpath="{.items[0].metadata.name}") 15000' + +# Get the http routes from the port-forwarded ingressgateway pod (requires jq) +alias iroutes='curl --silent http://localhost:15000/config_dump | jq '\''.configs.routes.dynamic_route_configs[].route_config.virtual_hosts[]| {name: .name, domains: .domains, route: .routes[].match.prefix}'\''' + +# Get the logs of the first istio-ingressgateway pod +# Shows what happens with incoming requests and possible errors +alias igl='kubectl -n istio-system logs $(kubectl -n istio-system get pods -listio=ingressgateway -o=jsonpath="{.items[0].metadata.name}") --tail=300' + +# Get the logs of the first istio-pilot pod +# Shows issues with configurations or connecting to the Envoy proxies +alias ipl='kubectl -n istio-system logs $(kubectl -n istio-system get pods -listio=pilot -o=jsonpath="{.items[0].metadata.name}") discovery --tail=300' + +# Debug services connections +kubectl run --generator=run-pod/v1 -i --tty busybox-curl --image=radial/busyboxplus:curl --restart=Never -- sh + + +# istiod dashboard +istioctl dashboard controlz deployment/istiod.istio-system + + +# SPIRE bundles +## show +kubectl exec --stdin --tty -n spire2 spire-server-0 -- /opt/spire/bin/spire-server bundle show -format spiffe -socketPath /run/spire/sockets/server.sock +## set +kubectl exec --stdin --tty -n spire spire-server-0 -c spire-server -- /opt/spire/bin/spire-server bundle set -format spiffe -id spiffe://domain.test -socketPath /run/spire/sockets/server.sock + +## Mint SVID in domain.test +kubectl exec --stdin --tty -n spire2 spire-server-0 -- /opt/spire/bin/spire-server x509 mint -spiffeID spiffe://domain.test/myservice -socketPath /run/spire/sockets/server.sock + +## curl with TLS +curl --cert svid.pem --key key.pem -k -I https://localhost:7000/productpage diff --git a/README.md b/README.md index f18b137c..6ce5da81 100644 --- a/README.md +++ b/README.md @@ -1 +1,8 @@ -# Mithril +# istio-spire + +- [Jenkins pipeline setup](./ci-build.md) +- [Development](./development.md) +- [POC](./POC/README.md) +- [POC Instructions](./POC/doc/poc-instructions.md) +- [AWS Policies Instructions](./POC/doc/aws-policies/aws-policies.md) +- [Demo](./POC/demo/README.md) diff --git a/ci-build.md b/ci-build.md new file mode 100644 index 00000000..730aabb5 --- /dev/null +++ b/ci-build.md @@ -0,0 +1,53 @@ +# Jenkins pipeline + +## How to set up and use Vault secrets in a Jenkins pipeline + +HPE's Jenkins uses [HashiCorp Vault](https://www.vaultproject.io) as a secrets store, where team members can store secrets and have a Jenkins pipeline retrieving and using them. + +### Setting up your Vault secrets + +- Install [HashiCorp Vault](https://www.vaultproject.io/downloads) in your system. This will add the Vault CLI that we will use to access the secrets store. 
+- Create a [GitHub token](https://github.hpe.com/Vault/Wiki/wiki/Onboarding#generate-a-github-token-for-accessing-vault-via-jenkins-pipeline-ui-andor-cli) using your HPE GitHub user. Save it in a safe place.
+- Set your `VAULT_ADDR` pointing to HPE's Vault server:
+```bash
+export VAULT_ADDR=https://vault.docker.hpecorp.net:443
+```
+Please note that from now on, you must be connected to the HPE VPN to have access to the Vault server.
+- Log in to the Vault server using your newly created GitHub token:
+```bash
+vault login -method=github token=
+```
+- Check that you have access to your organization's secrets path:
+```bash
+vault read secret/hpe4it-jenkins-ci/repo/sec-eng/istio-spire
+```
+That should output a list of the secrets stored in the Jenkins path (if any), or a permission denied error if you don't have access to it.
+
+If you don't have permissions, make sure to add yourself or have someone else add you to the [`sec-eng` org team](https://github.hpe.com/orgs/Docker-in-Datacenter-VaultTeams/teams/sec-eng/members). Read [step #2 here](https://github.hpe.com/Vault/Wiki/wiki/Onboarding#onboarding) for more information.
+
+
+- Now you can store as many secrets as you want in the Jenkins path, but **be aware that writing to a specific path will overwrite all the existing secrets in the same path**. If you don't want to lose the existing secrets, you can create your custom sub-path (something like `secret/hpe4it-jenkins-ci/repo/sec-eng/istio-spire/mysubpath`) or make sure you rewrite the existing secrets along with the new ones you need.
+
+- Set up the secret(s) you need (**run this only if you want to reset all the secrets in the root path**):
+```bash
+vault write secret/hpe4it-jenkins-ci/repo/sec-eng/istio-spire \
+    mySuperSecret=myt0k3n \
+    mySecondSecret=2ndt0k3n
+```
+
+### Accessing the secrets from the Jenkins pipeline
+
+HPE's Jenkins has a convenience helper function for reading Vault secrets.
+
+```groovy
+pipeline {
+    // ...
+    script {
+        def secrets = vaultGetSecrets()
+        print secrets.mySuperSecret
+        print secrets.mySecondSecret
+    }
+}
+```
+
+Please refer to the [HPE Vault wiki](https://github.hpe.com/Vault/Wiki/wiki/Onboarding) for more information.
diff --git a/development.md b/development.md
new file mode 100644
index 00000000..c955f23e
--- /dev/null
+++ b/development.md
@@ -0,0 +1,58 @@
+# Development
+
+We currently support the following Istio branches:
+ - master
+ - 1.10
+ - 1.11
+ - 1.12
+
+## Mithril images
+Our private images are available at [ECR](https://console.aws.amazon.com/ecr/home?region=us-east-1) and [HPE HUB](https://hub.docker.hpecorp.net/):
+**529024819027.dkr.ecr.us-east-1.amazonaws.com/mithril** and **hub.docker.hpecorp.net**, respectively.
+
+For development purposes, we have the following images:
+
+ - Jenkins pipeline: (hub.docker.hpecorp.net/sec-eng/ubuntu:pipeline)
+ - Mithril dependencies: (529024819027.dkr.ecr.us-east-1.amazonaws.com/mithril)
+
+
+## Distribution
+At the moment, we provide images, scripts, and code patches.
+
+The Mithril images are uploaded to a private ECR and HPE HUB during the **build-and-push-istio-images** stage of our pipeline.
+
+Our scripts and patches are uploaded to a public S3 bucket with versioning. This means that the latest versions are downloaded by default, while older versions remain available for reference. This process occurs automatically within the **distribute-poc** stage of our pipeline, with the scripts and patches being uploaded in parallel.
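For illustration only, a minimal sketch of what one of these distribution steps amounts to with the standard AWS CLI (the bucket name and `tar` command are the ones listed in the sections below; the actual pipeline stage may differ in detail):

```bash
# Sketch: package the patch set and publish it with a public-read ACL
tar -zcvf mithril-poc-patchset.tar.gz patches
aws s3 cp mithril-poc-patchset.tar.gz s3://mithril-poc-patchset/ --acl public-read
```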
+ +### Mithril images +Our images are available for customer consumption in the public ECR. Our public ECR HUB is **public.ecr.aws/e4m8j0n8/mithril**. + +## Scripts assets + +At **distribute-assets** stage, a `tar.gz` file is created with the desired assets. + +``` +tar -zcvf mithril.tar.gz \ +bookinfo spire istio \ +deploy-all.sh create-namespaces.sh cleanup-all.sh \ +forward-port.sh create-kind-cluster.sh \ +doc/poc-instructions.md demo/demo-script.sh \ +demo/README.md demo/federation-demo.sh \ +../usecases/federation +``` + +Then, this file is uploaded to the bucket **s3://mithril-customer-assets** and set as publicly readable through an ACL object. + +## Image patches + +Then, this file is uploaded to the bucket **s3://mithril-poc-patchset** and set as publicly readable through an ACL object. + +``` +tar -zcvf mithril-poc-patchset.tar.gz patches +``` + +### Mithril public images tags + +Our current images are: +- stable_20211209 +- stable_20211022 +- stable_20210920 diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 00000000..25524dec --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,7 @@ +FROM 529024819027.dkr.ecr.us-east-1.amazonaws.com/mithril-deps:latest + +COPY . . + +ENV LATEST_SPIRE_VERSION=1.1.1 + +WORKDIR /mithril \ No newline at end of file diff --git a/docker/Makefile b/docker/Makefile new file mode 100644 index 00000000..7e494abf --- /dev/null +++ b/docker/Makefile @@ -0,0 +1,111 @@ +# Importing environment configuration +cnf ?= conf.env +include $(cnf) +export $(shell sed 's/=.*//' $(cnf)) + +.DEFAULT_GOAL := help + +# Creating the command to be executed when building the image +CMD_BUILD := "docker build -t $(APP_NAME)" +ifdef ISTIO_VERSION +PATCH_VERSION = `echo $(ISTIO_VERSION) | cut -d'-' -f 2` +CMD_BUILD += " --build-arg ISTIO_VERSION=$(ISTIO_VERSION)" +endif +ifdef ISTIO_CTL_VERSION +CMD_BUILD += " --build-arg ISTIO_CTL_VERSION=$(ISTIO_CTL_VERSION)" +endif +ifdef PATCH_VERSION +CMD_BUILD += " --build-arg PATCH_VERSION=$(PATCH_VERSION)" +endif +CMD_BUILD += " -f ./Dockerfile-mithril-deps.dockerfile .." + +## Build the container. +build: + eval $(CMD_BUILD) + +## Run the container. +run: + @mkdir -p $(HOME)/.kube && touch $(HOME)/.kube/config + docker run -i -t --rm -v "$(DOCKER_SOCK_MAP)" -v "$(KUBE_CONFIG_MAP)" --network host --name $(APP_NAME) $(APP_NAME) + +# Stop APP running containers +stop: + docker stop $(APP_NAME) + +## Remove APP running containers. +rm: stop + docker rm $(APP_NAME) + +## Clear the APP images. +clear-img: + docker rmi $(APP_NAME) + +## Build and Start the container. +start: build run + +## Build, tag and push the container. +push: build publish + +## Pull the image and tag it as $APP_NAME:latest. +pull: tag-image + +pull-image: + docker pull hub.docker.hpecorp.net/sec-eng/mithril + +tag-image: pull-image + docker tag hub.docker.hpecorp.net/sec-eng/mithril $(APP_NAME):latest + +## Login in and publish the image to the repo. +publish: login publish-latest publish-version + +publish-latest: tag-latest + docker push $(DOCKER_REPO)/$(APP_NAME):latest + +publish-version: tag-version + docker push $(DOCKER_REPO)/$(APP_NAME):$(VERSION) + +tag-latest: + docker tag $(APP_NAME) $(DOCKER_REPO)/$(APP_NAME):latest + +tag-version: + docker tag $(APP_NAME) $(DOCKER_REPO)/$(APP_NAME):$(VERSION) + +## Auto login to HPE docker hub MSR. 
+login: + docker login $(HUB_URL) -u $(DOCKER_USER) -p $(DOCKER_PWD) + +#------------------------------------------------------------------------ +# Document file +#------------------------------------------------------------------------ + +# VARIABLES +NAME = mithril +VERSION = 0.0.1 +AUTHOR=HPE + +# COLORS +GREEN := $(shell tput -Txterm setaf 2) +RESET := $(shell tput -Txterm sgr0) + +TARGET_MAX_CHAR_NUM=20 + +## shows help. +help: + @echo "--------------------------------------------------------------------------------" + @echo "Author : ${GREEN}$(AUTHOR)${RESET}" + @echo "Project : ${GREEN}$(NAME)${RESET}" + @echo "Version : ${GREEN}$(VERSION)${RESET}" + @echo "--------------------------------------------------------------------------------" + @echo "" + @echo "Usage:" + @echo " ${GREEN}make${RESET} " + @echo "Targets:" + @awk '/^[a-zA-Z\-\_0-9]+:/ { \ + helpMessage = match(lastLine, /^## (.*)/); \ + if (helpMessage) { \ + helpCommand = substr($$1, 0, index($$1, ":")); \ + helpMessage = substr(lastLine, RSTART + 3, RLENGTH); \ + printf " ${GREEN}%-$(TARGET_MAX_CHAR_NUM)s${RESET} %s\n", helpCommand, helpMessage; \ + } \ + } \ +{ lastLine = $$0 }' $(MAKEFILE_LIST) \ No newline at end of file diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 00000000..2cfe6eb0 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,63 @@ +# Building the image + +To build the image you can use the following command. In this command you can specify any istio branch version and istioctl version you want to be added into the container. By default it is cloning the branch *_release-1.10_* and installing the istioctl _1.10.1_ + +```bash +make build ISTIO_VERSION=release-1.9 ISTIO_CTL_VERSION=1.9.1 +``` +The image has all the needed dependencies to be able to run and deploy the POC. + +Further needed dependencies can be added if there is a need, to continue with Mithril development. + +# Running the container + +You can run the container using the following command +```bash +make run +``` + +This command will start the container and you can follow this [guide](https://github.hpe.com/sec-eng/istio-spire/blob/master/POC/README.md) in order to set up the environment. + +# Pushing the image to Mirantis Secure Registry +Before trying to push the image to [MSR](https://hub.docker.hpecorp.net/repositories?namespace=sec-eng) you need to set the environment variables in the `conf.env` file. + +You need to update the `DOCKER_USER` and `DOCKER_PWD` fields, which are your docker user and your docker password. + +_Your **Docker password** is most likely your Private Access Token from MSR_ + +After setting the correct credentials in the `conf.env` file you can automatically **build** and **publish** the image with the command + +```bash +make push +``` + +You can also just publish the image if there is any update made to it +```bash +make publish +``` + +# Pulling the image +To download the image you can use + +```bash +make pull +``` + +After downloading the image you can execute it with + +```bash +make run +``` +# Troubleshooting + +If you encounter any problem during the building of istio images it's probably due to caching since the docker engine is shared from the host. + +Exit the container and then you can execute these commands to clear the environment: + +```bash +docker stop $(docker ps -aq) +docker rm $(docker ps -aq) +docker rmi $(docker images -aq) --force +``` + +These commands will stop and remove all the containers, and clean up all the images. 
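If you prefer a single cleanup command, Docker's built-in prune does much the same once running containers are stopped. This is the generic Docker CLI rather than anything provided by this repo, and it deliberately leaves volumes untouched:

```bash
# Stop everything, then remove stopped containers, unused images, networks and build cache
docker stop $(docker ps -aq)
docker system prune --all --force
```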
diff --git a/docker/conf.env b/docker/conf.env new file mode 100644 index 00000000..a63ecb07 --- /dev/null +++ b/docker/conf.env @@ -0,0 +1,8 @@ +APP_NAME=mithril +DOCKER_REPO=hub.docker.hpecorp.net/sec-eng +VERSION=1.0 +DOCKER_USER= +DOCKER_PWD= +HUB_URL=hub.docker.hpecorp.net +KUBE_CONFIG_MAP=${HOME}/.kube/config:/root/.kube/config:rw +DOCKER_SOCK_MAP=/var/run/docker.sock:/var/run/docker.sock:rw \ No newline at end of file diff --git a/docker/mithril-deps.Dockerfile b/docker/mithril-deps.Dockerfile new file mode 100644 index 00000000..512ac8c5 --- /dev/null +++ b/docker/mithril-deps.Dockerfile @@ -0,0 +1,64 @@ +FROM 529024819027.dkr.ecr.us-east-1.amazonaws.com/mithril:ubuntu-20.04 + +ARG DEBIAN_FRONTEND=noninteractive +ARG ISTIO_VERSION=release-1.10 +ARG ISTIO_CTL_VERSION=1.10.1 +ARG PATCH_VERSION=1.10 +ARG GO_VERSION=1.17.4 +ARG SPIRE_VERSION=1.1.1 + +ENV TAG=my-build +ENV HUB=localhost:5000 +ENV BUILD_WITH_CONTAINER=0 + +WORKDIR /mithril + +RUN apt-get update \ + && apt-get -y install git make bash curl docker.io ruby ruby-dev rpm wget util-linux gcc libffi-dev libc6-dev apt-utils + +RUN gem install --no-document fpm \ + && gem update fpm + +# AWS CLI installation +RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" \ + && unzip -o awscliv2.zip \ + && rm -rf awscliv2.zip \ + && ./aws/install --update + +# Go installation +RUN wget https://golang.org/dl/go$GO_VERSION.linux-amd64.tar.gz \ + && tar -C /usr/local -xzf go$GO_VERSION.linux-amd64.tar.gz \ + && rm -rf go$GO_VERSION.linux-amd64.tar.gz + +ENV PATH="${PATH}:/usr/local/go/bin" + +# Spire installation +RUN wget https://github.com/spiffe/spire/releases/download/v$SPIRE_VERSION/spire-$SPIRE_VERSION-linux-x86_64-glibc.tar.gz \ + && tar zvxf spire-$SPIRE_VERSION-linux-x86_64-glibc.tar.gz \ + && cp -r spire-$SPIRE_VERSION/. 
/opt/spire/ \ + && rm -rf spire-$SPIRE_VERSION-linux-x86_64-glibc.tar.gz + +ENV PATH="${PATH}:/opt/spire/bin" + +# Kubectl Installation +RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \ + && install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl \ + && rm -rf kubectl + +# Kind Installation +RUN curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64 \ + && chmod +x ./kind \ + && mv ./kind /usr/local/bin + +# Istioctl Installation +RUN curl -L https://istio.io/downloadIstio | ISTIO_VERSION=$ISTIO_CTL_VERSION sh - \ + && cp istio-$ISTIO_CTL_VERSION/bin/istioctl /usr/local/bin \ + && mv istio-$ISTIO_CTL_VERSION istioctl-$ISTIO_CTL_VERSION + +# Terraform +RUN apt-get install -y gnupg software-properties-common curl \ + && curl -fsSL https://apt.releases.hashicorp.com/gpg | apt-key add - \ + && apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" \ + && apt-get update && apt-get install terraform + +WORKDIR /mithril \ No newline at end of file diff --git a/docker/pipe/Dockerfile b/docker/pipe/Dockerfile new file mode 100644 index 00000000..03b689f9 --- /dev/null +++ b/docker/pipe/Dockerfile @@ -0,0 +1,42 @@ +FROM ubuntu:latest + +ENV DEBIAN_FRONTEND=noninteractive + +RUN apt-get update + +RUN apt-get -y install git make bash curl util-linux gcc libffi-dev libc6-dev apt-utils wget \ + apt-transport-https ca-certificates gnupg lsb-release + +RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg +RUN echo \ + "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null + +RUN apt-get update +RUN apt-get -y install docker.io +RUN apt-get -y install ruby ruby-dev rubygems build-essential rpm +RUN gem install --no-document fpm + +# AWS CLI installation +RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" +RUN unzip -o awscliv2.zip +RUN ./aws/install --update + +# Go installation +RUN wget https://golang.org/dl/go1.17.5.linux-amd64.tar.gz +RUN tar -C /usr/local -xzf go1.17.5.linux-amd64.tar.gz + +ENV PATH=/usr/local/go/bin:$PATH + +# Terraform +RUN apt-get install -y gnupg software-properties-common curl \ + && curl -fsSL https://apt.releases.hashicorp.com/gpg | apt-key add - \ + && apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" \ + && apt-get update && apt-get install terraform + +# Docker buildx installation +RUN wget https://github.com/docker/buildx/releases/download/v0.7.1/buildx-v0.7.1.linux-amd64 \ + && mv buildx-v0.7.1.linux-amd64 docker-buildx && chmod +x docker-buildx \ + && mkdir -p $HOME/.docker/cli-plugins && mv docker-buildx $HOME/.docker/cli-plugins + +ENV PATH=$PATH:$HOME/.docker/cli-plugins diff --git a/e2e/external_workload_test.go b/e2e/external_workload_test.go new file mode 100644 index 00000000..55d2809a --- /dev/null +++ b/e2e/external_workload_test.go @@ -0,0 +1,19 @@ +package e2e + +import ( + "testing" +) + +func TestExternalWorkload(t *testing.T) { + client, config, err := createClientGo() + if err != nil { + t.Fatal(err) + } + + clientset = client + kubeConfig = config + + cmd = "curl -I example.org" + + t.Run("request_external_workload_from_sleep_pod", requestFromSleep) +} diff --git a/e2e/federation_test.go b/e2e/federation_test.go new file 
mode 100644 index 00000000..515ebb7d --- /dev/null +++ b/e2e/federation_test.go @@ -0,0 +1,53 @@ +package e2e + +import ( + "net/http" + "testing" + + "gotest.tools/assert" +) + +func TestFederation(t *testing.T) { + t.Run("request_productpage_workload", requestSecureProductpageWorkload) +} + +func requestSecureProductpageWorkload(t *testing.T) { + clientset, config, err := createClientGo() + if err != nil { + t.Fatal(err) + } + + cmd := "/opt/spire/bin/spire-server x509 mint -spiffeID spiffe://domain.test/myservice -socketPath /run/spire/sockets/server.sock --write /tmp" + _, _, err = execInContainer(clientset, config, "app=spire-server", "spire-server", "spire2", cmd) + if err != nil { + t.Fatal(err) + } + + cmd = "cat /tmp/svid.pem" + svidPEM, _, err := execInContainer(clientset, config, "app=spire-server", "spire-server", "spire2", cmd) + if err != nil { + t.Fatal(err) + } + + cmd = "cat /tmp/key.pem" + keyPEM, _, err := execInContainer(clientset, config, "app=spire-server", "spire-server", "spire2", cmd) + if err != nil { + t.Fatal(err) + } + + httpClient, err := createSecureHttpClient(svidPEM, keyPEM) + + if err != nil { + t.Fatal(err) + } + + resp, err := httpClient.Get("https://localhost:7000/productpage") + + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + + assert.Equal(t, resp.StatusCode, http.StatusOK) + +} diff --git a/e2e/go.mod b/e2e/go.mod new file mode 100644 index 00000000..ecd187a0 --- /dev/null +++ b/e2e/go.mod @@ -0,0 +1,25 @@ +module e2e + +go 1.16 + +require ( + github.com/go-logr/logr v1.1.0 // indirect + github.com/google/go-cmp v0.5.6 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/stretchr/testify v1.7.0 + golang.org/x/net v0.0.0-20210913180222-943fd674d43e // indirect + golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect + golang.org/x/sys v0.0.0-20210915083310-ed5796bab164 // indirect + golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect + golang.org/x/text v0.3.7 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.27.1 // indirect + gotest.tools v2.2.0+incompatible // indirect + k8s.io/api v0.22.1 + k8s.io/apimachinery v0.22.1 + k8s.io/client-go v0.22.1 + k8s.io/klog/v2 v2.20.0 // indirect + k8s.io/utils v0.0.0-20210820185131-d34e5cb4466e // indirect +) diff --git a/e2e/go.sum b/e2e/go.sum new file mode 100644 index 00000000..f2a5ab09 --- /dev/null +++ b/e2e/go.sum @@ -0,0 +1,540 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go 
v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/census-instrumentation/opencensus-proto 
v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.0.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.1.0 h1:nAbevmWlS2Ic4m4+/An5NXkaGqlqpbBgdcuThZxnZyI= +github.com/go-logr/logr v1.1.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.3/go.mod 
h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod 
h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate 
v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod 
h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net 
v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210913180222-943fd674d43e h1:+b/22bPvDYt4NPDcy4xAGCmON713ONAWFeY3Z7I3tR8= +golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210915083310-ed5796bab164 h1:7ZDGnxgHAMw7thfC5bEos0RDAccZKxioiWBhfIe+tvw= +golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api 
v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY= +k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= +k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM= +k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw= +k8s.io/client-go v0.22.1/go.mod 
h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.20.0 h1:tlyxlSvd63k7axjhuchckaRJm+a92z5GSOrTOQY5sHw= +k8s.io/klog/v2 v2.20.0/go.mod h1:Gm8eSIfQN6457haJuPaMxZw4wyP5k+ykPFlrhQDvhvw= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210820185131-d34e5cb4466e h1:ldQh+neBabomh7+89dTpiFAB8tGdfVmuIzAHbvtl+9I= +k8s.io/utils v0.0.0-20210820185131-d34e5cb4466e/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/e2e/simple_bookinfo_test.go b/e2e/simple_bookinfo_test.go new file mode 100644 index 00000000..1d93b856 --- /dev/null +++ b/e2e/simple_bookinfo_test.go @@ -0,0 +1,51 @@ +package e2e + +import ( + "bytes" + "net/http" + "os" + "os/exec" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSimpleBookinfo(t *testing.T) { + t.Run("version", version) + t.Run("get_cluster", getCluster) + t.Run("request_productpage_workload", requestProductpageWorkload) +} + +func version(t *testing.T) { + cmd := exec.Command("istioctl", "version") + + buf := new(bytes.Buffer) + cmd.Stdout = buf + cmd.Stderr = os.Stderr + + cmd.Run() + + actual := buf.String() + assert.Contains(t, actual, istioctlVersion) +} + +func getCluster(t *testing.T) { + cmd := exec.Command("kind", "get clusters") + + buf := new(bytes.Buffer) + cmd.Stdout = buf + cmd.Stderr = os.Stderr + + cmd.Run() + + actual := buf.String() + assert.Contains(t, actual, "kind") +} + +func requestProductpageWorkload(t *testing.T) { + resp, err := http.Get("http://localhost:8000/productpage") + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() +} diff --git a/e2e/utils.go b/e2e/utils.go new file mode 100644 index 00000000..1931eef8 --- /dev/null +++ b/e2e/utils.go @@ -0,0 +1,205 @@ +package e2e + +import ( + "context" + "crypto/tls" + "errors" + "flag" + "net/http" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/remotecommand" + "k8s.io/client-go/util/homedir" +) + +var clientset *kubernetes.Clientset +var kubeConfig *rest.Config + +var istioctlVersion = "1.10" +var defaultNamespace = "default" +var statusOK = "HTTP/1.1 200 OK" +var cmd string + +type Writer struct { + Str []string +} + +func (w 
*Writer) Write(p []byte) (n int, err error) { + str := string(p) + if len(str) > 0 { + w.Str = append(w.Str, str) + } + return len(str), nil +} + +func createClientGo() (*kubernetes.Clientset, *rest.Config, error) { + var kubeconfig *string + if home := homedir.HomeDir(); home != "" { + kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file") + } else { + kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file") + } + flag.Parse() + + config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig) + if err != nil { + return nil, nil, err + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, config, err + } + + return clientset, config, err +} + +func createSecureHttpClient(certPEM, keyPEM string) (*http.Client, error) { + cert, err := tls.X509KeyPair([]byte(certPEM), []byte(keyPEM)) + + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + Certificates: []tls.Certificate{cert}, + InsecureSkipVerify: true, + }, + }, + } + + return client, nil +} + +func buildCmd(command string) []string { + cmd := []string{ + "sh", + "-c", + command, + } + return cmd +} + +func requestFromSleep(t *testing.T) { + labelSelector := "app=sleep" + listOptions := metav1.ListOptions{ + LabelSelector: labelSelector, + } + + podList, err := clientset.CoreV1().Pods(defaultNamespace).List(context.TODO(), listOptions) + if err != nil { + t.Error("Error when listing pods") + } + + if len(podList.Items) == 0 { + t.Fatal("Sleep pod not found") + } + sleepPod := podList.Items[0] + + command := buildCmd(cmd) + + req := clientset.CoreV1().RESTClient().Post(). + Namespace(defaultNamespace). + Resource("pods"). + Name(sleepPod.Name). + SubResource("exec"). + Param("container", "sleep") + + option := &v1.PodExecOptions{ + Command: command, + Stdin: true, + Stdout: true, + Stderr: true, + TTY: true, + } + option.Stdin = false + req.VersionedParams(option, + scheme.ParameterCodec, + ) + + executor, err := remotecommand.NewSPDYExecutor(kubeConfig, http.MethodPost, req.URL()) + if err != nil { + t.Error(err) + } + + stdOut := new(Writer) + os.Stderr.Sync() + + err = executor.Stream(remotecommand.StreamOptions{ + Stdin: nil, + Stdout: stdOut, + Stderr: os.Stderr, + Tty: false, + TerminalSizeQueue: nil, + }) + if err != nil { + t.Error(err) + } + + // stdOut should contain a status code response from a request using the "-I" parameter + assert.Contains(t, stdOut.Str[0], statusOK) +} + +func execInContainer(clientset *kubernetes.Clientset, config *rest.Config, labelSelector, container, namespace, cmd string) (string, string, error) { + podList, err := clientset.CoreV1(). + Pods(namespace). + List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector}) + + if err != nil { + return "", "", err + } + + if len(podList.Items) == 0 { + return "", "", errors.New("pod not found") + } + pod := podList.Items[0] + + command := buildCmd(cmd) + + req := clientset.CoreV1(). + RESTClient(). + Post(). + Namespace(namespace). + Resource("pods"). + Name(pod.Name). + SubResource("exec"). + Param("container", container). 
+ VersionedParams(&v1.PodExecOptions{ + Command: command, + Stdout: true, + Stderr: true, + }, scheme.ParameterCodec) + + executor, err := remotecommand.NewSPDYExecutor(config, http.MethodPost, req.URL()) + if err != nil { + return "", "", err + } + + stdOut := new(Writer) + stdErr := new(Writer) + os.Stderr.Sync() + + err = executor.Stream(remotecommand.StreamOptions{ + Stdin: nil, + Stdout: stdOut, + Stderr: stdErr, + TerminalSizeQueue: nil, + }) + if err != nil { + // Propagate the stream error instead of silently returning nil + return "", "", err + } + + return strings.Join(stdOut.Str, "\n"), strings.Join(stdErr.Str, "\n"), nil +} diff --git a/e2e/workload_to_ingress_upstream_disk_test.go b/e2e/workload_to_ingress_upstream_disk_test.go new file mode 100644 index 00000000..cd3f9691 --- /dev/null +++ b/e2e/workload_to_ingress_upstream_disk_test.go @@ -0,0 +1,19 @@ +package e2e + +import ( + "testing" +) + +func TestWorkloadToIngressUpstreamDisk(t *testing.T) { + client, config, err := createClientGo() + if err != nil { + t.Fatal(err) + } + + clientset = client + kubeConfig = config + + cmd = "curl -I http://istio-ingressgateway.istio-system.svc:8000/status/200" + + t.Run("request_httpbin_from_sleep_pod", requestFromSleep) +} diff --git a/e2e/workload_to_ingress_upstream_spire_test.go b/e2e/workload_to_ingress_upstream_spire_test.go new file mode 100644 index 00000000..9d9a901e --- /dev/null +++ b/e2e/workload_to_ingress_upstream_spire_test.go @@ -0,0 +1,19 @@ +package e2e + +import ( + "testing" +) + +func TestWorkloadToIngressUpstreamSpire(t *testing.T) { + client, config, err := createClientGo() + if err != nil { + t.Fatal(err) + } + + clientset = client + kubeConfig = config + + cmd = "curl -I http://istio-ingressgateway.istio-system.svc:8000/status/200" + + t.Run("request_httpbin_from_sleep_pod", requestFromSleep) +} diff --git a/terraform/integration-tests/external-workload/integration-tests.sh b/terraform/integration-tests/external-workload/integration-tests.sh new file mode 100644 index 00000000..6ca40c69 --- /dev/null +++ b/terraform/integration-tests/external-workload/integration-tests.sh @@ -0,0 +1,40 @@ +#!/bin/bash +exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 + +apt update -y +apt install docker.io awscli -y + +aws configure set aws_access_key_id ${access_key} +aws configure set aws_secret_access_key ${secret_access_key} + +aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${hub} + +docker pull ${hub}:${build_tag} + +# Tagging for easier use within the docker command below +docker tag ${hub}:${build_tag} mithril-testing:${build_tag} + +# Creating kubernetes config to use kubectl inside the container +mkdir -p $HOME/.kube && touch $HOME/.kube/config + +# Running usecase and testing it +docker run -i --rm \ +-v "/var/run/docker.sock:/var/run/docker.sock:rw" \ +-v "/.kube/config:/root/.kube/config:rw" \ +--network host mithril-testing:${build_tag} \ +bash -c 'cd /mithril/usecases && find .
-type f -iname "*.sh" -exec chmod +x {} \; && cd ${usecase} && /mithril/POC/create-kind-cluster.sh && +HUB=${hub} AWS_ACCESS_KEY_ID=${access_key} AWS_SECRET_ACCESS_KEY=${secret_access_key} /mithril/POC/create-docker-registry-secret.sh && +kubectl create ns spire && TAG=${build_tag} HUB=${hub} ./deploy-all.sh && +kubectl rollout status deployment sleep && +SPIRE_AGENT_POD=$(kubectl get pod -l app=spire-agent -n spire -o jsonpath="{.items[0].metadata.name}") && +kubectl exec -n spire $SPIRE_AGENT_POD -c spire-agent -- /opt/spire/bin/spire-agent api fetch -write /tmp/ -socketPath /run/spire/sockets/agent.sock && +kubectl exec -n spire $SPIRE_AGENT_POD -c spire-agent -- cat /tmp/svid.0.pem > svid.pem && +kubectl exec -n spire $SPIRE_AGENT_POD -c spire-agent -- cat /tmp/svid.0.key > svid.key && +kubectl exec -n spire $SPIRE_AGENT_POD -c spire-agent -- cat /tmp/bundle.0.pem > bundle.pem && +./start-server.sh && +cd /mithril/e2e && go test -v e2e -run TestExternalWorkload 2>&1 | tee ${build_tag}-${usecase}-result.txt && +AWS_ACCESS_KEY_ID=${access_key} AWS_SECRET_ACCESS_KEY=${secret_access_key} aws s3 cp ${build_tag}-${usecase}-result.txt s3://mithril-artifacts/${build_tag}/ --region us-east-1' + +cat /var/log/user-data.log >> ${build_tag}-${usecase}-log.txt + +aws s3 cp /${build_tag}-${usecase}-log.txt s3://mithril-artifacts/${build_tag}/ --region us-east-1 \ No newline at end of file diff --git a/terraform/integration-tests/external-workload/main.tf b/terraform/integration-tests/external-workload/main.tf new file mode 100644 index 00000000..f1f3b6ed --- /dev/null +++ b/terraform/integration-tests/external-workload/main.tf @@ -0,0 +1,128 @@ +# 1. Create vpc +resource "aws_vpc" "testing-vpc" { + cidr_block = "10.0.0.0/16" + tags = { + Name = "testing" + } +} + +# 2. Create Internet Gateway +resource "aws_internet_gateway" "gw" { + vpc_id = aws_vpc.testing-vpc.id +} + +# 3. Create Custom Route Table +resource "aws_route_table" "testing-route-table" { + vpc_id = aws_vpc.testing-vpc.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.gw.id + } + + route { + ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.gw.id + } + + tags = { + Name = "Prod" + } +} + +# 4. Create a Subnet +resource "aws_subnet" "subnet-1" { + vpc_id = aws_vpc.testing-vpc.id + cidr_block = "10.0.1.0/24" + availability_zone = "us-east-1a" + + tags = { + Name = "testing-subnet" + } +} + +# 5. Associate subnet with Route Table +resource "aws_route_table_association" "a" { + subnet_id = aws_subnet.subnet-1.id + route_table_id = aws_route_table.testing-route-table.id +} + +# 6. Create Security Group to allow ports +resource "aws_security_group" "allow_web" { + name = "mithril_sg_test" + description = "Allow Web inbound traffic" + vpc_id = aws_vpc.testing-vpc.id + + ingress { + description = "HTTP" + from_port = 0 + to_port = 0 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + ingress { + description = "SSH" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { + Name = "allow_web" + } +} + +# 7. 
Create Ubuntu server +resource "aws_instance" "mithril_instance" { + ami = var.EC2_AMI + instance_type = var.EC2_INSTANCE_TYPE + availability_zone = "us-east-1a" + key_name = var.EC2_KEY_PAIR + private_ip = "10.0.1.50" + vpc_security_group_ids = [ "${aws_security_group.allow_web.id}" ] + subnet_id = aws_subnet.subnet-1.id + associate_public_ip_address = true + user_data = data.template_file.init.rendered + + root_block_device { + volume_size = var.VOLUME_SIZE + } + + tags = { + Name = "mithril-testing" + } +} + +output "EC2-PUBLIC-IP" { + value = aws_instance.mithril_instance.public_ip +} + +data "aws_secretsmanager_secret" "secrets" { + name = "mithril-jenkins-integration-tests" +} + +data "aws_secretsmanager_secret_version" "mithril_secret" { + secret_id = data.aws_secretsmanager_secret.secrets.id +} + +data "template_file" "init" { + template = file("integration-tests.sh") + + vars = { + access_key = jsondecode(nonsensitive(data.aws_secretsmanager_secret_version.mithril_secret.secret_string))["ACCESS_KEY_ID"], + secret_access_key = jsondecode(nonsensitive(data.aws_secretsmanager_secret_version.mithril_secret.secret_string))["SECRET_ACCESS_KEY"], + region = var.ECR_REGION, + tag = var.TAG, + hub = var.HUB, + build_tag = var.BUILD_TAG + usecase = var.USECASE + } +} \ No newline at end of file diff --git a/terraform/integration-tests/external-workload/output.tf b/terraform/integration-tests/external-workload/output.tf new file mode 100644 index 00000000..e69de29b diff --git a/terraform/integration-tests/external-workload/provider.tf b/terraform/integration-tests/external-workload/provider.tf new file mode 100644 index 00000000..25dddaa5 --- /dev/null +++ b/terraform/integration-tests/external-workload/provider.tf @@ -0,0 +1,4 @@ +provider "aws" { + region = var.ECR_REGION + profile = var.AWS_PROFILE +} \ No newline at end of file diff --git a/terraform/integration-tests/external-workload/variables.tf b/terraform/integration-tests/external-workload/variables.tf new file mode 100644 index 00000000..66a1f9a7 --- /dev/null +++ b/terraform/integration-tests/external-workload/variables.tf @@ -0,0 +1,54 @@ +variable "TAG" { + default = "latest" + description = "TAG used to download the images from ECR repository" +} + +variable "BUILD_TAG" { + default = "latest" + description = "Build TAG from Jenkins Pipeline" +} + +variable "HUB" { + default = "529024819027.dkr.ecr.us-east-1.amazonaws.com/mithril" + description = "HUB used to download the images from ECR repository" +} + +variable "ECR_REGION" { + default = "us-east-1" + description = "ECR region specified to download the docker images" +} + +variable "ARTIFACT_BUCKET_NAME" { + default = "mithril-artifacts" + description = "S3 Bucket name for the Mithril Artifacts" +} + +variable "AWS_PROFILE" { + default = "scytale" + description = "AWS profile used to grant access to AWS CLI API" +} + +variable "EC2_AMI" { + default = "ami-09e67e426f25ce0d7" + description = "Ubuntu 20.04 LTS AMI ID" +} + +variable "EC2_INSTANCE_TYPE" { + default = "t2.xlarge" + description = "EC2 Instance type created by terraform" +} + +variable "EC2_KEY_PAIR" { + default = "mithril-integration-test" + description = "AWS key pair name used to connect to the EC2 instance" +} + +variable "VOLUME_SIZE" { + default = 50 + description = "Root block device volume size used by EC2 instance" +} + +variable "USECASE" { + default = "external-workload" + description = "Client within the mesh start a connection to an external workload" +} \ No newline at end of file diff --git 
a/terraform/integration-tests/federation/integration-tests.sh b/terraform/integration-tests/federation/integration-tests.sh new file mode 100644 index 00000000..0d4e399d --- /dev/null +++ b/terraform/integration-tests/federation/integration-tests.sh @@ -0,0 +1,45 @@ +#!/bin/bash +exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 + +apt update -y +apt install docker.io awscli -y + +aws configure set aws_access_key_id ${access_key} +aws configure set aws_secret_access_key ${secret_access_key} + +aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${hub} + +docker pull ${hub}:${build_tag} + +# Tagging for easier use within the docker command below +docker tag ${hub}:${build_tag} mithril-testing:${build_tag} + +# Creating kubernetes config to use kubectl inside the container +mkdir -p $HOME/.kube && touch $HOME/.kube/config + +# Running usecase and testing it +docker run -i \ +-v "/var/run/docker.sock:/var/run/docker.sock:rw" \ +-v "/.kube/config:/root/.kube/config:rw" \ +--network host mithril-testing:${build_tag} \ +bash -c 'export HUB=${hub} && +export TAG=${build_tag} && +export AWS_ACCESS_KEY_ID=${access_key} && +export AWS_SECRET_ACCESS_KEY=${secret_access_key} && +/mithril/POC/create-kind-cluster.sh && +/mithril/POC/create-docker-registry-secret.sh && +cd /mithril/usecases/federation && +./deploy-all.sh && +kubectl wait pod --for=condition=Ready -l app=productpage --timeout=-1s && +kubectl wait pod --for=condition=Ready -l app=details --timeout=-1s && +kubectl wait pod --for=condition=Ready -l app=reviews --timeout=-1s && +kubectl wait pod --for=condition=Ready -l app=ratings --timeout=-1s && +echo "Deployment done!" && +/mithril/usecases/federation/forward-secure-port.sh && +cd /mithril/e2e && go test -v e2e -run TestFederation 2>&1 | tee ${build_tag}-${usecase}-result.txt && +aws s3 cp ${build_tag}-${usecase}-result.txt s3://mithril-artifacts/${build_tag}/ --region us-east-1 && +echo "Testing done!"' + +cat /var/log/user-data.log >> ${build_tag}-${usecase}-log.txt + +aws s3 cp /${build_tag}-${usecase}-log.txt s3://mithril-artifacts/${build_tag}/ --region us-east-1 diff --git a/terraform/integration-tests/federation/main.tf b/terraform/integration-tests/federation/main.tf new file mode 100644 index 00000000..2d172eeb --- /dev/null +++ b/terraform/integration-tests/federation/main.tf @@ -0,0 +1,128 @@ +# 1. Create vpc +resource "aws_vpc" "testing-vpc" { + cidr_block = "10.0.0.0/16" + tags = { + Name = "testing" + } +} + +# 2. Create Internet Gateway +resource "aws_internet_gateway" "gw" { + vpc_id = aws_vpc.testing-vpc.id +} + +# 3. Create Custom Route Table +resource "aws_route_table" "testing-route-table" { + vpc_id = aws_vpc.testing-vpc.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.gw.id + } + + route { + ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.gw.id + } + + tags = { + Name = "Prod" + } +} + +# 4. Create a Subnet +resource "aws_subnet" "subnet-1" { + vpc_id = aws_vpc.testing-vpc.id + cidr_block = "10.0.1.0/24" + availability_zone = "us-east-1a" + + tags = { + Name = "testing-subnet" + } +} + +# 5. Associate subnet with Route Table +resource "aws_route_table_association" "a" { + subnet_id = aws_subnet.subnet-1.id + route_table_id = aws_route_table.testing-route-table.id +} + +# 6. 
Create Security Group to allow ports +resource "aws_security_group" "allow_web" { + name = "mithril_sg_test" + description = "Allow Web inbound traffic" + vpc_id = aws_vpc.testing-vpc.id + + ingress { + description = "HTTP" + from_port = 0 + to_port = 0 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + ingress { + description = "SSH" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { + Name = "allow_web" + } +} + +# 7. Create Ubuntu server +resource "aws_instance" "mithril_instance" { + ami = var.EC2_AMI + instance_type = var.EC2_INSTANCE_TYPE + availability_zone = "us-east-1a" + key_name = var.EC2_KEY_PAIR + private_ip = "10.0.1.50" + vpc_security_group_ids = [ "${aws_security_group.allow_web.id}" ] + subnet_id = aws_subnet.subnet-1.id + associate_public_ip_address = true + user_data = data.template_file.init.rendered + + root_block_device { + volume_size = var.VOLUME_SIZE + } + + tags = { + Name = "mithril-testing" + } +} + +output "EC2-PUBLIC-IP" { + value = aws_instance.mithril_instance.public_ip +} + +data "aws_secretsmanager_secret" "secrets" { + name = "mithril-jenkins-integration-tests" +} + +data "aws_secretsmanager_secret_version" "mithril_secret" { + secret_id = data.aws_secretsmanager_secret.secrets.id +} + +data "template_file" "init" { + template = file("integration-tests.sh") + + vars = { + access_key = jsondecode(nonsensitive(data.aws_secretsmanager_secret_version.mithril_secret.secret_string))["ACCESS_KEY_ID"], + secret_access_key = jsondecode(nonsensitive(data.aws_secretsmanager_secret_version.mithril_secret.secret_string))["SECRET_ACCESS_KEY"], + region = var.ECR_REGION, + tag = var.TAG, + hub = var.HUB, + build_tag = var.BUILD_TAG + usecase = var.USECASE + } +} diff --git a/terraform/integration-tests/federation/output.tf b/terraform/integration-tests/federation/output.tf new file mode 100644 index 00000000..e69de29b diff --git a/terraform/integration-tests/federation/provider.tf b/terraform/integration-tests/federation/provider.tf new file mode 100644 index 00000000..13a45295 --- /dev/null +++ b/terraform/integration-tests/federation/provider.tf @@ -0,0 +1,4 @@ +provider "aws" { + region = var.ECR_REGION + profile = var.AWS_PROFILE +} diff --git a/terraform/integration-tests/federation/variables.tf b/terraform/integration-tests/federation/variables.tf new file mode 100644 index 00000000..b43a6573 --- /dev/null +++ b/terraform/integration-tests/federation/variables.tf @@ -0,0 +1,54 @@ +variable "TAG" { + default = "latest" + description = "TAG used to download the images from ECR repository" +} + +variable "BUILD_TAG" { + default = "latest" + description = "Build TAG from Jenkins Pipeline" +} + +variable "HUB" { + default = "529024819027.dkr.ecr.us-east-1.amazonaws.com/mithril" + description = "HUB used to download the images from ECR repository" +} + +variable "ECR_REGION" { + default = "us-east-1" + description = "ECR region specified to download the docker images" +} + +variable "ARTIFACT_BUCKET_NAME" { + default = "mithril-artifacts" + description = "S3 Bucket name for the Mithril Artifacts" +} + +variable "AWS_PROFILE" { + default = "scytale" + description = "AWS profile used to grant access to AWS CLI API" +} + +variable "EC2_AMI" { + default = "ami-09e67e426f25ce0d7" + description = "Ubuntu 20.04 LTS AMI ID" +} + +variable "EC2_INSTANCE_TYPE" { + default = "t2.xlarge" + description = "EC2 Instance type created by 
terraform" +} + +variable "EC2_KEY_PAIR" { + default = "mithril-integration-test" + description = "AWS key pair name used to connect to the EC2 instance" +} + +variable "VOLUME_SIZE" { + default = 50 + description = "Root block device volume size used by EC2 instance" +} + +variable "USECASE" { + default = "federation" + description = "Federation use case" +} diff --git a/terraform/integration-tests/simple-bookinfo/integration-tests.sh b/terraform/integration-tests/simple-bookinfo/integration-tests.sh new file mode 100644 index 00000000..460aa64c --- /dev/null +++ b/terraform/integration-tests/simple-bookinfo/integration-tests.sh @@ -0,0 +1,71 @@ +#!/bin/bash +exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 + +apt update -y +apt install docker.io awscli -y + +aws configure set aws_access_key_id ${access_key} +aws configure set aws_secret_access_key ${secret_access_key} + +aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${hub} + +docker pull ${hub}:${build_tag} + +# Tagging for easier use within the docker command below +docker tag ${hub}:${build_tag} mithril-testing:${build_tag} + +# Creating kubernetes config to use kubectl inside the container +mkdir -p $HOME/.kube && touch $HOME/.kube/config + +echo "===== simple_bookinfo =====" + +# Creating kind cluster +docker run -i --rm \ +-v "/var/run/docker.sock:/var/run/docker.sock:rw" \ +-v "/.kube/config:/root/.kube/config:rw" \ +--network host mithril-testing:${build_tag} \ +/mithril/POC/create-kind-cluster.sh + +# Creating Docker secrets for ECR images +docker run -i --rm \ +-v "/var/run/docker.sock:/var/run/docker.sock:rw" \ +-v "/.kube/config:/root/.kube/config:rw" \ +--network host mithril-testing:${build_tag} \ +bash -c "HUB=${hub} AWS_ACCESS_KEY_ID=${access_key} AWS_SECRET_ACCESS_KEY=${secret_access_key} /mithril/POC/create-docker-registry-secret.sh" + +# Deploying the PoC +docker run -i --rm \ +-v "/var/run/docker.sock:/var/run/docker.sock:rw" \ +-v "/.kube/config:/root/.kube/config:rw" \ +--network host mithril-testing:${build_tag} \ +bash -c "cd /mithril/POC && kubectl create ns spire && TAG=${build_tag} HUB=${hub} ./deploy-all.sh" + +# Port Forwarding the POD +docker run -i -d --rm \ +-v "/var/run/docker.sock:/var/run/docker.sock:rw" \ +-v "/.kube/config:/root/.kube/config:rw" \ +--network host mithril-testing:${build_tag} \ +bash -c 'INGRESS_POD=$(kubectl get pod -l app=istio-ingressgateway -n istio-system -o jsonpath="{.items[0].metadata.name}") \ +&& kubectl port-forward "$INGRESS_POD" 8000:8080 -n istio-system' + +# Waiting for POD to be ready +docker run -i --rm \ +-v "/var/run/docker.sock:/var/run/docker.sock:rw" \ +-v "/.kube/config:/root/.kube/config:rw" \ +--network host mithril-testing:${build_tag} \ +bash -c 'kubectl rollout status deployment productpage-v1' + +# Test simple_bookinfo_test +docker run -i --rm \ +-v "/var/run/docker.sock:/var/run/docker.sock:rw" \ +-v "/.kube/config:/root/.kube/config:rw" \ +--network host mithril-testing:${build_tag} \ +bash -c "cd /mithril/e2e && touch ${build_tag}-${usecase}-result.txt \ + && go test -v e2e -run TestSimpleBookinfo > ${build_tag}-${usecase}-result.txt \ + && AWS_ACCESS_KEY_ID=${access_key} AWS_SECRET_ACCESS_KEY=${secret_access_key} aws s3 cp ${build_tag}-${usecase}-result.txt s3://mithril-artifacts/${build_tag}/ --region us-east-1" + +# Generate log files +cat /var/log/user-data.log >> ${build_tag}-${usecase}-log.txt + +# Copying log to S3 bucket +aws s3 cp /${build_tag}-${usecase}-log.txt 
s3://mithril-artifacts/${build_tag}/ --region us-east-1 \ No newline at end of file diff --git a/terraform/integration-tests/simple-bookinfo/main.tf b/terraform/integration-tests/simple-bookinfo/main.tf new file mode 100644 index 00000000..f1f3b6ed --- /dev/null +++ b/terraform/integration-tests/simple-bookinfo/main.tf @@ -0,0 +1,128 @@ +# 1. Create vpc +resource "aws_vpc" "testing-vpc" { + cidr_block = "10.0.0.0/16" + tags = { + Name = "testing" + } +} + +# 2. Create Internet Gateway +resource "aws_internet_gateway" "gw" { + vpc_id = aws_vpc.testing-vpc.id +} + +# 3. Create Custom Route Table +resource "aws_route_table" "testing-route-table" { + vpc_id = aws_vpc.testing-vpc.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.gw.id + } + + route { + ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.gw.id + } + + tags = { + Name = "Prod" + } +} + +# 4. Create a Subnet +resource "aws_subnet" "subnet-1" { + vpc_id = aws_vpc.testing-vpc.id + cidr_block = "10.0.1.0/24" + availability_zone = "us-east-1a" + + tags = { + Name = "testing-subnet" + } +} + +# 5. Associate subnet with Route Table +resource "aws_route_table_association" "a" { + subnet_id = aws_subnet.subnet-1.id + route_table_id = aws_route_table.testing-route-table.id +} + +# 6. Create Security Group to allow ports +resource "aws_security_group" "allow_web" { + name = "mithril_sg_test" + description = "Allow Web inbound traffic" + vpc_id = aws_vpc.testing-vpc.id + + ingress { + description = "HTTP" + from_port = 0 + to_port = 0 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + ingress { + description = "SSH" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { + Name = "allow_web" + } +} + +# 7. 
Create Ubuntu server +resource "aws_instance" "mithril_instance" { + ami = var.EC2_AMI + instance_type = var.EC2_INSTANCE_TYPE + availability_zone = "us-east-1a" + key_name = var.EC2_KEY_PAIR + private_ip = "10.0.1.50" + vpc_security_group_ids = [ "${aws_security_group.allow_web.id}" ] + subnet_id = aws_subnet.subnet-1.id + associate_public_ip_address = true + user_data = data.template_file.init.rendered + + root_block_device { + volume_size = var.VOLUME_SIZE + } + + tags = { + Name = "mithril-testing" + } +} + +output "EC2-PUBLIC-IP" { + value = aws_instance.mithril_instance.public_ip +} + +data "aws_secretsmanager_secret" "secrets" { + name = "mithril-jenkins-integration-tests" +} + +data "aws_secretsmanager_secret_version" "mithril_secret" { + secret_id = data.aws_secretsmanager_secret.secrets.id +} + +data "template_file" "init" { + template = file("integration-tests.sh") + + vars = { + access_key = jsondecode(nonsensitive(data.aws_secretsmanager_secret_version.mithril_secret.secret_string))["ACCESS_KEY_ID"], + secret_access_key = jsondecode(nonsensitive(data.aws_secretsmanager_secret_version.mithril_secret.secret_string))["SECRET_ACCESS_KEY"], + region = var.ECR_REGION, + tag = var.TAG, + hub = var.HUB, + build_tag = var.BUILD_TAG + usecase = var.USECASE + } +} \ No newline at end of file diff --git a/terraform/integration-tests/simple-bookinfo/output.tf b/terraform/integration-tests/simple-bookinfo/output.tf new file mode 100644 index 00000000..e69de29b diff --git a/terraform/integration-tests/simple-bookinfo/provider.tf b/terraform/integration-tests/simple-bookinfo/provider.tf new file mode 100644 index 00000000..25dddaa5 --- /dev/null +++ b/terraform/integration-tests/simple-bookinfo/provider.tf @@ -0,0 +1,4 @@ +provider "aws" { + region = var.ECR_REGION + profile = var.AWS_PROFILE +} \ No newline at end of file diff --git a/terraform/integration-tests/simple-bookinfo/variables.tf b/terraform/integration-tests/simple-bookinfo/variables.tf new file mode 100644 index 00000000..5565409a --- /dev/null +++ b/terraform/integration-tests/simple-bookinfo/variables.tf @@ -0,0 +1,54 @@ +variable "TAG" { + default = "latest" + description = "TAG used to download the images from ECR repository" +} + +variable "BUILD_TAG" { + default = "latest" + description = "Build TAG from Jenkins Pipeline" +} + +variable "HUB" { + default = "529024819027.dkr.ecr.us-east-1.amazonaws.com/mithril" + description = "HUB used to download the images from ECR repository" +} + +variable "ECR_REGION" { + default = "us-east-1" + description = "ECR region specified to download the docker images" +} + +variable "ARTIFACT_BUCKET_NAME" { + default = "mithril-artifacts" + description = "S3 Bucket name for the Mithril Artifacts" +} + +variable "AWS_PROFILE" { + default = "scytale" + description = "AWS profile used to grant access to AWS CLI API" +} + +variable "EC2_AMI" { + default = "ami-09e67e426f25ce0d7" + description = "Ubuntu 20.04 LTS AMI ID" +} + +variable "EC2_INSTANCE_TYPE" { + default = "t2.xlarge" + description = "EC2 Instance type created by terraform" +} + +variable "EC2_KEY_PAIR" { + default = "mithril-integration-test" + description = "AWS key pair name used to connect to the EC2 instance" +} + +variable "VOLUME_SIZE" { + default = 50 + description = "Root block device volume size used by EC2 instance" +} + +variable "USECASE" { + default = "simple-bookinfo" + description = "Initial usecase" +} \ No newline at end of file diff --git 
a/terraform/integration-tests/workload-to-ingress-upstream-disk/integration-tests.sh b/terraform/integration-tests/workload-to-ingress-upstream-disk/integration-tests.sh new file mode 100644 index 00000000..a7553928 --- /dev/null +++ b/terraform/integration-tests/workload-to-ingress-upstream-disk/integration-tests.sh @@ -0,0 +1,40 @@ +#!/bin/bash +exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 + +apt update -y +apt install docker.io awscli -y + +aws configure set aws_access_key_id ${access_key} +aws configure set aws_secret_access_key ${secret_access_key} + +aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${hub} + +docker pull ${hub}:${build_tag} + +# Tagging for easier use within the docker command below +docker tag ${hub}:${build_tag} mithril-testing:${build_tag} + +# Creating kubernetes config to use kubectl inside the container +mkdir -p $HOME/.kube && touch $HOME/.kube/config + +# Running usecase and testing it +docker run -i --rm \ +-v "/var/run/docker.sock:/var/run/docker.sock:rw" \ +-v "/.kube/config:/root/.kube/config:rw" \ +--network host mithril-testing:${build_tag} \ +bash -c 'cd /mithril/usecases && find . -type f -iname "*.sh" -exec chmod +x {} \; && cd workload-to-ingress-upstream-disk/server-cluster && /mithril/POC/create-kind-cluster.sh && +HUB=${hub} AWS_ACCESS_KEY_ID=${access_key} AWS_SECRET_ACCESS_KEY=${secret_access_key} /mithril/POC/create-docker-registry-secret.sh && +kubectl create ns spire && TAG=${build_tag} HUB=${hub} ./deploy-all.sh && +kubectl rollout status deployment httpbin && +INGRESS_POD=$(kubectl get pod -l app=istio-ingressgateway -n istio-system -o jsonpath="{.items[0].metadata.name}") && +/mithril/POC/forward-port.sh && +cd /mithril/usecases/workload-to-ingress-upstream-disk/client-cluster && /mithril/usecases/common/utils/create-kind2-cluster.sh && +HUB=${hub} AWS_ACCESS_KEY_ID=${access_key} AWS_SECRET_ACCESS_KEY=${secret_access_key} /mithril/POC/create-docker-registry-secret.sh && +kubectl create ns spire && TAG=${build_tag} HUB=${hub} && ./deploy-all.sh && +kubectl rollout status deployment sleep && +cd /mithril/e2e && go test -v e2e -run TestWorkloadToIngressUpstreamDisk 2>&1 | tee ${build_tag}-${usecase}-result.txt && +AWS_ACCESS_KEY_ID=${access_key} AWS_SECRET_ACCESS_KEY=${secret_access_key} aws s3 cp ${build_tag}-${usecase}-result.txt s3://mithril-artifacts/${build_tag}/ --region us-east-1' + +cat /var/log/user-data.log >> ${build_tag}-${usecase}-log.txt + +aws s3 cp /${build_tag}-${usecase}-log.txt s3://mithril-artifacts/${build_tag}/ --region us-east-1 \ No newline at end of file diff --git a/terraform/integration-tests/workload-to-ingress-upstream-disk/main.tf b/terraform/integration-tests/workload-to-ingress-upstream-disk/main.tf new file mode 100644 index 00000000..f1f3b6ed --- /dev/null +++ b/terraform/integration-tests/workload-to-ingress-upstream-disk/main.tf @@ -0,0 +1,128 @@ +# 1. Create vpc +resource "aws_vpc" "testing-vpc" { + cidr_block = "10.0.0.0/16" + tags = { + Name = "testing" + } +} + +# 2. Create Internet Gateway +resource "aws_internet_gateway" "gw" { + vpc_id = aws_vpc.testing-vpc.id +} + +# 3. Create Custom Route Table +resource "aws_route_table" "testing-route-table" { + vpc_id = aws_vpc.testing-vpc.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.gw.id + } + + route { + ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.gw.id + } + + tags = { + Name = "Prod" + } +} + +# 4. 
Create a Subnet +resource "aws_subnet" "subnet-1" { + vpc_id = aws_vpc.testing-vpc.id + cidr_block = "10.0.1.0/24" + availability_zone = "us-east-1a" + + tags = { + Name = "testing-subnet" + } +} + +# 5. Associate subnet with Route Table +resource "aws_route_table_association" "a" { + subnet_id = aws_subnet.subnet-1.id + route_table_id = aws_route_table.testing-route-table.id +} + +# 6. Create Security Group to allow ports +resource "aws_security_group" "allow_web" { + name = "mithril_sg_test" + description = "Allow Web inbound traffic" + vpc_id = aws_vpc.testing-vpc.id + + ingress { + description = "HTTP" + from_port = 0 + to_port = 0 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + ingress { + description = "SSH" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { + Name = "allow_web" + } +} + +# 7. Create Ubuntu server +resource "aws_instance" "mithril_instance" { + ami = var.EC2_AMI + instance_type = var.EC2_INSTANCE_TYPE + availability_zone = "us-east-1a" + key_name = var.EC2_KEY_PAIR + private_ip = "10.0.1.50" + vpc_security_group_ids = [ "${aws_security_group.allow_web.id}" ] + subnet_id = aws_subnet.subnet-1.id + associate_public_ip_address = true + user_data = data.template_file.init.rendered + + root_block_device { + volume_size = var.VOLUME_SIZE + } + + tags = { + Name = "mithril-testing" + } +} + +output "EC2-PUBLIC-IP" { + value = aws_instance.mithril_instance.public_ip +} + +data "aws_secretsmanager_secret" "secrets" { + name = "mithril-jenkins-integration-tests" +} + +data "aws_secretsmanager_secret_version" "mithril_secret" { + secret_id = data.aws_secretsmanager_secret.secrets.id +} + +data "template_file" "init" { + template = file("integration-tests.sh") + + vars = { + access_key = jsondecode(nonsensitive(data.aws_secretsmanager_secret_version.mithril_secret.secret_string))["ACCESS_KEY_ID"], + secret_access_key = jsondecode(nonsensitive(data.aws_secretsmanager_secret_version.mithril_secret.secret_string))["SECRET_ACCESS_KEY"], + region = var.ECR_REGION, + tag = var.TAG, + hub = var.HUB, + build_tag = var.BUILD_TAG + usecase = var.USECASE + } +} \ No newline at end of file diff --git a/terraform/integration-tests/workload-to-ingress-upstream-disk/output.tf b/terraform/integration-tests/workload-to-ingress-upstream-disk/output.tf new file mode 100644 index 00000000..e69de29b diff --git a/terraform/integration-tests/workload-to-ingress-upstream-disk/provider.tf b/terraform/integration-tests/workload-to-ingress-upstream-disk/provider.tf new file mode 100644 index 00000000..25dddaa5 --- /dev/null +++ b/terraform/integration-tests/workload-to-ingress-upstream-disk/provider.tf @@ -0,0 +1,4 @@ +provider "aws" { + region = var.ECR_REGION + profile = var.AWS_PROFILE +} \ No newline at end of file diff --git a/terraform/integration-tests/workload-to-ingress-upstream-disk/variables.tf b/terraform/integration-tests/workload-to-ingress-upstream-disk/variables.tf new file mode 100644 index 00000000..fa3d686c --- /dev/null +++ b/terraform/integration-tests/workload-to-ingress-upstream-disk/variables.tf @@ -0,0 +1,54 @@ +variable "TAG" { + default = "latest" + description = "TAG used to download the images from ECR repository" +} + +variable "BUILD_TAG" { + default = "latest" + description = "Build TAG from Jenkins Pipeline" +} + +variable "HUB" { + default = "529024819027.dkr.ecr.us-east-1.amazonaws.com/mithril" + description = "HUB used to 
download the images from ECR repository" +} + +variable "ECR_REGION" { + default = "us-east-1" + description = "ECR region specified to download the docker images" +} + +variable "ARTIFACT_BUCKET_NAME" { + default = "mithril-artifacts" + description = "S3 Bucket name for the Mithril Artifacts" +} + +variable "AWS_PROFILE" { + default = "scytale" + description = "AWS profile used to grant access to AWS CLI API" +} + +variable "EC2_AMI" { + default = "ami-09e67e426f25ce0d7" + description = "Ubuntu 20.04 LTS AMI ID" +} + +variable "EC2_INSTANCE_TYPE" { + default = "t2.xlarge" + description = "EC2 Instance type created by terraform" +} + +variable "EC2_KEY_PAIR" { + default = "mithril-integration-test" + description = "AWS key pair name used to connect to the EC2 instance" +} + +variable "VOLUME_SIZE" { + default = 50 + description = "Root block device volume size used by EC2 instance" +} + +variable "USECASE" { + default = "workload-to-ingress-upstream-disk" + description = "Workload to ingress, two clusters with UpstreamAuthority disk plugin" +} \ No newline at end of file diff --git a/terraform/integration-tests/workload-to-ingress-upstream-spire/integration-tests.sh b/terraform/integration-tests/workload-to-ingress-upstream-spire/integration-tests.sh new file mode 100644 index 00000000..7c67f32f --- /dev/null +++ b/terraform/integration-tests/workload-to-ingress-upstream-spire/integration-tests.sh @@ -0,0 +1,41 @@ +#!/bin/bash +exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 + +apt update -y +apt install docker.io awscli -y + +aws configure set aws_access_key_id ${access_key} +aws configure set aws_secret_access_key ${secret_access_key} + +aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${hub} + +docker pull ${hub}:${build_tag} + +# Tagging for easier use within the docker command below +docker tag ${hub}:${build_tag} mithril-testing:${build_tag} + +# Creating kubernetes config to use kubectl inside the container +mkdir -p $HOME/.kube && touch $HOME/.kube/config + +# Running usecase and testing it +docker run -i --rm \ +-v "/var/run/docker.sock:/var/run/docker.sock:rw" \ +-v "/.kube/config:/root/.kube/config:rw" \ +--network host mithril-testing:${build_tag} \ +bash -c 'cd /mithril/usecases && find . 
-type f -iname "*.sh" -exec chmod +x {} \; && cd workload-to-ingress-upstream-spire && ./set-env.sh && +cd server-cluster && /mithril/POC/create-kind-cluster.sh && +HUB=${hub} AWS_ACCESS_KEY_ID=${access_key} AWS_SECRET_ACCESS_KEY=${secret_access_key} /mithril/POC/create-docker-registry-secret.sh && +kubectl create ns spire && TAG=${build_tag} HUB=${hub} ./deploy-all.sh && +kubectl rollout status deployment httpbin && +INGRESS_POD=$(kubectl get pod -l app=istio-ingressgateway -n istio-system -o jsonpath="{.items[0].metadata.name}") && +/mithril/POC/forward-port.sh && +cd /mithril/usecases/workload-to-ingress-upstream-spire/client-cluster && ../../common/utils/create-kind2-cluster.sh && +HUB=${hub} AWS_ACCESS_KEY_ID=${access_key} AWS_SECRET_ACCESS_KEY=${secret_access_key} /mithril/POC/create-docker-registry-secret.sh && +kubectl create ns spire && TAG=${build_tag} HUB=${hub} && ./deploy-all.sh && +kubectl rollout status deployment sleep && +cd /mithril/e2e && go test -v e2e -run TestWorkloadToIngressUpstreamSpire 2>&1 | tee ${build_tag}-${usecase}-result.txt && +AWS_ACCESS_KEY_ID=${access_key} AWS_SECRET_ACCESS_KEY=${secret_access_key} aws s3 cp ${build_tag}-${usecase}-result.txt s3://mithril-artifacts/${build_tag}/ --region us-east-1' + +cat /var/log/user-data.log >> ${build_tag}-${usecase}-log.txt + +aws s3 cp /${build_tag}-${usecase}-log.txt s3://mithril-artifacts/${build_tag}/ --region us-east-1 diff --git a/terraform/integration-tests/workload-to-ingress-upstream-spire/main.tf b/terraform/integration-tests/workload-to-ingress-upstream-spire/main.tf new file mode 100644 index 00000000..2d172eeb --- /dev/null +++ b/terraform/integration-tests/workload-to-ingress-upstream-spire/main.tf @@ -0,0 +1,128 @@ +# 1. Create vpc +resource "aws_vpc" "testing-vpc" { + cidr_block = "10.0.0.0/16" + tags = { + Name = "testing" + } +} + +# 2. Create Internet Gateway +resource "aws_internet_gateway" "gw" { + vpc_id = aws_vpc.testing-vpc.id +} + +# 3. Create Custom Route Table +resource "aws_route_table" "testing-route-table" { + vpc_id = aws_vpc.testing-vpc.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.gw.id + } + + route { + ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.gw.id + } + + tags = { + Name = "Prod" + } +} + +# 4. Create a Subnet +resource "aws_subnet" "subnet-1" { + vpc_id = aws_vpc.testing-vpc.id + cidr_block = "10.0.1.0/24" + availability_zone = "us-east-1a" + + tags = { + Name = "testing-subnet" + } +} + +# 5. Associate subnet with Route Table +resource "aws_route_table_association" "a" { + subnet_id = aws_subnet.subnet-1.id + route_table_id = aws_route_table.testing-route-table.id +} + +# 6. Create Security Group to allow ports +resource "aws_security_group" "allow_web" { + name = "mithril_sg_test" + description = "Allow Web inbound traffic" + vpc_id = aws_vpc.testing-vpc.id + + ingress { + description = "HTTP" + from_port = 0 + to_port = 0 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + ingress { + description = "SSH" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { + Name = "allow_web" + } +} + +# 7. 
Create Ubuntu server +resource "aws_instance" "mithril_instance" { + ami = var.EC2_AMI + instance_type = var.EC2_INSTANCE_TYPE + availability_zone = "us-east-1a" + key_name = var.EC2_KEY_PAIR + private_ip = "10.0.1.50" + vpc_security_group_ids = [ "${aws_security_group.allow_web.id}" ] + subnet_id = aws_subnet.subnet-1.id + associate_public_ip_address = true + user_data = data.template_file.init.rendered + + root_block_device { + volume_size = var.VOLUME_SIZE + } + + tags = { + Name = "mithril-testing" + } +} + +output "EC2-PUBLIC-IP" { + value = aws_instance.mithril_instance.public_ip +} + +data "aws_secretsmanager_secret" "secrets" { + name = "mithril-jenkins-integration-tests" +} + +data "aws_secretsmanager_secret_version" "mithril_secret" { + secret_id = data.aws_secretsmanager_secret.secrets.id +} + +data "template_file" "init" { + template = file("integration-tests.sh") + + vars = { + access_key = jsondecode(nonsensitive(data.aws_secretsmanager_secret_version.mithril_secret.secret_string))["ACCESS_KEY_ID"], + secret_access_key = jsondecode(nonsensitive(data.aws_secretsmanager_secret_version.mithril_secret.secret_string))["SECRET_ACCESS_KEY"], + region = var.ECR_REGION, + tag = var.TAG, + hub = var.HUB, + build_tag = var.BUILD_TAG + usecase = var.USECASE + } +} diff --git a/terraform/integration-tests/workload-to-ingress-upstream-spire/output.tf b/terraform/integration-tests/workload-to-ingress-upstream-spire/output.tf new file mode 100644 index 00000000..e69de29b diff --git a/terraform/integration-tests/workload-to-ingress-upstream-spire/provider.tf b/terraform/integration-tests/workload-to-ingress-upstream-spire/provider.tf new file mode 100644 index 00000000..13a45295 --- /dev/null +++ b/terraform/integration-tests/workload-to-ingress-upstream-spire/provider.tf @@ -0,0 +1,4 @@ +provider "aws" { + region = var.ECR_REGION + profile = var.AWS_PROFILE +} diff --git a/terraform/integration-tests/workload-to-ingress-upstream-spire/variables.tf b/terraform/integration-tests/workload-to-ingress-upstream-spire/variables.tf new file mode 100644 index 00000000..c582b60f --- /dev/null +++ b/terraform/integration-tests/workload-to-ingress-upstream-spire/variables.tf @@ -0,0 +1,54 @@ +variable "TAG" { + default = "latest" + description = "TAG used to download the images from ECR repository" +} + +variable "BUILD_TAG" { + default = "latest" + description = "Build TAG from Jenkins Pipeline" +} + +variable "HUB" { + default = "529024819027.dkr.ecr.us-east-1.amazonaws.com/mithril" + description = "HUB used to download the images from ECR repository" +} + +variable "ECR_REGION" { + default = "us-east-1" + description = "ECR region specified to download the docker images" +} + +variable "ARTIFACT_BUCKET_NAME" { + default = "mithril-artifacts" + description = "S3 Bucket name for the Mithril Artifacts" +} + +variable "AWS_PROFILE" { + default = "scytale" + description = "AWS profile used to grant access to AWS CLI API" +} + +variable "EC2_AMI" { + default = "ami-09e67e426f25ce0d7" + description = "Ubuntu 20.04 LTS AMI ID" +} + +variable "EC2_INSTANCE_TYPE" { + default = "t2.xlarge" + description = "EC2 Instance type created by terraform" +} + +variable "EC2_KEY_PAIR" { + default = "mithril-integration-test" + description = "AWS key pair name used to connect to the EC2 instance" +} + +variable "VOLUME_SIZE" { + default = 50 + description = "Root block device volume size used by EC2 instance" +} + +variable "USECASE" { + default = "workload-to-ingress-upstream-spire" + description = "Workload to ingress, 
two clusters with UpstreamAuthority spire plugin" +} diff --git a/terraform/istio-unit-tests/check-go-version.sh b/terraform/istio-unit-tests/check-go-version.sh new file mode 100644 index 00000000..4a3b5c03 --- /dev/null +++ b/terraform/istio-unit-tests/check-go-version.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +# Check istio release version, release-1.10 has a go1.16 dependency. + +if [[ ${istio_branch} == release-1.10 ]]; then go install golang.org/dl/go1.16.12@latest && +"$HOME"/go/bin/go1.16.12 download && export PATH="$HOME/sdk/go1.16.12/bin:$PATH" && go version; fi diff --git a/terraform/istio-unit-tests/main.tf b/terraform/istio-unit-tests/main.tf new file mode 100644 index 00000000..ad5ddee6 --- /dev/null +++ b/terraform/istio-unit-tests/main.tf @@ -0,0 +1,129 @@ +# 1. Create vpc +resource "aws_vpc" "testing-vpc" { + cidr_block = "10.0.0.0/16" + tags = { + Name = "testing" + } +} + +# 2. Create Internet Gateway +resource "aws_internet_gateway" "gw" { + vpc_id = aws_vpc.testing-vpc.id +} + +# 3. Create Custom Route Table +resource "aws_route_table" "testing-route-table" { + vpc_id = aws_vpc.testing-vpc.id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.gw.id + } + + route { + ipv6_cidr_block = "::/0" + gateway_id = aws_internet_gateway.gw.id + } + + tags = { + Name = "Prod" + } +} + +# 4. Create a Subnet +resource "aws_subnet" "subnet-1" { + vpc_id = aws_vpc.testing-vpc.id + cidr_block = "10.0.1.0/24" + availability_zone = "us-east-1a" + + tags = { + Name = "testing-subnet" + } +} + +# 5. Associate subnet with Route Table +resource "aws_route_table_association" "a" { + subnet_id = aws_subnet.subnet-1.id + route_table_id = aws_route_table.testing-route-table.id +} + +# 6. Create Security Group to allow ports +resource "aws_security_group" "allow_web" { + name = "mithril_sg_test" + description = "Allow Web inbound traffic" + vpc_id = aws_vpc.testing-vpc.id + + ingress { + description = "HTTP" + from_port = 0 + to_port = 0 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + ingress { + description = "SSH" + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = { + Name = "allow_web" + } +} + +# 7. 
Create Ubuntu server +resource "aws_instance" "mithril_instance" { + ami = var.EC2_AMI + instance_type = var.EC2_INSTANCE_TYPE + availability_zone = "us-east-1a" + key_name = var.EC2_KEY_PAIR + private_ip = "10.0.1.50" + vpc_security_group_ids = [ "${aws_security_group.allow_web.id}" ] + subnet_id = aws_subnet.subnet-1.id + associate_public_ip_address = true + user_data = data.template_file.init.rendered + + root_block_device { + volume_size = var.VOLUME_SIZE + } + + tags = { + Name = "mithril-testing" + } +} + +output "EC2-PUBLIC-IP" { + value = aws_instance.mithril_instance.public_ip +} + +data "aws_secretsmanager_secret" "secrets" { + name = "mithril-jenkins-integration-tests" +} + +data "aws_secretsmanager_secret_version" "mithril_secret" { + secret_id = data.aws_secretsmanager_secret.secrets.id +} + +data "template_file" "init" { + template = file("unit-tests.sh") + + vars = { + access_key = jsondecode(nonsensitive(data.aws_secretsmanager_secret_version.mithril_secret.secret_string))["ACCESS_KEY_ID"], + secret_access_key = jsondecode(nonsensitive(data.aws_secretsmanager_secret_version.mithril_secret.secret_string))["SECRET_ACCESS_KEY"], + region = var.ECR_REGION, + tag = var.TAG, + hub = var.HUB, + build_tag = var.BUILD_TAG + usecase = var.USECASE + istio_branch = var.ISTIO_BRANCH + } +} \ No newline at end of file diff --git a/terraform/istio-unit-tests/output.tf b/terraform/istio-unit-tests/output.tf new file mode 100644 index 00000000..e69de29b diff --git a/terraform/istio-unit-tests/provider.tf b/terraform/istio-unit-tests/provider.tf new file mode 100644 index 00000000..25dddaa5 --- /dev/null +++ b/terraform/istio-unit-tests/provider.tf @@ -0,0 +1,4 @@ +provider "aws" { + region = var.ECR_REGION + profile = var.AWS_PROFILE +} \ No newline at end of file diff --git a/terraform/istio-unit-tests/unit-tests.sh b/terraform/istio-unit-tests/unit-tests.sh new file mode 100644 index 00000000..03b7e240 --- /dev/null +++ b/terraform/istio-unit-tests/unit-tests.sh @@ -0,0 +1,51 @@ +#!/bin/bash +exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1 + +apt update -y +apt install docker.io awscli -y + +aws configure set aws_access_key_id ${access_key} +aws configure set aws_secret_access_key ${secret_access_key} + +aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${hub} + +docker pull ${hub}:${build_tag} + +# Tagging for easier use within the docker command below +docker tag ${hub}:${build_tag} mithril-testing:${build_tag} + +# Creating kubernetes config to use kubectl inside the container +mkdir -p $HOME/.kube && touch $HOME/.kube/config + +# Running usecase and testing it +docker run -i \ +-v "/var/run/docker.sock:/var/run/docker.sock:rw" \ +-v "/.kube/config:/root/.kube/config:rw" \ +--network host mithril-testing:${build_tag} \ +bash -c 'echo ${istio_branch} && export istio_branch=${istio_branch} && +. 
/mithril/terraform/istio-unit-tests/check-go-version.sh && +mkdir tmp && +cd tmp && +git clone --single-branch --branch ${istio_branch} https://github.com/istio/istio.git && +cd istio && +git apply /mithril/POC/patches/poc.${istio_branch}.patch && +go get github.com/spiffe/go-spiffe/v2 && +go mod tidy && +make build && +make test 2>&1 | tee ${build_tag}-istio-unit-tests-result.txt && +AWS_ACCESS_KEY_ID=${access_key} AWS_SECRET_ACCESS_KEY=${secret_access_key} aws s3 cp ${build_tag}-istio-unit-tests-result.txt s3://mithril-artifacts/${build_tag}/ --region us-east-1 && +go test -race -coverprofile cover.out ./... 2>&1' + +docker commit $(docker ps -aq) mithril/coverage + +docker run -i \ +-v "/var/run/docker.sock:/var/run/docker.sock:rw" \ +-v "/.kube/config:/root/.kube/config:rw" \ +--rm mithril/coverage:latest \ +bash -c 'cd tmp/istio && +go tool cover -o coverage.html -html=cover.out && +AWS_ACCESS_KEY_ID=${access_key} AWS_SECRET_ACCESS_KEY=${secret_access_key} aws s3 cp coverage.html s3://mithril-artifacts/${build_tag}/ --region us-east-1' + +cat /var/log/user-data.log >> ${build_tag}-istio-unit-tests-log.txt + +aws s3 cp /${build_tag}-istio-unit-tests-log.txt s3://mithril-artifacts/${build_tag}/ --region us-east-1 diff --git a/terraform/istio-unit-tests/variables.tf b/terraform/istio-unit-tests/variables.tf new file mode 100644 index 00000000..2b53aa63 --- /dev/null +++ b/terraform/istio-unit-tests/variables.tf @@ -0,0 +1,59 @@ +variable "TAG" { + default = "latest" + description = "TAG used to download the images from ECR repository" +} + +variable "BUILD_TAG" { + default = "latest" + description = "Build TAG from Jenkins Pipeline" +} + +variable "HUB" { + default = "529024819027.dkr.ecr.us-east-1.amazonaws.com/mithril" + description = "HUB used to download the images from ECR repository" +} + +variable "ECR_REGION" { + default = "us-east-1" + description = "ECR region specified to download the docker images" +} + +variable "ARTIFACT_BUCKET_NAME" { + default = "mithril-artifacts" + description = "S3 Bucket name for the Mithril Artifacts" +} + +variable "AWS_PROFILE" { + default = "scytale" + description = "AWS profile used to grant access to AWS CLI API" +} + +variable "EC2_AMI" { + default = "ami-09e67e426f25ce0d7" + description = "Ubuntu 20.04 LTS AMI ID" +} + +variable "EC2_INSTANCE_TYPE" { + default = "t2.xlarge" + description = "EC2 Instance type created by terraform" +} + +variable "EC2_KEY_PAIR" { + default = "mithril-integration-test" + description = "AWS key pair name used to connect to the EC2 instance" +} + +variable "VOLUME_SIZE" { + default = 50 + description = "Root block device volume size used by EC2 instance" +} + +variable "USECASE" { + default = "simple-bookinfo" + description = "Initial usecase" +} + +variable "ISTIO_BRANCH" { + default = "release-1.10" + description = "Istio branch to run unit tests" +} \ No newline at end of file diff --git a/usecases/common/networking/gateway.yaml b/usecases/common/networking/gateway.yaml new file mode 100644 index 00000000..de45ae2f --- /dev/null +++ b/usecases/common/networking/gateway.yaml @@ -0,0 +1,47 @@ +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: httpbin-gateway +spec: + selector: + istio: ingressgateway # use istio default controller + servers: + - port: + number: 8080 + name: https + protocol: HTTPS + tls: + mode: ISTIO_MUTUAL + hosts: + - "*" +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: httpbin-service +spec: + hosts: + - "*" + 
gateways: + - httpbin-gateway + http: + - match: + - uri: + prefix: /status + - uri: + prefix: /delay + route: + - destination: + port: + number: 8000 + host: httpbin +--- +apiVersion: "networking.istio.io/v1alpha3" +kind: "DestinationRule" +metadata: + name: "enable-mtls" +spec: + host: "*.svc.cluster.local" + trafficPolicy: + tls: + mode: ISTIO_MUTUAL diff --git a/usecases/common/networking/service-entry.yaml b/usecases/common/networking/service-entry.yaml new file mode 100644 index 00000000..16f65260 --- /dev/null +++ b/usecases/common/networking/service-entry.yaml @@ -0,0 +1,26 @@ +apiVersion: networking.istio.io/v1alpha3 +kind: ServiceEntry +metadata: + name: app +spec: + hosts: + - istio-ingressgateway.istio-system.svc + ports: + - number: 8000 + name: http-port + protocol: HTTP + targetPort: 8000 + resolution: DNS +--- +apiVersion: networking.istio.io/v1alpha3 +kind: DestinationRule +metadata: + name: app +spec: + host: istio-ingressgateway.istio-system.svc + trafficPolicy: + portLevelSettings: + - port: + number: 8000 + tls: + mode: ISTIO_MUTUAL # initiates HTTPS when accessing istio-ingressgateway.istio-system.svc diff --git a/usecases/common/spire/certs/upstream-ca.pem b/usecases/common/spire/certs/upstream-ca.pem new file mode 100644 index 00000000..23ff4916 --- /dev/null +++ b/usecases/common/spire/certs/upstream-ca.pem @@ -0,0 +1,10 @@ +-----BEGIN CERTIFICATE----- +MIIBTjCB9aADAgECAgEBMAoGCCqGSM49BAMCMBYxFDASBgNVBAMTC1Vwc3RyZWFt +IENBMCAYDzAwMDEwMTAxMDAwMDAwWhcNMzExMDAxMjExOTQ5WjAWMRQwEgYDVQQD +EwtVcHN0cmVhbSBDQTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABItmy5ZXNy0z +kHuVU43EI8+MVn8gZpzm+550SAMp4SOiodkyz+Lqco+wjNgoiO6+uC3sEPsIwSiI +DnqGC6pBGKCjMjAwMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJyRx8yP0NdU +TB2o8prTz5hMGUxBMAoGCCqGSM49BAMCA0gAMEUCIBJjVSj9wSxyISnU5B6dJvEv +6MHnkvtLrYFurGOds8qaAiEAln0Ri++6uNKzQVXbkHHfxuYI2Rnx3+89LVgfOqAK +90s= +-----END CERTIFICATE----- diff --git a/usecases/common/spire/keys/upstream-ca.key.pem b/usecases/common/spire/keys/upstream-ca.key.pem new file mode 100644 index 00000000..3354173c --- /dev/null +++ b/usecases/common/spire/keys/upstream-ca.key.pem @@ -0,0 +1,5 @@ +-----BEGIN PRIVATE KEY----- +MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgaaesHxERhsl1DYLV +ZabFDs/3DauHAiBLhr9ZIfLGCBuhRANCAASLZsuWVzctM5B7lVONxCPPjFZ/IGac +5vuedEgDKeEjoqHZMs/i6nKPsIzYKIjuvrgt7BD7CMEoiA56hguqQRig +-----END PRIVATE KEY----- diff --git a/usecases/common/utils/create-kind2-cluster.sh b/usecases/common/utils/create-kind2-cluster.sh new file mode 100644 index 00000000..67bb6c65 --- /dev/null +++ b/usecases/common/utils/create-kind2-cluster.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +set -o errexit + +# create registry container unless it already exists +reg_name='kind-registry' +reg_port='5000' +running="$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" +if [ "${running}" != 'true' ]; then + docker run \ + -d --restart=always -p "127.0.0.1:${reg_port}:5000" --name "${reg_name}" \ + registry:2 +fi + +# create a cluster with the local registry enabled in container, and configured +# with the Token API server. 
+cat < usecases/federation/forward-secure-port.sh + +Forwarding from 127.0.0.1:7000 -> 7080 +Forwarding from [::1]:7000 -> 7080 +``` + +### Generate certs + +Mint SVID in the trust domain `domain.test`: + +```bash +> kubectl exec --stdin --tty -n spire2 spire-server-0 -- /opt/spire/bin/spire-server x509 mint -spiffeID spiffe://domain.test/myservice -socketPath /run/spire/sockets/server.sock +``` + +Copy the X509-SVID section of the output to a file `svid.pem`. +```bash +> openssl x509 -in mint-cert.pem -out svid.pem +``` + +Copy the Private key section of the output to a file `key.pem`. +```bash +> openssl pkey -in mint-cert.pem -out key.pem +``` + +### Test TLS request + +```bash +> curl --cert svid.pem --key key.pem -k -I https://localhost:7000/productpage + +HTTP/2 200 +content-type: text/html; charset=utf-8 +content-length: 5183 +server: istio-envoy +``` diff --git a/usecases/federation/bookinfo/deploy-bookinfo.sh b/usecases/federation/bookinfo/deploy-bookinfo.sh new file mode 100755 index 00000000..acc9cf11 --- /dev/null +++ b/usecases/federation/bookinfo/deploy-bookinfo.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +DIR="../../../POC" + +if [[ "$1" ]]; then + DIR=$1 +fi + +istioctl kube-inject --filename $DIR/bookinfo/bookinfo.yaml | kubectl apply -f - + +kubectl apply -f gateway.yaml diff --git a/usecases/federation/bookinfo/gateway.yaml b/usecases/federation/bookinfo/gateway.yaml new file mode 100644 index 00000000..d85694ca --- /dev/null +++ b/usecases/federation/bookinfo/gateway.yaml @@ -0,0 +1,69 @@ +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: bookinfo-gateway +spec: + selector: + istio: ingressgateway + servers: + - port: + number: 8080 + name: http + protocol: HTTP + hosts: + - "*" +--- +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: bookinfo-gateway-mtls +spec: + selector: + istio: ingressgateway-mtls + servers: + - port: + number: 7080 + name: https + protocol: HTTPS + tls: + mode: ISTIO_MUTUAL + hosts: + - "*" +--- +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: bookinfo-service +spec: + hosts: + - "*" + gateways: + - bookinfo-gateway + - bookinfo-gateway-mtls + http: + - match: + - uri: + exact: /productpage + - uri: + prefix: /static + - uri: + exact: /login + - uri: + exact: /logout + - uri: + prefix: /api/v1/products + route: + - destination: + host: productpage.default.svc.cluster.local + port: + number: 9080 +--- +apiVersion: "networking.istio.io/v1alpha3" +kind: "DestinationRule" +metadata: + name: "enable-mtls" +spec: + host: "*.svc.cluster.local" + trafficPolicy: + tls: + mode: ISTIO_MUTUAL diff --git a/usecases/federation/create-namespaces.sh b/usecases/federation/create-namespaces.sh new file mode 100755 index 00000000..360b9aea --- /dev/null +++ b/usecases/federation/create-namespaces.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +kubectl create ns istio-system +kubectl create ns spire +kubectl create ns spire2 +sleep 2 diff --git a/usecases/federation/deploy-all.sh b/usecases/federation/deploy-all.sh new file mode 100755 index 00000000..ecc69c50 --- /dev/null +++ b/usecases/federation/deploy-all.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +folder=$(dirname "$0") +pushd "$folder" || exit + +# create all namespaces at the beginning to prevent errors with the bundle sync +./create-namespaces.sh + +kubectl apply -f ../../POC/configmaps.yaml + +(cd spire2 ; ./deploy-spire.sh) +(cd spire ; ./deploy-spire.sh) + +# wait until spire2 is ready +kubectl rollout status statefulset -n spire2 spire-server +# echo 
bundle from spire2 (domain.test) +bundle=$(kubectl exec --stdin spire-server-0 -c spire-server -n spire2 -- /opt/spire/bin/spire-server bundle show -format spiffe -socketPath /run/spire/sockets/server.sock) + +# wait until spire is ready +kubectl rollout status statefulset -n spire spire-server +# set domain.test bundle to spire +kubectl exec --stdin spire-server-0 -c spire-server -n spire -- /opt/spire/bin/spire-server bundle set -format spiffe -id spiffe://domain.test -socketPath /run/spire/sockets/server.sock <<< "$bundle" + +(cd istio ; ./deploy-istio.sh) +(cd bookinfo ; ./deploy-bookinfo.sh) diff --git a/usecases/federation/forward-secure-port.sh b/usecases/federation/forward-secure-port.sh new file mode 100755 index 00000000..80fb7b3d --- /dev/null +++ b/usecases/federation/forward-secure-port.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +INGRESS_POD=$(kubectl get pod -l istio=ingressgateway-mtls -n istio-system -o jsonpath="{.items[0].metadata.name}") +kubectl port-forward "$INGRESS_POD" 7000:7080 -n istio-system & diff --git a/usecases/federation/istio/deploy-istio.sh b/usecases/federation/istio/deploy-istio.sh new file mode 100755 index 00000000..67ba3fd9 --- /dev/null +++ b/usecases/federation/istio/deploy-istio.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +echo "Deploying Istio..." + +if [[ $HUB ]]; then + echo "Using HUB from environment: $HUB" + args="$args --set values.global.hub=$HUB" +else + echo "No HUB set, using default value from istio-config.yaml" +fi + +if [[ $TAG ]]; then + echo "Using TAG from environment: $TAG" + args="$args --set values.global.tag=$TAG" +else + echo "No TAG set, using default value from istio-config.yaml" +fi + +DIR="../../../POC" + +if [[ "$1" ]]; then + DIR=$1 +fi + +kubectl create ns istio-system +sleep 2 +istioctl install -f istio-config.yaml --skip-confirmation $args +kubectl apply -f $DIR/istio/auth.yaml diff --git a/usecases/federation/istio/istio-config.yaml b/usecases/federation/istio/istio-config.yaml new file mode 100644 index 00000000..1d470559 --- /dev/null +++ b/usecases/federation/istio/istio-config.yaml @@ -0,0 +1,118 @@ +# apiVersion: install.istio.io/v1alpha1 +apiVersion: operator.istio.io/v1alpha1 +kind: IstioOperator +metadata: + namespace: istio-system +spec: + profile: default + meshConfig: + trustDomain: example.org + values: + global: + hub: localhost:5000 + tag: my-build + imagePullPolicy: "Always" + imagePullSecrets: + - secret-registry + sidecarInjectorWebhook: + templates: + spire: | + spec: + containers: + - name: istio-proxy + env: + - name: CA_PROVIDER + value: "spiffe" + - name: SPIFFE_ENDPOINT_SOCKET + value: "unix:///run/spire/sockets/agent.sock" + volumeMounts: + - name: spire-agent-socket + mountPath: /run/spire/sockets + readOnly: true + volumes: + - name: spire-agent-socket + csi: + driver: "csi.spiffe.io" + components: + pilot: + k8s: + env: + # Disable istiod CA Sever functionality + - name: ENABLE_CA_SERVER + value: "false" + # Check that istio-agent's namespace and service account match the ones in the JWT token presented in the connection + - name: PILOT_ENABLE_XDS_IDENTITY_CHECK + value: "true" + # Configure the SPIFFE Workload API as the cert provider for istiod + - name: PILOT_CERT_PROVIDER + value: "spiffe" + - name: SPIFFE_ENDPOINT_SOCKET + value: "unix:///run/spire/sockets/agent.sock" + overlays: + - apiVersion: apps/v1 + kind: Deployment + name: istiod + patches: + - path: spec.template.spec.containers.[name:discovery].volumeMounts[7] + value: + name: spire-agent-socket + mountPath: "/run/spire/sockets" + readOnly: 
true + - path: spec.template.spec.volumes[7] + value: + name: spire-agent-socket + csi: + driver: "csi.spiffe.io" + ingressGateways: + - name: istio-ingressgateway + enabled: true + label: + istio: ingressgateway + k8s: + overlays: + - apiVersion: apps/v1 + kind: Deployment + name: istio-ingressgateway + patches: + - path: spec.template.spec.volumes[8] + value: + name: spire-agent-socket + csi: + driver: "csi.spiffe.io" + - path: spec.template.spec.containers.[name:istio-proxy].volumeMounts[8] + value: + name: spire-agent-socket + mountPath: "/run/spire/sockets" + readOnly: true + env: + - name: CA_PROVIDER + value: "spiffe" + - name: SPIFFE_ENDPOINT_SOCKET + value: "unix:///run/spire/sockets/agent.sock" + - name: istio-ingressgateway-mtls + enabled: true + label: + istio: ingressgateway-mtls + k8s: + podAnnotations: + spiffe.io/federatesWith: "domain.test" + overlays: + - apiVersion: apps/v1 + kind: Deployment + name: istio-ingressgateway-mtls + patches: + - path: spec.template.spec.volumes[8] + value: + name: spire-agent-socket + csi: + driver: "csi.spiffe.io" + - path: spec.template.spec.containers.[name:istio-proxy].volumeMounts[8] + value: + name: spire-agent-socket + mountPath: "/run/spire/sockets" + readOnly: true + env: + - name: CA_PROVIDER + value: "spiffe" + - name: SPIFFE_ENDPOINT_SOCKET + value: "unix:///run/spire/sockets/agent.sock" diff --git a/usecases/federation/spire/agent-configmap.yaml b/usecases/federation/spire/agent-configmap.yaml new file mode 100644 index 00000000..4b7c4f37 --- /dev/null +++ b/usecases/federation/spire/agent-configmap.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-agent + namespace: spire +data: + agent.conf: | + agent { + data_dir = "/run/spire" + log_level = "DEBUG" + server_address = "spire-server" + server_port = "8081" + socket_path = "/run/spire/sockets/agent.sock" + trust_bundle_path = "/run/spire/bundle/root-cert.pem" + trust_domain = "example.org" + } + + plugins { + NodeAttestor "k8s_psat" { + plugin_data { + # NOTE: Change this to your cluster name + cluster = "demo-cluster" + } + } + + KeyManager "memory" { + plugin_data { + } + } + + WorkloadAttestor "k8s" { + plugin_data { + # Defaults to the secure kubelet port by default. + # Minikube does not have a cert in the cluster CA bundle that + # can authenticate the kubelet cert, so skip validation. 
+ skip_kubelet_verification = true + } + } + + WorkloadAttestor "unix" { + plugin_data { + } + } + } + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8080" + live_path = "/live" + ready_path = "/ready" + } diff --git a/usecases/federation/spire/deploy-spire.sh b/usecases/federation/spire/deploy-spire.sh new file mode 100755 index 00000000..5f25ff05 --- /dev/null +++ b/usecases/federation/spire/deploy-spire.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +set -e + +# Parameterizing DIR folder in order to get demo-script running +DIR="../../../POC" + +if [[ "$1" ]]; then + DIR=$1 +fi + +# Create the k8s-workload-registrar crd, configmap and associated role bindingsspace +kubectl apply \ + -f $DIR/spire/k8s-workload-registrar-crd-cluster-role.yaml \ + -f $DIR/spire/k8s-workload-registrar-crd-configmap.yaml \ + -f $DIR/spire/spiffeid.spiffe.io_spiffeids.yaml + +# Create the server’s service account, configmap and associated role bindings +kubectl apply \ + -f $DIR/spire/server-account.yaml \ + -f $DIR/spire/spire-bundle-configmap.yaml \ + -f $DIR/spire/server-cluster-role.yaml + +# Deploy the server configmap and statefulset +kubectl apply \ + -f server-configmap.yaml \ + -f $DIR/spire/server-statefulset.yaml \ + -f $DIR/spire/server-service.yaml + +# Configuring and deploying the SPIRE Agent +kubectl apply \ + -f $DIR/spire/agent-account.yaml \ + -f $DIR/spire/agent-cluster-role.yaml + +sleep 2 + +kubectl apply \ + -f agent-configmap.yaml \ + -f $DIR/spire/agent-daemonset.yaml + +# Applying SPIFFE CSI Driver configuration +kubectl apply -f $DIR/spire/spiffe-csi-driver.yaml diff --git a/usecases/federation/spire/server-configmap.yaml b/usecases/federation/spire/server-configmap.yaml new file mode 100644 index 00000000..1afe2fe3 --- /dev/null +++ b/usecases/federation/spire/server-configmap.yaml @@ -0,0 +1,75 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-server + namespace: spire +data: + server.conf: | + server { + bind_address = "0.0.0.0" + bind_port = "8081" + socket_path = "/run/spire/sockets/server.sock" + trust_domain = "example.org" + data_dir = "/run/spire/data" + log_level = "DEBUG" + ca_key_type = "rsa-2048" + + default_svid_ttl = "1h" + ca_subject = { + country = ["US"], + organization = ["SPIFFE"], + common_name = "", + } + + federation { + federates_with "domain.test" { + bundle_endpoint_url = "https://bundle-endpoint.spire2.svc.cluster.local:8443" + bundle_endpoint_profile "https_spiffe" { + endpoint_spiffe_id = "spiffe://domain.test/spire/server" + } + } + } + } + + plugins { + DataStore "sql" { + plugin_data { + database_type = "sqlite3" + connection_string = "/run/spire/data/datastore.sqlite3" + } + } + + NodeAttestor "k8s_psat" { + plugin_data { + clusters = { + # NOTE: Change this to your cluster name + "demo-cluster" = { + use_token_review_api_validation = true + service_account_allow_list = ["spire:spire-agent"] + } + } + } + } + + KeyManager "disk" { + plugin_data { + keys_path = "/run/spire/data/keys.json" + } + } + + Notifier "k8sbundle" { + plugin_data { + namespace = "spire" + config_map = "trust-bundle" + config_map_key = "root-cert.pem" + } + } + } + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8080" + live_path = "/live" + ready_path = "/ready" + } diff --git a/usecases/federation/spire2/cleanup-spire.sh b/usecases/federation/spire2/cleanup-spire.sh new file mode 100755 index 00000000..200308f6 --- /dev/null +++ b/usecases/federation/spire2/cleanup-spire.sh @@ -0,0 +1,5 @@ +#!/bin/bash 
+ +kubectl delete clusterrole spire-server-trust-role +kubectl delete clusterrolebinding spire-server-trust-role-binding +kubectl delete namespace spire2 diff --git a/usecases/federation/spire2/deploy-spire.sh b/usecases/federation/spire2/deploy-spire.sh new file mode 100755 index 00000000..6618d510 --- /dev/null +++ b/usecases/federation/spire2/deploy-spire.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +set -e + +# Create the server’s service account, configmap and associated role bindings +kubectl apply \ + -f server-account.yaml \ + -f spire-bundle-configmap.yaml \ + -f server-cluster-role.yaml + +# Deploy the server configmap and statefulset +kubectl apply \ + -f server-configmap.yaml \ + -f server-statefulset.yaml \ + -f server-service.yaml diff --git a/usecases/federation/spire2/kustomization.yaml b/usecases/federation/spire2/kustomization.yaml new file mode 100644 index 00000000..f1dd8f5d --- /dev/null +++ b/usecases/federation/spire2/kustomization.yaml @@ -0,0 +1,14 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +namespace: spire2 + +resources: +- spire-namespace.yaml +- server-account.yaml +- server-cluster-role.yaml +- server-configmap.yaml +- server-service.yaml +- server-statefulset.yaml +- spire-bundle-configmap.yaml + diff --git a/usecases/federation/spire2/server-account.yaml b/usecases/federation/spire2/server-account.yaml new file mode 100644 index 00000000..f806b8fc --- /dev/null +++ b/usecases/federation/spire2/server-account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-server + namespace: spire2 diff --git a/usecases/federation/spire2/server-cluster-role.yaml b/usecases/federation/spire2/server-cluster-role.yaml new file mode 100644 index 00000000..f847bbcf --- /dev/null +++ b/usecases/federation/spire2/server-cluster-role.yaml @@ -0,0 +1,28 @@ +# ClusterRole to allow spire-server node attestor to query Token Review API +# and to be able to push certificate bundles to a configmap +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-trust-role +rules: +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["patch", "get", "list"] + +--- +# Binds above cluster role to spire-server service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-trust-role-binding +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire2 +roleRef: + kind: ClusterRole + name: spire-server-trust-role + apiGroup: rbac.authorization.k8s.io diff --git a/usecases/federation/spire2/server-configmap.yaml b/usecases/federation/spire2/server-configmap.yaml new file mode 100644 index 00000000..bcd2decb --- /dev/null +++ b/usecases/federation/spire2/server-configmap.yaml @@ -0,0 +1,64 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-server + namespace: spire2 +data: + server.conf: | + server { + bind_address = "0.0.0.0" + bind_port = "8081" + socket_path = "/run/spire/sockets/server.sock" + trust_domain = "domain.test" + data_dir = "/run/spire/data" + log_level = "DEBUG" + ca_key_type = "rsa-2048" + + default_svid_ttl = "1h" + ca_subject = { + country = ["US"], + organization = ["SPIFFE"], + common_name = "", + } + federation { + bundle_endpoint { + address = "0.0.0.0" + port = 8443 + } + } + } + + plugins { + DataStore "sql" { + plugin_data { + database_type = "sqlite3" + connection_string = "/run/spire/data/datastore.sqlite3" + } + } + + 
      NodeAttestor "k8s_psat" {
+        plugin_data {
+          clusters = {
+            # NOTE: Change this to your cluster name
+            "demo-cluster" = {
+              use_token_review_api_validation = true
+              service_account_allow_list = ["spire:spire-agent"]
+            }
+          }
+        }
+      }
+
+      KeyManager "disk" {
+        plugin_data {
+          keys_path = "/run/spire/data/keys.json"
+        }
+      }
+    }
+
+    health_checks {
+      listener_enabled = true
+      bind_address = "0.0.0.0"
+      bind_port = "8080"
+      live_path = "/live"
+      ready_path = "/ready"
+    }
diff --git a/usecases/federation/spire2/server-service.yaml b/usecases/federation/spire2/server-service.yaml
new file mode 100644
index 00000000..5b453d3f
--- /dev/null
+++ b/usecases/federation/spire2/server-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: bundle-endpoint
+  namespace: spire2
+spec:
+  type: NodePort
+  ports:
+  - name: http
+    port: 8443
+    targetPort: 8443
+    protocol: TCP
+  selector:
+    app: spire-server
diff --git a/usecases/federation/spire2/server-statefulset.yaml b/usecases/federation/spire2/server-statefulset.yaml
new file mode 100644
index 00000000..1a1cce54
--- /dev/null
+++ b/usecases/federation/spire2/server-statefulset.yaml
@@ -0,0 +1,64 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: spire-server
+  namespace: spire2
+  labels:
+    app: spire-server
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: spire-server
+  serviceName: spire-server
+  template:
+    metadata:
+      namespace: spire2
+      labels:
+        app: spire-server
+    spec:
+      serviceAccountName: spire-server
+      shareProcessNamespace: true
+      containers:
+      - name: spire-server
+        image: gcr.io/spiffe-io/spire-server:1.1.1
+        args:
+        - -config
+        - /run/spire/config/server.conf
+        livenessProbe:
+          httpGet:
+            path: /live
+            port: 8080
+          failureThreshold: 2
+          initialDelaySeconds: 15
+          periodSeconds: 60
+          timeoutSeconds: 3
+        readinessProbe:
+          httpGet:
+            path: /ready
+            port: 8080
+          initialDelaySeconds: 5
+          periodSeconds: 5
+        ports:
+        - containerPort: 8081
+        volumeMounts:
+        - name: spire-config
+          mountPath: /run/spire/config
+          readOnly: true
+        - name: spire-data
+          mountPath: /run/spire/data
+          readOnly: false
+      volumes:
+      - name: spire-config
+        configMap:
+          name: spire-server
+  volumeClaimTemplates:
+  - metadata:
+      name: spire-data
+      namespace: spire2
+    spec:
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          storage: 1Gi
diff --git a/usecases/federation/spire2/spire-bundle-configmap.yaml b/usecases/federation/spire2/spire-bundle-configmap.yaml
new file mode 100644
index 00000000..a54b744d
--- /dev/null
+++ b/usecases/federation/spire2/spire-bundle-configmap.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: trust-bundle
+  namespace: spire2
diff --git a/usecases/workload-to-ingress-upstream-disk/README.md b/usecases/workload-to-ingress-upstream-disk/README.md
new file mode 100644
index 00000000..604bbf48
--- /dev/null
+++ b/usecases/workload-to-ingress-upstream-disk/README.md
@@ -0,0 +1,217 @@
+# Connecting two workloads from different Mithril clusters using external disk SPIRE CA
+
+This section showcases mTLS communication between two workloads from different Mithril clusters
+within the same trust domain. In this use case, both clusters have a SPIRE Server that uses the
+`disk` UpstreamAuthority plugin to load CA credentials from disk; more information about this
+SPIRE setup can be found in the UpstreamAuthority disk example.
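+
+The `disk` UpstreamAuthority plugin reads a CA certificate and private key from the SPIRE
+Server's filesystem (the `cert_file_path` and `key_file_path` settings in its `plugin_data`).
+As a rough sketch only (the file names, secret name, and kubectl contexts below are
+hypothetical, not the assets shipped with this use case), a shared upstream CA could be
+generated once and made available to both clusters like this:
+
+```bash
+# Generate a self-signed root CA (hypothetical subject and file names).
+openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
+  -subj "/C=US/O=SPIFFE" \
+  -keyout upstream-ca.key -out upstream-ca.crt
+
+# Load the same key pair into each cluster; the spire-server StatefulSet
+# would then mount this secret at the path the disk UpstreamAuthority
+# plugin is configured to read.
+for ctx in client-cluster server-cluster; do
+  kubectl --context "$ctx" -n spire create secret generic upstream-ca \
+    --from-file=upstream-ca.crt --from-file=upstream-ca.key
+done
+```
+
+Because both SPIRE Servers chain back to the same root CA, workloads in the two clusters can
+validate each other's SVIDs even though each cluster runs its own server.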
+
+# Overview
+
+![Structure for each Mithril cluster with the bookinfo example.](img/overview.png)
+
+The Mithril changes in Istiod and the Istio Agent make it possible to use the SPIRE Workload
+API to fetch identities for every entity within the mesh. The identities for Istiod, the Istio
+Ingress and Egress gateways, and the workloads are all generated by SPIRE by leveraging the k8s
+workload registrar; the Istio Agent sidecar of each workload then fetches and pushes its
+credential material to the Envoy proxy, which uses it to communicate and perform mTLS.
+
+# Configuring Istio Ingress Gateway for mTLS
+
+To showcase mTLS communication between the workloads, we need to configure a Gateway,
+a VirtualService and a DestinationRule on the server-side Ingress Gateway so that it requires
+HTTPS on incoming requests from outside the cluster.
+
+```yaml
+apiVersion: networking.istio.io/v1alpha3
+kind: Gateway
+metadata:
+  name: bookinfo-gateway
+spec:
+  selector:
+    istio: ingressgateway
+  servers:
+  - port:
+      number: 8080
+      name: https
+      protocol: HTTPS
+    tls:
+      mode: ISTIO_MUTUAL
+    hosts:
+    - "*"
+---
+apiVersion: networking.istio.io/v1alpha3
+kind: VirtualService
+metadata:
+  name: bookinfo-service
+spec:
+  hosts:
+  - "*"
+  gateways:
+  - bookinfo-gateway
+  http:
+  - match:
+    - uri:
+        exact: /productpage
+    - uri:
+        prefix: /static
+    - uri:
+        exact: /login
+    - uri:
+        exact: /logout
+    - uri:
+        prefix: /api/v1/products
+    route:
+    - destination:
+        host: productpage.default.svc.cluster.local
+        port:
+          number: 9080
+---
+apiVersion: "networking.istio.io/v1alpha3"
+kind: "DestinationRule"
+metadata:
+  name: "enable-mtls"
+spec:
+  host: "*.svc.cluster.local"
+  trafficPolicy:
+    tls:
+      mode: ISTIO_MUTUAL
+```
+
+Using this configuration, we route requests from outside the cluster to internal services like the _/productpage_ service from the bookinfo example.
+
+## Exposing the ingress gateway of the server cluster
+
+```bash
+INGRESS_POD=$(kubectl get pod -l istio=ingressgateway -n istio-system -o jsonpath="{.items[0].metadata.name}")
+kubectl port-forward "$INGRESS_POD" --address 0.0.0.0 8000:8080 -n istio-system
+```
+
+# Perform a curl between the workloads
+
+We can deploy and use the [**sleep**](https://github.com/istio/istio/blob/master/samples/sleep/sleep.yaml)
+sample app, with the necessary annotation template from Mithril, on a second cluster
+and use curl to test the connection between the sleep app in the first Mithril
+cluster and the Product Page service in the server-side cluster.
If you have automatic +sidecar injection enabled: + +```bash +$ kubectl apply -n istio-system -f - < GET /status/200 HTTP/1.1 +> Host: app.example.org:8000 +> User-Agent: curl/7.79.1-DEV +> Accept: */* +> +* Mark bundle as not supporting multiuse +< HTTP/1.1 200 OK +< server: envoy +< date: Sun, 03 Oct 2021 21:27:30 GMT +< content-type: text/html; charset=utf-8 +< access-control-allow-origin: * +< access-control-allow-credentials: true +< content-length: 0 +< x-envoy-upstream-service-time: 2 +< +``` + +### Inspecting the credential materials of the workloads + +```bash +$ openssl s_client -showcerts -connect 10.0.1.50:8000 +CONNECTED(00000003) +Can't use SSL_get_servername +depth=2 C = US, O = SPIFFE +verify error:num=19:self signed certificate in certificate chain +verify return:1 +depth=2 C = US, O = SPIFFE +verify return:1 +depth=1 C = US, O = SPIFFE, OU = DOWNSTREAM-1 +verify return:1 +depth=0 C = US, O = SPIRE, CN = istio-ingressgateway-5c8d74fb97-7jjnf +verify return:1 +--- +Certificate chain + 0 s:C = US, O = SPIRE, CN = istio-ingressgateway-5c8d74fb97-7jjnf + i:C = US, O = SPIFFE, OU = DOWNSTREAM-1 +-----BEGIN CERTIFICATE----- +MIIDbzCCAlegAwIBAgIRAOqitjQfMARR/z5PNG9IrQswDQYJKoZIhvcNAQELBQAw +NTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBlNQSUZGRTEVMBMGA1UECxMMRE9XTlNU +UkVBTS0xMB4XDTIxMTAwMzIxMTkxM1oXDTIxMTAwMzIxNTkxNlowTTELMAkGA1UE +BhMCVVMxDjAMBgNVBAoTBVNQSVJFMS4wLAYDVQQDEyVpc3Rpby1pbmdyZXNzZ2F0 +ZXdheS01YzhkNzRmYjk3LTdqam5mMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE +Bnh4/9UU79UI4/B8lV5qKMaQPdC5o49dCzCYlYT6v+e8qKkUfja5NpZwndkV2FuJ +e2/KgIHOht3EpuFdXrRFpaOCASswggEnMA4GA1UdDwEB/wQEAwIDqDAdBgNVHSUE +FjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU +N7B14vVV0vH47QxmKBKKBIK1dVQwHwYDVR0jBBgwFoAUcouNphnIKBARp9fSQ+Oj +FlYl/GUwgacGA1UdEQSBnzCBnIIlaXN0aW8taW5ncmVzc2dhdGV3YXktNWM4ZDc0 +ZmI5Ny03ampuZoIlaXN0aW8taW5ncmVzc2dhdGV3YXkuaXN0aW8tc3lzdGVtLnN2 +Y4ZMc3BpZmZlOi8vZXhhbXBsZS5vcmcvbnMvaXN0aW8tc3lzdGVtL3NhL2lzdGlv +LWluZ3Jlc3NnYXRld2F5LXNlcnZpY2UtYWNjb3VudDANBgkqhkiG9w0BAQsFAAOC +AQEAtvyssqDU4kfj5aUl4fgSDOAQFbeqZKjG5VqCVTw21Qf867Jq1koKP3wY9v0h +lAJaluOfwViXMpU3/liyFh+9d+o35rbRWppRaoTmTwYs5/5tdLsIPZCOrS7Fijce +PzrEarPXfx1vzrCGe4ti2VMCz6n1CcGSwiz38tU4WC6qFoQh/b5ZAdCtBmmnRJbJ +ONcLoJWaOGs5AntFfJuvykmViJRkA/YMfJdOObUfEk0d+Sx9czb1TuB/rIrCDw9C +YGxcrFwquTJ6rM8xUJAFEk1slqwhEz20+RfFvapDxiBUgC6R1j0P9f4q1wbGfaKd +b+k6bDDBqkAKsogCXF0mSwfQaw== +-----END CERTIFICATE----- + 1 s:C = US, O = SPIFFE, OU = DOWNSTREAM-1 + i:C = US, O = SPIFFE +-----BEGIN CERTIFICATE----- +MIICojCCAkigAwIBAgIQVVi4qUQwCKuBcI2RdJ4q1DAKBggqhkjOPQQDAjAeMQsw +CQYDVQQGEwJVUzEPMA0GA1UEChMGU1BJRkZFMB4XDTIxMTAwMzIwNTkwNloXDTIx +MTAwMzIxNTkxNlowNTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBlNQSUZGRTEVMBMG +A1UECxMMRE9XTlNUUkVBTS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA2WhV/3t2SjFi86z8fCgTDxdNyp7dUjKyBPzFLL5TVgYDecC6So0AS5WsSFlr +9gIoAqfd7wtoy2OE6lpoYc8edHZj7TMZYvy3W7qsivA3XpX7iW/2s2E/fWCUU3hx +3F/jGHX3enXYgg0INVpL+Oooaj22u3nvzT1cZr9fPMQ6K92OnChXMS6aDKDsXnvp +ootQEDo0I4mODiax6oOp+11Df2Ckc+KK8SRceotufDyA1vDsvqxZZjBYlsNTtewg +eakszrhqMHGmDQWM5mF5RoHHN9RqYHX34SUoZOQCNXkmrgUkeXM9e0ZAPuMXYDeJ +IeX2PRm7v/8Kl1A7ELAVlULQ5wIDAQABo4GFMIGCMA4GA1UdDwEB/wQEAwIBhjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRyi42mGcgoEBGn19JD46MWViX8ZTAf +BgNVHSMEGDAWgBQGbUPQ/PIznziF5vBNoOzwaxKDIzAfBgNVHREEGDAWhhRzcGlm +ZmU6Ly9leGFtcGxlLm9yZzAKBggqhkjOPQQDAgNIADBFAiAiz88+ZZxhhGGF6nDf +MIGePf3DnJZiJTRyKODdCAyqzAIhAK/6mRT2jA3YKtn34/rLh3+98ll2zDUkLRHp +hSJFy4IW +-----END CERTIFICATE----- + 2 s:C = US, O = SPIFFE + i:C = US, O = SPIFFE +-----BEGIN CERTIFICATE----- 
+MIIBjTCCATSgAwIBAgIBADAKBggqhkjOPQQDAjAeMQswCQYDVQQGEwJVUzEPMA0G +A1UEChMGU1BJRkZFMB4XDTIxMTAwMzAzMjY0NFoXDTIxMTAxMDAzMjY1NFowHjEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBlNQSUZGRTBZMBMGByqGSM49AgEGCCqGSM49 +AwEHA0IABMEMYiqbVz7ypPBHtNyb4ZL9mGKtp2FBiQE6aDjrfIgbZt8j++6kU5n7 +toPy2+YVwBF2eaajs8CD977xl22vaQ2jYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQGbUPQ/PIznziF5vBNoOzwaxKDIzAfBgNV +HREEGDAWhhRzcGlmZmU6Ly9leGFtcGxlLm9yZzAKBggqhkjOPQQDAgNHADBEAiBR +JLEuS2Bakvmn5Az8YJV62owMr9WnJV7CP9q3GykGMQIgNNCTTZkEHoDxNvKzZbqx +/m45e9OU/q3ggP7k6vZtMIo= +-----END CERTIFICATE----- +--- +Server certificate +subject=C = US, O = SPIRE, CN = istio-ingressgateway-5c8d74fb97-7jjnf + +issuer=C = US, O = SPIFFE, OU = DOWNSTREAM-1 + +--- +``` + \ No newline at end of file diff --git a/usecases/workload-to-ingress-upstream-spire/client-cluster/create-kind-cluster.sh b/usecases/workload-to-ingress-upstream-spire/client-cluster/create-kind-cluster.sh new file mode 100644 index 00000000..67bb6c65 --- /dev/null +++ b/usecases/workload-to-ingress-upstream-spire/client-cluster/create-kind-cluster.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +set -o errexit + +# create registry container unless it already exists +reg_name='kind-registry' +reg_port='5000' +running="$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" +if [ "${running}" != 'true' ]; then + docker run \ + -d --restart=always -p "127.0.0.1:${reg_port}:5000" --name "${reg_name}" \ + registry:2 +fi + +# create a cluster with the local registry enabled in container, and configured +# with the Token API server. +cat < "${DIR}"/root-cert.pem + +log "Creating regristration entry for nestedA spire-server" +./spire-server entry create \ + -parentID "spiffe://example.org/spire/agent/x509pop/$(fingerprint "${DIR}"/nestedA/agent-nestedA.crt.pem)" \ + -spiffeID "spiffe://example.org/ns/spire/sa/spire-server-nestedA" -dns spire-server-0 -dns spire-server.spire.svc \ + -selector "unix:uid:0" \ + -downstream \ + -ttl 3600 -socketPath="/tmp/spire-server/private/api.sock" + +log "Creating regristration entry for nestedB spire-server" +./spire-server entry create \ + -parentID "spiffe://example.org/spire/agent/x509pop/$(fingerprint "${DIR}"/nestedB/agent-nestedB.crt.pem)" \ + -spiffeID "spiffe://example.org/ns/spire/sa/spire-server-nestedB" -dns spire-server-0 -dns spire-server.spire.svc \ + -selector "unix:uid:0" \ + -downstream \ + -ttl 3600 -socketPath="/tmp/spire-server/private/api.sock"
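+
+# Optional, illustrative sanity check (an assumption, not part of the original script):
+# list the registration entries on the root server so the two downstream
+# entries created above can be verified.
+log "Listing registration entries"
+./spire-server entry show -socketPath="/tmp/spire-server/private/api.sock"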