diff --git a/patterns/blueprint-vpc-lattice/README.md b/patterns/blueprint-vpc-lattice/README.md index 3249bce645..0d7abee659 100644 --- a/patterns/blueprint-vpc-lattice/README.md +++ b/patterns/blueprint-vpc-lattice/README.md @@ -20,7 +20,7 @@ AWS Gateway API controller is used in both clusters to manage the Kubernetes Gat See [here](https://aws-ia.github.io/terraform-aws-eks-blueprints/getting-started/#prerequisites) for the prerequisites and steps to deploy this pattern. -1. set up the first cluster with its own VPC and the second +1. set up the first cluster with its own VPC and the second with an aurora postgres DB ```shell # setting up the cluster1 @@ -33,32 +33,35 @@ See [here](https://aws-ia.github.io/terraform-aws-eks-blueprints/getting-started terraform apply ``` -2. Initialize the aurora postgress database for cluster2 vpc refer [here](./cluster2/postgres-setup/README.md) +2. Initialize the aurora postgres database for cluster2 vpc refer [here](./cluster2/postgres-setup/README.md) 3. Initialize Kubernetes secrets for cluster2 ```shell -cd cluster2 +# assuming you are already in the /cluster2 folder chmod +x secrets.sh && ./secrets.sh ``` 4. Deploy the kubernetes artefacts for cluster2 +Deploy the datastore service to the EKS cluster in cluster2. This service fronts an Aurora PostgreSQL database and exposes REST API endpoints with path-prefixes /popular and /summary. To demonstrate canary shifting of traffic, deploy two versions of the datastore service to the cluster as shown below. + ```shell +# Apply Kubernetes set of manifests to both clusters that defines the GatewayClass and Gateway resources. The Gateway API controller then creates a Lattice service network with the same name, eks-lattice-network, as that of the Gateway resource if one doesn’t exist and attaches the VPCs to the service network. 
export CLUSTER_2=cluster2 export AWS_DEFAULT_REGION=$(aws configure get region) export AWS_ACCOUNT_NUMBER=$(aws sts get-caller-identity --query "Account" --output text) -aws eks update-kubeconfig --name $CLUSTER_2 --region $AWS_DEFAULT_REGION - export CTX_CLUSTER_2=arn:aws:eks:$AWS_DEFAULT_REGION:${AWS_ACCOUNT_NUMBER}:cluster/$CLUSTER_2 -kubectl apply --context="${CTX_CLUSTER_2}" -f ./$CLUSTER_2/gateway-lattice.yaml # GatewayClass and Gateway -kubectl apply --context="${CTX_CLUSTER_2}" -f ./$CLUSTER_2/route-datastore-canary.yaml # HTTPRoute and ClusterIP Services -kubectl apply --context="${CTX_CLUSTER_2}" -f ./$CLUSTER_2/datastore.yaml # Deployment +kubectl apply --context="${CTX_CLUSTER_2}" -f ./$CLUSTER_2/gateway-lattice.yml # GatewayClass and Gateway +kubectl apply --context="${CTX_CLUSTER_2}" -f ./$CLUSTER_2/route-datastore-canary.yml # HTTPRoute and ClusterIP Services +kubectl apply --context="${CTX_CLUSTER_2}" -f ./$CLUSTER_2/datastore.yml # Deployment ``` 5. Deploy the gateway lattice and the frontend service on cluster1 +The frontend service is configured to communicate with the datastore service in cluster2 using its custom domain name. 
+ ```shell export CLUSTER_1=cluster1 export AWS_DEFAULT_REGION=$(aws configure get region) @@ -69,13 +72,30 @@ aws eks update-kubeconfig --name $CLUSTER_1 --region $AWS_DEFAULT_REGION export CTX_CLUSTER_1=arn:aws:eks:$AWS_DEFAULT_REGION:${AWS_ACCOUNT_NUMBER}:cluster/$CLUSTER_1 -kubectl apply --context="${CTX_CLUSTER_1}" -f ./$CLUSTER_1/gateway-lattice.yaml # GatewayClass and Gateway -kubectl apply --context="${CTX_CLUSTER_1}" -f ./$CLUSTER_1/frontend.yaml +kubectl apply --context="${CTX_CLUSTER_1}" -f ./$CLUSTER_1/gateway-lattice.yml # GatewayClass and Gateway +kubectl apply --context="${CTX_CLUSTER_1}" -f ./$CLUSTER_1/frontend.yml # Frontend service ``` +## Testing if cluster1 service could talk to cluster2 service via VPC lattice + +The shell commands below use kubectl port-forward to forward outgoing traffic from a local port to the server port 3000 on one of the pods of the frontend service, which allows us to test this use case end-to-end without needing any load balancer. + +```shell +POD=$(kubectl --context="${CTX_CLUSTER_1}" get pod -n apps -l app=frontend -o jsonpath="{.items[0].metadata.name}") +kubectl --context="${CTX_CLUSTER_1}" -n apps port-forward ${POD} 80:3000 # Port Forwarding + +curl -X GET http://localhost/popular/category | jq +curl -X GET http://localhost/summary | jq # you could retry the summary to see if you get different results from different versions + +``` ## Destroy +To tear down and remove the resources created in this example: + ```shell -chmod +x ./destroy.sh && ./destroy.sh +cd cluster1 +terraform apply -destroy -auto-approve +cd ../cluster2 +terraform apply -destroy -auto-approve ``` diff --git a/patterns/blueprint-vpc-lattice/cluster2/aurora.tf b/patterns/blueprint-vpc-lattice/cluster2/aurora.tf index 6509169846..8644d9e231 100644 --- a/patterns/blueprint-vpc-lattice/cluster2/aurora.tf +++ b/patterns/blueprint-vpc-lattice/cluster2/aurora.tf @@ -6,7 +6,7 @@ module "rds-aurora" { # insert the 5 required variables here password = 
random_password.password.result - username = "admin" + username = "admins" private_subnet_ids_p = module.vpc.private_subnets private_subnet_ids_s = null region = local.region diff --git a/patterns/blueprint-vpc-lattice/cluster2/secrets.sh b/patterns/blueprint-vpc-lattice/cluster2/secrets.sh index b4871c1b52..c6e598a161 100755 --- a/patterns/blueprint-vpc-lattice/cluster2/secrets.sh +++ b/patterns/blueprint-vpc-lattice/cluster2/secrets.sh @@ -1,5 +1,5 @@ ##!/bin/bash -DBHOST="$(terraform output -raw postgres_host)" +DBHOST="$(terraform output -json postgres_host | jq -r '.[0]')" DBUSER="$(terraform output -raw postgres_username)" DBPASSWORD="$(terraform output -raw postgres_password)" DBPORT="$(terraform output -raw postgres_port)" @@ -12,15 +12,12 @@ AWS_DEFAULT_REGION=$(aws configure get region) AWS_ACCOUNT_NUMBER=$(aws sts get-caller-identity --query "Account" --output text) +aws eks update-kubeconfig --name $CLUSTER_2 --region $AWS_DEFAULT_REGION export CTX_CLUSTER_2=arn:aws:eks:$AWS_DEFAULT_REGION:${AWS_ACCOUNT_NUMBER}:cluster/$CLUSTER_2 -kubectl apply --context="${CTX_CLUSTER_1}" -f ./$CLUSTER_1/gateway-lattice.yml -kubectl apply --context="${CTX_CLUSTER_2}" -f ./$CLUSTER_2/gateway-lattice.yml - - - +# setting up the cluster cluster secrets kubectl create --context="${CTX_CLUSTER_2}" ns apps kubectl create --context="${CTX_CLUSTER_2}" secret generic postgres-credentials \ --from-literal=POSTGRES_HOST="${DBHOST}" \