From 8a2fcafcf35556f60855b124a2072cb34a6593f6 Mon Sep 17 00:00:00 2001
From: ruban suthan
Date: Fri, 24 Nov 2023 16:25:36 +1100
Subject: [PATCH] Round two PR review changes

---
 patterns/blueprint-vpc-lattice/README.md      | 38 ++++++-------
 .../blueprint-vpc-lattice/cluster1/main.tf    | 29 +-------------
 .../blueprint-vpc-lattice/cluster1/outputs.tf |  4 +-
 .../blueprint-vpc-lattice/cluster2/main.tf    | 29 +-------------
 .../blueprint-vpc-lattice/cluster2/outputs.tf |  4 +-
 5 files changed, 18 insertions(+), 86 deletions(-)

diff --git a/patterns/blueprint-vpc-lattice/README.md b/patterns/blueprint-vpc-lattice/README.md
index 0d7abee659..139045e2eb 100644
--- a/patterns/blueprint-vpc-lattice/README.md
+++ b/patterns/blueprint-vpc-lattice/README.md
@@ -3,9 +3,7 @@
This pattern demonstrates how a service in one EKS cluster communicates with a service in another cluster and VPC using VPC Lattice. It also shows how service discovery works, with support for custom domain names for services, and how VPC Lattice enables services in EKS clusters with overlapping CIDRs to communicate with each other without the need for networking constructs such as private NAT Gateways and Transit Gateways.

- [Documentation](https://aws.amazon.com/vpc/lattice/)
-- [Launch Blog](https://aws.amazon.com/blogs/containers/amazon-vpc-cni-now-supports-kubernetes-network-policies/)
-
-## Scenario
+- [Launch Blog](https://aws.amazon.com/blogs/containers/application-networking-with-amazon-vpc-lattice-and-amazon-eks/)

The solution architecture used to demonstrate cross-cluster connectivity with VPC Lattice is shown in the following diagram. The following are the relevant aspects of this architecture.

@@ -46,16 +44,11 @@ Deploy the datastore service to the EKS cluster in cluster2. This service fronts
```shell
# Apply the Kubernetes manifests that define the GatewayClass and Gateway resources to both clusters. The Gateway API controller then creates a Lattice service network with the same name, eks-lattice-network, as the Gateway resource if one doesn't exist and attaches the VPCs to the service network.
-export CLUSTER_2=cluster2
-export AWS_DEFAULT_REGION=$(aws configure get region)
-export AWS_ACCOUNT_NUMBER=$(aws sts get-caller-identity --query "Account" --output text)
-
-export CTX_CLUSTER_2=arn:aws:eks:$AWS_DEFAULT_REGION:${AWS_ACCOUNT_NUMBER}:cluster/$CLUSTER_2
-
+aws eks update-kubeconfig --name cluster2
-kubectl apply --context="${CTX_CLUSTER_2}" -f ./$CLUSTER_2/gateway-lattice.yml # GatewayClass and Gateway
-kubectl apply --context="${CTX_CLUSTER_2}" -f ./$CLUSTER_2/route-datastore-canary.yml # HTTPRoute and ClusterIP Services
-kubectl apply --context="${CTX_CLUSTER_2}" -f ./$CLUSTER_2/datastore.yml # Deployment
+kubectl apply -f ./cluster2/gateway-lattice.yml # GatewayClass and Gateway
+kubectl apply -f ./cluster2/route-datastore-canary.yml # HTTPRoute and ClusterIP Services
+kubectl apply -f ./cluster2/datastore.yml # Deployment
```
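As a quick check after applying the manifests above, you can confirm that the Gateway was accepted and that the corresponding Lattice service network exists. The commands below are a minimal sketch: they assume the Gateway resource is named eks-lattice-network (matching the comment in the block above) and that your AWS CLI version includes the vpc-lattice commands.

```shell
# List Gateway resources in all namespaces and check the eks-lattice-network entry was accepted
kubectl get gateway -A

# Confirm the Lattice service network of the same name was created by the controller
aws vpc-lattice list-service-networks --query "items[?name=='eks-lattice-network']"
```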
5. Deploy the gateway lattice and the frontend service on cluster1

@@ -63,17 +56,10 @@ kubectl apply --context="${CTX_CLUSTER_2}" -f ./$CLUSTER_2/datastore.yml
The frontend service is configured to communicate with the datastore service in cluster2 using its custom domain name.

```shell
-export CLUSTER_1=cluster1
-export AWS_DEFAULT_REGION=$(aws configure get region)
-export AWS_ACCOUNT_NUMBER=$(aws sts get-caller-identity --query "Account" --output text)
-
-aws eks update-kubeconfig --name $CLUSTER_1 --region $AWS_DEFAULT_REGION
-
-export CTX_CLUSTER_1=arn:aws:eks:$AWS_DEFAULT_REGION:${AWS_ACCOUNT_NUMBER}:cluster/$CLUSTER_1
-
+aws eks update-kubeconfig --name cluster1
-kubectl apply --context="${CTX_CLUSTER_1}" -f ./$CLUSTER_1/gateway-lattice.yml # GatewayClass and Gateway
-kubectl apply --context="${CTX_CLUSTER_1}" -f ./$CLUSTER_1/frontend.yml # Frontend service
+kubectl apply -f ./cluster1/gateway-lattice.yml # GatewayClass and Gateway
+kubectl apply -f ./cluster1/frontend.yml # Frontend service
```

## Testing if the cluster1 service can talk to the cluster2 service via VPC Lattice

@@ -81,11 +67,11 @@ kubectl apply --context="${CTX_CLUSTER_1}" -f ./$CLUSTER_1/frontend.yml # Front
The shell commands below use kubectl port-forward to forward outgoing traffic from a local port to port 3000 on one of the pods of the frontend service, which allows us to test this use case end-to-end without needing any load balancer.

```shell
-POD=$(kubectl -context="${CTX_CLUSTER_1}" get pod -n apps -l app=frontend -o jsonpath="{.items[0].metadata.name}")
-kubectl -context="${CTX_CLUSTER_1}" -n apps port-forward ${POD} 80:3000 # Port Forwarding
+POD=$(kubectl get pod -n apps -l app=frontend -o jsonpath="{.items[0].metadata.name}")
+kubectl -n apps port-forward ${POD} 80:3000 # Port Forwarding

-curl -X GET http://localhost/popular/category|jq
-curl -X GET http://localhost/summary|jq # you could retry the summary to see if you get a different results from different versions
+curl -X GET http://localhost/popular/category
+curl -X GET http://localhost/summary # you can retry the summary call to see if you get different results from different versions
```

diff --git a/patterns/blueprint-vpc-lattice/cluster1/main.tf b/patterns/blueprint-vpc-lattice/cluster1/main.tf
index 7c81f5acd2..9289a8de01 100644
--- a/patterns/blueprint-vpc-lattice/cluster1/main.tf
+++ b/patterns/blueprint-vpc-lattice/cluster1/main.tf
@@ -31,7 +31,6 @@ provider "helm" {
data "aws_availability_zones" "available" {}
data "aws_ecrpublic_authorization_token" "token" {}
data "aws_caller_identity" "identity" {}
-data "aws_region" "current" {}

locals {
  name = basename(path.cwd)
@@ -143,18 +142,6 @@ module "addons" {
      {
        name  = "clusterName"
        value = module.eks.cluster_name
-      },
-      {
-        name  = "awsAccountId"
-        value = local.region
-      },
-      {
-        name  = "awsAccountId"
-        value = data.aws_caller_identity.identity.account_id
-      },
-      {
-        name  = "awsRegion"
-        value = local.region
      }
    ]

@@ -163,11 +150,7 @@ module "addons" {
}

data "aws_ec2_managed_prefix_list" "ipv4" {
-  name = "com.amazonaws.${data.aws_region.current.name}.vpc-lattice"
-}
-
-data "aws_ec2_managed_prefix_list" "ipv6" {
-  name = "com.amazonaws.${data.aws_region.current.name}.ipv6.vpc-lattice"
+  name = "com.amazonaws.${local.region}.vpc-lattice"
}

@@ -180,14 +163,4 @@ resource "aws_security_group_rule" "vpc_lattice_ipv4_ingress" {
  to_port           = 0
  protocol          = "-1"
  prefix_list_ids   = [data.aws_ec2_managed_prefix_list.ipv4.id]
-}
-
-resource "aws_security_group_rule" "vpc_lattice_ipv6_ingress" {
-  description       = "VPC lattice ivp6 ingress"
-  type              = "ingress"
-  security_group_id = module.eks.cluster_security_group_id
-  from_port         = 0
-  to_port           = 0
-  protocol          = "-1"
-  prefix_list_ids   = [data.aws_ec2_managed_prefix_list.ipv6.id]
}
\ No newline at end of file
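The ipv4 data source above now derives the VPC Lattice managed prefix list name from local.region instead of a separate aws_region data source. If you want to sanity-check which prefix list the ingress rule will reference, a lookup along these lines should work (a sketch only; replace us-west-2 with the region the cluster is deployed in):

```shell
# Look up the AWS-managed prefix list for VPC Lattice in a given region
aws ec2 describe-managed-prefix-lists \
  --filters Name=prefix-list-name,Values=com.amazonaws.us-west-2.vpc-lattice \
  --query "PrefixLists[].{Id:PrefixListId,Name:PrefixListName}" \
  --output table
```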
diff --git a/patterns/blueprint-vpc-lattice/cluster1/outputs.tf b/patterns/blueprint-vpc-lattice/cluster1/outputs.tf
index c952ef95d0..42ce6f201d 100644
--- a/patterns/blueprint-vpc-lattice/cluster1/outputs.tf
+++ b/patterns/blueprint-vpc-lattice/cluster1/outputs.tf
@@ -1,4 +1,4 @@
output "configure_kubectl" {
  description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
-  value = "aws eks update-kubeconfig --name ${module.eks.cluster_name} --alias ${module.eks.cluster_name} --region ${local.region}"
-}
+  value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_name}"
+}
\ No newline at end of file

diff --git a/patterns/blueprint-vpc-lattice/cluster2/main.tf b/patterns/blueprint-vpc-lattice/cluster2/main.tf
index 37a2c62c67..38bfea5565 100644
--- a/patterns/blueprint-vpc-lattice/cluster2/main.tf
+++ b/patterns/blueprint-vpc-lattice/cluster2/main.tf
@@ -38,7 +38,6 @@ data "aws_ecrpublic_authorization_token" "token" {
  provider = aws.virginia
}
data "aws_caller_identity" "identity" {}
-data "aws_region" "current" {}

locals {
  name = basename(path.cwd)
@@ -150,18 +149,6 @@ module "addons" {
      {
        name  = "clusterName"
        value = module.eks.cluster_name
-      },
-      {
-        name  = "awsAccountId"
-        value = local.region
-      },
-      {
-        name  = "awsAccountId"
-        value = data.aws_caller_identity.identity.account_id
-      },
-      {
-        name  = "awsRegion"
-        value = local.region
      }
    ]

@@ -170,11 +157,7 @@ module "addons" {
}

data "aws_ec2_managed_prefix_list" "ipv4" {
-  name = "com.amazonaws.${data.aws_region.current.name}.vpc-lattice"
-}
-
-data "aws_ec2_managed_prefix_list" "ipv6" {
-  name = "com.amazonaws.${data.aws_region.current.name}.ipv6.vpc-lattice"
+  name = "com.amazonaws.${local.region}.vpc-lattice"
}

@@ -188,13 +171,3 @@ resource "aws_security_group_rule" "vpc_lattice_ipv4_ingress" {
  protocol          = "-1"
  prefix_list_ids   = [data.aws_ec2_managed_prefix_list.ipv4.id]
}
-
-resource "aws_security_group_rule" "vpc_lattice_ipv6_ingress" {
-  description       = "VPC lattice ivp6 ingress"
-  type              = "ingress"
-  security_group_id = module.eks.cluster_security_group_id
-  from_port         = 0
-  to_port           = 0
-  protocol          = "-1"
-  prefix_list_ids   = [data.aws_ec2_managed_prefix_list.ipv6.id]
-}
\ No newline at end of file

diff --git a/patterns/blueprint-vpc-lattice/cluster2/outputs.tf b/patterns/blueprint-vpc-lattice/cluster2/outputs.tf
index 208a421c92..7de579108f 100644
--- a/patterns/blueprint-vpc-lattice/cluster2/outputs.tf
+++ b/patterns/blueprint-vpc-lattice/cluster2/outputs.tf
@@ -21,5 +21,5 @@ output "postgres_password" {

output "configure_kubectl" {
  description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
-  value = "aws eks update-kubeconfig --name ${module.eks.cluster_name} --alias ${module.eks.cluster_name} --region ${local.region}"
-}
+  value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_name}"
+}
\ No newline at end of file
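With both outputs.tf files emitting the update-kubeconfig command in the same form, the kubeconfig for each cluster can be refreshed straight from the Terraform outputs. A minimal sketch, assuming both stacks have already been applied from the cluster1 and cluster2 directories:

```shell
# Update kubeconfig for cluster1 using its configure_kubectl output
terraform -chdir=cluster1 output -raw configure_kubectl | bash

# Repeat for cluster2
terraform -chdir=cluster2 output -raw configure_kubectl | bash
```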