diff --git a/.github/scripts/plan-examples.py b/.github/scripts/plan-examples.py
index 2be720a15b..e05d8b1a9d 100644
--- a/.github/scripts/plan-examples.py
+++ b/.github/scripts/plan-examples.py
@@ -3,23 +3,23 @@
 import re


-def get_examples():
+def get_patterns():
     """
-    Get all Terraform example root directories using their respective `versions.tf`;
+    Get all pattern root directories using their respective `main.tf`;
     returning a string formatted json array of the example directories minus those
     that are excluded
     """
     exclude = {
-        'examples/appmesh-mtls', # excluded until Rout53 is setup
-        'examples/blue-green-upgrade/environment',
-        'examples/blue-green-upgrade/modules/eks_cluster',
-        'examples/istio-multi-cluster/1.cluster1', # relies on remote state
-        'examples/istio-multi-cluster/2.cluster2', # relies on remote state
-        'examples/privatelink-access',
+        'patterns/appmesh-mtls', # excluded until Route53 is set up
+        'patterns/blue-green-upgrade/environment',
+        'patterns/blue-green-upgrade/modules/eks_cluster',
+        'patterns/istio-multi-cluster/1.cluster1', # relies on remote state
+        'patterns/istio-multi-cluster/2.cluster2', # relies on remote state
+        'patterns/privatelink-access',
     }

     projects = {
-        x.replace('/versions.tf', '')
-        for x in glob.glob('patterns/**/versions.tf', recursive=True)
+        x.replace('/main.tf', '')
+        for x in glob.glob('patterns/**/main.tf', recursive=True)
         if not re.match(r'^.+/_', x)
     }

@@ -27,4 +27,4 @@ def get_examples():

 if __name__ == '__main__':
-    get_examples()
+    get_patterns()
diff --git a/.github/workflows/e2e-parallel-destroy.yml b/.github/workflows/e2e-parallel-destroy.yml
index e3c38a0058..5c5f538756 100644
--- a/.github/workflows/e2e-parallel-destroy.yml
+++ b/.github/workflows/e2e-parallel-destroy.yml
@@ -48,7 +48,7 @@ jobs:
         run: sed -i "s/# //g" ${{ matrix.example_path }}/versions.tf

       - name: Auth AWS
-        uses: aws-actions/configure-aws-credentials@v4.0.0
+        uses: aws-actions/configure-aws-credentials@v4.0.1
         with:
           role-to-assume: ${{ secrets.ROLE_TO_ASSUME }}
           aws-region: us-west-2
diff --git a/.github/workflows/e2e-parallel-full.yml b/.github/workflows/e2e-parallel-full.yml
index 3317eb38fc..815a4d68fd 100644
--- a/.github/workflows/e2e-parallel-full.yml
+++ b/.github/workflows/e2e-parallel-full.yml
@@ -35,7 +35,7 @@ jobs:
         uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608

       - name: Auth AWS
-        uses: aws-actions/configure-aws-credentials@v4.0.0
+        uses: aws-actions/configure-aws-credentials@v4.0.1
         with:
           role-to-assume: ${{ secrets.ROLE_TO_ASSUME }}
           aws-region: us-west-2
@@ -81,7 +81,7 @@ jobs:
         run: sed -i "s/# //g" ${{ matrix.example_path }}/versions.tf

       - name: Auth AWS
-        uses: aws-actions/configure-aws-credentials@v4.0.0
+        uses: aws-actions/configure-aws-credentials@v4.0.1
         with:
           role-to-assume: ${{ secrets.ROLE_TO_ASSUME }}
           aws-region: us-west-2
@@ -167,7 +167,7 @@ jobs:
         uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608

       - name: Configure AWS credentials from Test account
-        uses: aws-actions/configure-aws-credentials@v4.0.0
+        uses: aws-actions/configure-aws-credentials@v4.0.1
         with:
           role-to-assume: ${{ secrets.ROLE_TO_ASSUME }}
           aws-region: us-west-2
diff --git a/.github/workflows/plan-examples.yml b/.github/workflows/plan-examples.yml
index 1abd797d68..9354068e52 100644
--- a/.github/workflows/plan-examples.yml
+++ b/.github/workflows/plan-examples.yml
@@ -88,7 +88,7 @@ jobs:
               - '*.tf'

       - name: Configure AWS credentials from Test account
-        uses: aws-actions/configure-aws-credentials@v4.0.0
+        uses: aws-actions/configure-aws-credentials@v4.0.1
if: steps.changes.outputs.src== 'true' with: role-to-assume: ${{ secrets.ROLE_TO_ASSUME }} diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index d2ccb7a5b7..8e9e89a5a4 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -41,7 +41,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@08b4669551908b1024bb425080c797723083c031 # v2.2.0 + uses: ossf/scorecard-action@483ef80eb98fb506c348f7d62e28055e49fe2398 # v2.3.0 with: results_file: results.sarif results_format: sarif @@ -71,6 +71,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@ddccb873888234080b77e9bc2d4764d5ccaaccf9 # v2.21.9 + uses: github/codeql-action/upload-sarif@fdcae64e1484d349b3366718cdfef3d404390e85 # v2.22.1 with: sarif_file: results.sarif diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 07a682c97c..69734a100a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -10,7 +10,7 @@ repos: - id: detect-aws-credentials args: ['--allow-missing-credentials'] - repo: https://github.com/antonbabenko/pre-commit-terraform - rev: v1.83.2 + rev: v1.83.4 hooks: - id: terraform_fmt - id: terraform_docs diff --git a/patterns/elastic-fabric-adapter/main.tf b/patterns/elastic-fabric-adapter/main.tf index f0476d7d71..550df48250 100644 --- a/patterns/elastic-fabric-adapter/main.tf +++ b/patterns/elastic-fabric-adapter/main.tf @@ -28,26 +28,14 @@ provider "helm" { } } -provider "kubectl" { - apply_retry_count = 5 - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - load_config_file = false - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - } -} - data "aws_availability_zones" "available" {} locals { name = basename(path.cwd) region = "us-west-2" + cluster_version = "1.27" + vpc_cidr = "10.0.0.0/16" azs = slice(data.aws_availability_zones.available.names, 0, 3) @@ -66,7 +54,7 @@ module "eks" { version = "~> 19.16" cluster_name = local.name - cluster_version = "1.27" + cluster_version = local.cluster_version cluster_endpoint_public_access = true cluster_addons = { @@ -98,6 +86,13 @@ module "eks" { } } + eks_managed_node_group_defaults = { + iam_role_additional_policies = { + # Not required, but used in the example to access the nodes to inspect drivers and devices + AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" + } + } + eks_managed_node_groups = { # For running services that do not require GPUs default = { @@ -113,7 +108,7 @@ module "eks" { instance_types = ["g5.8xlarge"] min_size = 1 - max_size = 3 + max_size = 1 desired_size = 1 subnet_ids = slice(module.vpc.private_subnets, 0, 1) @@ -133,14 +128,26 @@ module "eks" { } pre_bootstrap_user_data = <<-EOT - # Install EFA - curl -O https://efa-installer.amazonaws.com/aws-efa-installer-latest.tar.gz - tar -xf aws-efa-installer-latest.tar.gz && cd aws-efa-installer - ./efa_installer.sh -y --minimal - fi_info -p efa -t FI_EP_RDM - - # Disable ptrace - sysctl -w kernel.yama.ptrace_scope=0 + EFA_BIN='/opt/amazon/efa/bin/' + + # EFA driver is installed by default on EKS GPU AMI starting on EKS 1.28 + if [ ! 
-s "$EFA_BIN" ]; then + + # Install EFA + # Note: It is recommended to install the EFA driver on a custom AMI and + # not rely on dynamic installation during instance provisioning in user data + curl -O https://efa-installer.amazonaws.com/aws-efa-installer-latest.tar.gz + tar -xf aws-efa-installer-latest.tar.gz && cd aws-efa-installer + ./efa_installer.sh -y --minimal + cd .. && rm -rf aws-efa-installer* + + # Not required - just displays info on the EFA interfaces + $EFA_BIN/fi_info -p efa + + # Disable ptrace + sysctl -w kernel.yama.ptrace_scope=0 + + fi EOT taints = { @@ -211,8 +218,20 @@ module "eks_blueprints_addons" { repository = "https://nvidia.github.io/gpu-operator" values = [ <<-EOT + dcgmExporter: + enabled: false + driver: + enabled: false + toolkit: + version: v1.13.5-centos7 operator: defaultRuntime: containerd + validator: + driver: + env: + # https://github.com/NVIDIA/gpu-operator/issues/569 + - name: DISABLE_DEV_CHAR_SYMLINK_CREATION + value: "true" EOT ] } @@ -225,14 +244,96 @@ module "eks_blueprints_addons" { # Amazon Elastic Fabric Adapter (EFA) ################################################################################ -data "http" "efa_device_plugin_yaml" { - url = "https://raw.githubusercontent.com/aws-samples/aws-efa-eks/main/manifest/efa-k8s-device-plugin.yml" -} +resource "kubernetes_daemonset" "aws_efa_k8s_device_plugin" { + metadata { + name = "aws-efa-k8s-device-plugin-daemonset" + namespace = "kube-system" + } -resource "kubectl_manifest" "efa_device_plugin" { - yaml_body = <<-YAML - ${data.http.efa_device_plugin_yaml.response_body} - YAML + spec { + selector { + match_labels = { + name = "aws-efa-k8s-device-plugin" + } + } + + template { + metadata { + labels = { + name = "aws-efa-k8s-device-plugin" + } + } + + spec { + volume { + name = "device-plugin" + + host_path { + path = "/var/lib/kubelet/device-plugins" + } + } + + container { + name = "aws-efa-k8s-device-plugin" + image = "602401143452.dkr.ecr.us-west-2.amazonaws.com/eks/aws-efa-k8s-device-plugin:v0.3.3" + + volume_mount { + name = "device-plugin" + mount_path = "/var/lib/kubelet/device-plugins" + } + + image_pull_policy = "Always" + + security_context { + capabilities { + drop = ["ALL"] + } + } + } + + host_network = true + + affinity { + node_affinity { + required_during_scheduling_ignored_during_execution { + node_selector_term { + match_expressions { + key = "beta.kubernetes.io/instance-type" + operator = "In" + values = ["c5n.18xlarge", "c5n.9xlarge", "c5n.metal", "c6a.48xlarge", "c6a.metal", "c6gn.16xlarge", "c6i.32xlarge", "c6i.metal", "c6id.32xlarge", "c6id.metal", "c6in.32xlarge", "c6in.metal", "c7g.16xlarge", "c7g.metal", "c7gd.16xlarge", "c7gn.16xlarge", "c7i.48xlarge", "dl1.24xlarge", "g4dn.12xlarge", "g4dn.16xlarge", "g4dn.8xlarge", "g4dn.metal", "g5.12xlarge", "g5.16xlarge", "g5.24xlarge", "g5.48xlarge", "g5.8xlarge", "hpc7g.16xlarge", "hpc7g.4xlarge", "hpc7g.8xlarge", "i3en.12xlarge", "i3en.24xlarge", "i3en.metal", "i4g.16xlarge", "i4i.32xlarge", "i4i.metal", "im4gn.16xlarge", "inf1.24xlarge", "m5dn.24xlarge", "m5dn.metal", "m5n.24xlarge", "m5n.metal", "m5zn.12xlarge", "m5zn.metal", "m6a.48xlarge", "m6a.metal", "m6i.32xlarge", "m6i.metal", "m6id.32xlarge", "m6id.metal", "m6idn.32xlarge", "m6idn.metal", "m6in.32xlarge", "m6in.metal", "m7a.48xlarge", "m7a.metal-48xl", "m7g.16xlarge", "m7g.metal", "m7gd.16xlarge", "m7i.48xlarge", "p3dn.24xlarge", "p4d.24xlarge", "p5.48xlarge", "r5dn.24xlarge", "r5dn.metal", "r5n.24xlarge", "r5n.metal", "r6a.48xlarge", "r6a.metal", 
"r6i.32xlarge", "r6i.metal", "r6id.32xlarge", "r6id.metal", "r6idn.32xlarge", "r6idn.metal", "r6in.32xlarge", "r6in.metal", "r7a.48xlarge", "r7g.16xlarge", "r7g.metal", "r7gd.16xlarge", "r7iz.32xlarge", "trn1.32xlarge", "trn1n.32xlarge", "vt1.24xlarge", "x2idn.32xlarge", "x2idn.metal", "x2iedn.32xlarge", "x2iedn.metal", "x2iezn.12xlarge", "x2iezn.metal"] + } + } + + node_selector_term { + match_expressions { + key = "node.kubernetes.io/instance-type" + operator = "In" + values = ["c5n.18xlarge", "c5n.9xlarge", "c5n.metal", "c6a.48xlarge", "c6a.metal", "c6gn.16xlarge", "c6i.32xlarge", "c6i.metal", "c6id.32xlarge", "c6id.metal", "c6in.32xlarge", "c6in.metal", "c7g.16xlarge", "c7g.metal", "c7gd.16xlarge", "c7gn.16xlarge", "c7i.48xlarge", "dl1.24xlarge", "g4dn.12xlarge", "g4dn.16xlarge", "g4dn.8xlarge", "g4dn.metal", "g5.12xlarge", "g5.16xlarge", "g5.24xlarge", "g5.48xlarge", "g5.8xlarge", "hpc7g.16xlarge", "hpc7g.4xlarge", "hpc7g.8xlarge", "i3en.12xlarge", "i3en.24xlarge", "i3en.metal", "i4g.16xlarge", "i4i.32xlarge", "i4i.metal", "im4gn.16xlarge", "inf1.24xlarge", "m5dn.24xlarge", "m5dn.metal", "m5n.24xlarge", "m5n.metal", "m5zn.12xlarge", "m5zn.metal", "m6a.48xlarge", "m6a.metal", "m6i.32xlarge", "m6i.metal", "m6id.32xlarge", "m6id.metal", "m6idn.32xlarge", "m6idn.metal", "m6in.32xlarge", "m6in.metal", "m7a.48xlarge", "m7a.metal-48xl", "m7g.16xlarge", "m7g.metal", "m7gd.16xlarge", "m7i.48xlarge", "p3dn.24xlarge", "p4d.24xlarge", "p5.48xlarge", "r5dn.24xlarge", "r5dn.metal", "r5n.24xlarge", "r5n.metal", "r6a.48xlarge", "r6a.metal", "r6i.32xlarge", "r6i.metal", "r6id.32xlarge", "r6id.metal", "r6idn.32xlarge", "r6idn.metal", "r6in.32xlarge", "r6in.metal", "r7a.48xlarge", "r7g.16xlarge", "r7g.metal", "r7gd.16xlarge", "r7iz.32xlarge", "trn1.32xlarge", "trn1n.32xlarge", "vt1.24xlarge", "x2idn.32xlarge", "x2idn.metal", "x2iedn.32xlarge", "x2iedn.metal", "x2iezn.12xlarge", "x2iezn.metal"] + } + } + } + } + } + + toleration { + key = "CriticalAddonsOnly" + operator = "Exists" + } + + toleration { + key = "aws.amazon.com/efa" + operator = "Exists" + effect = "NoSchedule" + } + + priority_class_name = "system-node-critical" + } + } + + strategy { + type = "RollingUpdate" + } + } } ################################################################################ diff --git a/patterns/elastic-fabric-adapter/versions.tf b/patterns/elastic-fabric-adapter/versions.tf index d75641f32a..bea4d78d15 100644 --- a/patterns/elastic-fabric-adapter/versions.tf +++ b/patterns/elastic-fabric-adapter/versions.tf @@ -14,14 +14,6 @@ terraform { source = "hashicorp/kubernetes" version = ">= 2.20" } - kubectl = { - source = "gavinbunney/kubectl" - version = ">= 1.14" - } - http = { - source = "hashicorp/http" - version = ">= 3.3" - } } # ## Used for end-to-end testing on project; update to suit your needs diff --git a/patterns/wireguard-with-cilium/README.md b/patterns/wireguard-with-cilium/README.md index 877b58ab79..0cfbeacb88 100644 --- a/patterns/wireguard-with-cilium/README.md +++ b/patterns/wireguard-with-cilium/README.md @@ -1,9 +1,15 @@ # Transparent Encryption with Cilium and Wireguard -This pattern demonstrates Cilium configured in CNI chaining mode with VPC CNI and with Wireguard transparent encryption enabled on an Amazon EKS cluster. +This pattern demonstrates Cilium configured in CNI chaining mode with the VPC CNI and with Wireguard transparent encryption enabled on an Amazon EKS cluster. 
-- [Cilium CNI Chaining Documentation](https://docs.cilium.io/en/v1.12/gettingstarted/cni-chaining-aws-cni/)
-- [Cilium Wireguard Encryption Documentation](https://docs.cilium.io/en/v1.12/gettingstarted/encryption-wireguard/)
+- [Cilium CNI Chaining Documentation](https://docs.cilium.io/en/stable/installation/cni-chaining-aws-cni/)
+- [Cilium Wireguard Encryption Documentation](https://docs.cilium.io/en/stable/security/network/encryption-wireguard/)
+
+## Focal Points
+
+- `eks.tf` contains the cluster configuration and the deployment of Cilium.
+  - There are no specific requirements from an EKS perspective, other than that the Linux kernel version used by the OS must be 5.10+
+- `example.yaml` provides a sample application used to demonstrate the encrypted connectivity. It is optional.

 ## Deploy

@@ -11,33 +17,57 @@ See [here](https://aws-ia.github.io/terraform-aws-eks-blueprints/getting-started

 ## Validate

-1. List the daemonsets
+1. Deploy the example pods:

    ```sh
-   kubectl get ds -n kube-system
+   kubectl apply -f example.yaml
    ```

    ```text
-   NAME         DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR            AGE
-   aws-node     2         2         2       2            2           <none>                   156m
-   cilium       2         2         2       2            2           kubernetes.io/os=linux   152m
-   kube-proxy   2         2         2       2            2           <none>                   156m
+   pod/server created
+   service/server created
+   pod/client created
    ```

-2. Open a shell inside the cilium container
+2. Get the Cilium status from one of the Cilium pods.

-   ```sh
-   kubectl -n kube-system exec -ti ds/cilium -- bash
-   ```
-
-3. Verify Encryption is enabled
+   Under the `Encryption` field, it should state `Wireguard` with a PubKey.
+   `NodeEncryption: Disabled` is expected since `NodeEncryption` was not enabled
+   via the Helm values provided.

    ```sh
-   cilium status | grep Encryption
+   kubectl -n kube-system exec -ti ds/cilium -- cilium status
    ```

    ```text
-   Encryption: Wireguard [cilium_wg0 (Pubkey: b2krgbHgaCsVWALMnFLiS/RekhhcE36PXEjQ7T8+mW0=, Port: 51871, Peers: 1)]
+   Defaulted container "cilium-agent" out of: cilium-agent, config (init), mount-cgroup (init), apply-sysctl-overwrites (init), mount-bpf-fs (init), clean-cilium-state (init), install-cni-binaries (init)
+   KVStore:                 Ok   Disabled
+   Kubernetes:              Ok   1.28+ (v1.28.1-eks-43840fb) [linux/amd64]
+   Kubernetes APIs:         ["EndpointSliceOrEndpoint", "cilium/v2::CiliumClusterwideNetworkPolicy", "cilium/v2::CiliumEndpoint", "cilium/v2::CiliumNetworkPolicy", "cilium/v2::CiliumNode", "cilium/v2alpha1::CiliumCIDRGroup", "core/v1::Namespace", "core/v1::Pods", "core/v1::Service", "networking.k8s.io/v1::NetworkPolicy"]
+   KubeProxyReplacement:    False   [eth0 10.0.21.109 (Direct Routing), eth1 10.0.27.0]
+   Host firewall:           Disabled
+   CNI Chaining:            aws-cni
+   Cilium:                  Ok   1.14.2 (v1.14.2-a6748946)
+   NodeMonitor:             Listening for events on 2 CPUs with 64x4096 of shared memory
+   Cilium health daemon:    Ok
+   IPAM:                    IPv4: 1/254 allocated from 10.0.0.0/24,
+   IPv4 BIG TCP:            Disabled
+   IPv6 BIG TCP:            Disabled
+   BandwidthManager:        Disabled
+   Host Routing:            Legacy
+   Masquerading:            Disabled
+   Controller Status:       24/24 healthy
+   Proxy Status:            No managed proxy redirect
+   Global Identity Range:   min 256, max 65535
+   Hubble:                  Ok   Current/Max Flows: 410/4095 (10.01%), Flows/s: 1.59   Metrics: Disabled
+   Encryption:              Wireguard   [NodeEncryption: Disabled, cilium_wg0 (Pubkey: /yuqsZyG91AzVIkZ3AIq8qjQ0gGKQd6GWcRYh4LYpko=, Port: 51871, Peers: 1)]
+   Cluster health:          Probe disabled
+   ```
+
+3. Open a shell inside the cilium container
+
+   ```sh
+   kubectl -n kube-system exec -ti ds/cilium -- bash
    ```
4. Install [`tcpdump`](https://www.tcpdump.org/)

@@ -54,15 +84,103 @@ See [here](https://aws-ia.github.io/terraform-aws-eks-blueprints/getting-started
    ```

    ```text
+   tcpdump: verbose output suppressed, use -v[v]... for full protocol decode
+   listening on cilium_wg0, link-type RAW (Raw IP), snapshot length 262144 bytes
    <title>Welcome to nginx!</title>

    <h1>Welcome to nginx!</h1>

-   ...
-
    40 packets captured
    40 packets received by filter
    0 packets dropped by kernel
    ```

+   !!! info "Exit"
+       Exit the container shell by typing `exit` before continuing to the next step
+
+6. Deploy the Cilium connectivity resources to check and evaluate connectivity:
+
+   ```sh
+   kubectl create ns cilium-test
+   kubectl apply -n cilium-test -f https://raw.githubusercontent.com/cilium/cilium/v1.14.1/examples/kubernetes/connectivity-check/connectivity-check.yaml
+   ```
+
+   ```text
+   deployment.apps/echo-a created
+   deployment.apps/echo-b created
+   deployment.apps/echo-b-host created
+   deployment.apps/pod-to-a created
+   deployment.apps/pod-to-external-1111 created
+   deployment.apps/pod-to-a-denied-cnp created
+   deployment.apps/pod-to-a-allowed-cnp created
+   deployment.apps/pod-to-external-fqdn-allow-google-cnp created
+   deployment.apps/pod-to-b-multi-node-clusterip created
+   deployment.apps/pod-to-b-multi-node-headless created
+   deployment.apps/host-to-b-multi-node-clusterip created
+   deployment.apps/host-to-b-multi-node-headless created
+   deployment.apps/pod-to-b-multi-node-nodeport created
+   deployment.apps/pod-to-b-intra-node-nodeport created
+   service/echo-a created
+   service/echo-b created
+   service/echo-b-headless created
+   service/echo-b-host-headless created
+   ciliumnetworkpolicy.cilium.io/pod-to-a-denied-cnp created
+   ciliumnetworkpolicy.cilium.io/pod-to-a-allowed-cnp created
+   ciliumnetworkpolicy.cilium.io/pod-to-external-fqdn-allow-google-cnp created
+   ```
+
+7. View the logs of any of the connectivity-test pods to see the results:
+
+   ```sh
+   kubectl logs -n cilium-test <pod-name>
+   ```
+
+   ```text
+   \{^_^}/ hi!
+
+   Loading /default.json
+   Done
+
+   Resources
+   http://:8080/private
+   http://:8080/public
+
+   Home
+   http://:8080
+
+   Type s + enter at any time to create a snapshot of the database
+   Watching...
+ + GET /public 200 7.063 ms - 57 + GET /public 200 3.126 ms - 57 + GET /public 200 3.039 ms - 57 + GET /public 200 2.776 ms - 57 + GET /public 200 3.087 ms - 57 + GET /public 200 2.781 ms - 57 + GET /public 200 2.977 ms - 57 + GET /public 200 2.596 ms - 57 + GET /public 200 2.991 ms - 57 + GET /public 200 2.708 ms - 57 + GET /public 200 3.066 ms - 57 + GET /public 200 2.616 ms - 57 + GET /public 200 2.875 ms - 57 + GET /public 200 2.689 ms - 57 + GET /public 200 2.800 ms - 57 + GET /public 200 2.556 ms - 57 + GET /public 200 3.238 ms - 57 + GET /public 200 2.538 ms - 57 + GET /public 200 2.890 ms - 57 + GET /public 200 2.666 ms - 57 + GET /public 200 2.729 ms - 57 + GET /public 200 2.580 ms - 57 + GET /public 200 2.919 ms - 57 + GET /public 200 2.630 ms - 57 + GET /public 200 2.857 ms - 57 + GET /public 200 2.716 ms - 57 + GET /public 200 1.693 ms - 57 + GET /public 200 2.715 ms - 57 + GET /public 200 2.729 ms - 57 + GET /public 200 2.655 ms - 57 + ``` + ## Destroy {% diff --git a/patterns/wireguard-with-cilium/eks.tf b/patterns/wireguard-with-cilium/eks.tf new file mode 100644 index 0000000000..f5b6c33009 --- /dev/null +++ b/patterns/wireguard-with-cilium/eks.tf @@ -0,0 +1,98 @@ +################################################################################ +# Cluster +################################################################################ + +module "eks" { + source = "terraform-aws-modules/eks/aws" + version = "~> 19.16" + + cluster_name = local.name + cluster_version = "1.28" + cluster_endpoint_public_access = true + + # EKS Addons + cluster_addons = { + coredns = {} + kube-proxy = {} + vpc-cni = {} + } + + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets + + eks_managed_node_groups = { + initial = { + instance_types = ["m5.large"] + min_size = 1 + max_size = 3 + desired_size = 2 + } + } + + # Extend node-to-node security group rules + node_security_group_additional_rules = { + # Cilium Wireguard Port https://github.com/cilium/cilium/blob/main/Documentation/security/network/encryption-wireguard.rst + ingress_cilium_wireguard = { + description = "Allow Cilium Wireguard node to node" + protocol = "udp" + from_port = 51871 + to_port = 51871 + type = "ingress" + self = true + } + } + + tags = local.tags +} + +################################################################################ +# Kubectl Output +################################################################################ + +output "configure_kubectl" { + description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" + value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_name}" +} + +################################################################################ +# EKS Blueprints Addons +################################################################################ + +module "eks_blueprints_addons" { + source = "aws-ia/eks-blueprints-addons/aws" + version = "~> 1.7" + + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + oidc_provider_arn = module.eks.oidc_provider_arn + + helm_releases = { + cilium = { + name = "cilium" + chart = "cilium" + version = "1.14.1" + repository = "https://helm.cilium.io/" + description = "Cilium Add-on" + namespace = "kube-system" + create_namespace = false + + values = [ + <<-EOT + cni: + chainingMode: aws-cni + enableIPv4Masquerade: false + tunnel: 
disabled + endpointRoutes: + enabled: true + l7Proxy: false + encryption: + enabled: true + type: wireguard + EOT + ] + } + } + + tags = local.tags +} diff --git a/patterns/wireguard-with-cilium/example.yaml b/patterns/wireguard-with-cilium/example.yaml new file mode 100644 index 0000000000..77fb7c36b4 --- /dev/null +++ b/patterns/wireguard-with-cilium/example.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: server + labels: + blog: wireguard + name: server +spec: + containers: + - name: server + image: nginx + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: "kubernetes.io/hostname" + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + blog: wireguard +--- +apiVersion: v1 +kind: Service +metadata: + name: server +spec: + selector: + name: server + ports: + - port: 80 + sessionAffinity: ClientIP + sessionAffinityConfig: + clientIP: + timeoutSeconds: 600 +--- +apiVersion: v1 +kind: Pod +metadata: + name: client + labels: + blog: wireguard + name: client +spec: + containers: + - name: client + image: busybox + command: ["watch", "wget", "server"] + topologySpreadConstraints: + - maxSkew: 1 + topologyKey: "kubernetes.io/hostname" + whenUnsatisfiable: DoNotSchedule + labelSelector: + matchLabels: + blog: wireguard +--- diff --git a/patterns/wireguard-with-cilium/main.tf b/patterns/wireguard-with-cilium/main.tf index 1b6f28e54c..e6c70ffea4 100644 --- a/patterns/wireguard-with-cilium/main.tf +++ b/patterns/wireguard-with-cilium/main.tf @@ -1,3 +1,29 @@ +terraform { + required_version = ">= 1.0" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 4.47" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.9" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.20" + } + } + + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/wireguard-with-cilium/terraform.tfstate" + # } +} + provider "aws" { region = local.region } @@ -28,19 +54,9 @@ provider "helm" { } } -provider "kubectl" { - apply_retry_count = 5 - host = module.eks.cluster_endpoint - cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - load_config_file = false - - exec { - api_version = "client.authentication.k8s.io/v1beta1" - command = "aws" - # This requires the awscli to be installed locally where Terraform is executed - args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] - } -} +################################################################################ +# Common data/locals +################################################################################ data "aws_availability_zones" "available" {} @@ -51,208 +67,12 @@ locals { vpc_cidr = "10.0.0.0/16" azs = slice(data.aws_availability_zones.available.names, 0, 3) - tags = { Blueprint = local.name GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" } } -################################################################################ -# Cluster -################################################################################ - -module "eks" { - source = "terraform-aws-modules/eks/aws" - version = "~> 19.16" - - cluster_name = local.name - cluster_version = "1.27" - cluster_endpoint_public_access = true - - # EKS Addons - cluster_addons = { - coredns = {} - kube-proxy = {} - vpc-cni = {} - } - - vpc_id = module.vpc.vpc_id - subnet_ids = module.vpc.private_subnets - - - 
eks_managed_node_groups = { - initial = { - instance_types = ["m5.large"] - # Cilium Wireguard requires Linux Kernel 5.10 or aboved. - # For EKS 1.24 and above, the AMI the Kernerl version is 5.10 - # For EKS 1.23 and below, you need to use Bottlerocket OS. For example: - # ami_type = "BOTTLEROCKET_x86_64" - # platform = "bottlerocket" - min_size = 1 - max_size = 3 - desired_size = 2 - } - } - # Extend node-to-node security group rules - node_security_group_additional_rules = { - ingress_cilium_wireguard = { - description = "Allow Cilium Wireguard node to node" - protocol = "udp" - from_port = 51871 - to_port = 51871 # Cilium Wireguard Port https://github.com/cilium/cilium/blob/main/Documentation/security/network/encryption-wireguard.rst - type = "ingress" - self = true - } - } - - tags = local.tags -} - -################################################################################ -# Cilium Helm Chart for e2e encryption with Wireguard -################################################################################ - -resource "helm_release" "cilium" { - name = "cilium" - chart = "cilium" - version = "1.13.2" - repository = "https://helm.cilium.io/" - description = "Cilium Add-on" - namespace = "kube-system" - create_namespace = false - - values = [ - <<-EOT - cni: - chainingMode: aws-cni - enableIPv4Masquerade: false - tunnel: disabled - endpointRoutes: - enabled: true - l7Proxy: false - encryption: - enabled: true - type: wireguard - EOT - ] - - depends_on = [ - module.eks - ] -} - -#--------------------------------------------------------------- -# Sample App for Testing -#--------------------------------------------------------------- - -# For some reason the example pods can't be deployed right after helm install of cilium a delay needs to be introduced. This is being investigated -resource "time_sleep" "wait_wireguard" { - count = var.enable_example ? 1 : 0 - create_duration = "15s" - - depends_on = [helm_release.cilium] -} - -resource "kubectl_manifest" "server" { - count = var.enable_example ? 1 : 0 - - yaml_body = yamlencode({ - apiVersion = "v1" - kind = "Pod" - metadata = { - name = "server" - labels = { - blog = "wireguard" - name = "server" - } - } - spec = { - containers = [ - { - name = "server" - image = "nginx" - } - ] - topologySpreadConstraints = [ - { - maxSkew = 1 - topologyKey = "kubernetes.io/hostname" - whenUnsatisfiable = "DoNotSchedule" - labelSelector = { - matchLabels = { - blog = "wireguard" - } - } - } - ] - } - }) - - depends_on = [time_sleep.wait_wireguard] -} - -resource "kubectl_manifest" "service" { - count = var.enable_example ? 1 : 0 - - yaml_body = yamlencode({ - apiVersion = "v1" - kind = "Service" - metadata = { - name = "server" - } - spec = { - selector = { - name = "server" - } - ports = [ - { - port = 80 - } - ] - } - }) -} - -resource "kubectl_manifest" "client" { - count = var.enable_example ? 
1 : 0 - - yaml_body = yamlencode({ - apiVersion = "v1" - kind = "Pod" - metadata = { - name = "client" - labels = { - blog = "wireguard" - name = "client" - } - } - spec = { - containers = [ - { - name = "client" - image = "busybox" - command = ["watch", "wget", "server"] - } - ] - topologySpreadConstraints = [ - { - maxSkew = 1 - topologyKey = "kubernetes.io/hostname" - whenUnsatisfiable = "DoNotSchedule" - labelSelector = { - matchLabels = { - blog = "wireguard" - } - } - } - ] - } - }) - - depends_on = [kubectl_manifest.server] -} - ################################################################################ # Supporting Resources ################################################################################ diff --git a/patterns/wireguard-with-cilium/outputs.tf b/patterns/wireguard-with-cilium/outputs.tf deleted file mode 100644 index c624023e90..0000000000 --- a/patterns/wireguard-with-cilium/outputs.tf +++ /dev/null @@ -1,4 +0,0 @@ -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_name}" -} diff --git a/patterns/wireguard-with-cilium/variables.tf b/patterns/wireguard-with-cilium/variables.tf deleted file mode 100644 index 433e9ec9d1..0000000000 --- a/patterns/wireguard-with-cilium/variables.tf +++ /dev/null @@ -1,6 +0,0 @@ -# tflint-ignore: terraform_unused_declarations -variable "enable_example" { - description = "Enable example to test this blueprint" - type = bool - default = true -} diff --git a/patterns/wireguard-with-cilium/versions.tf b/patterns/wireguard-with-cilium/versions.tf deleted file mode 100644 index d42480c1e6..0000000000 --- a/patterns/wireguard-with-cilium/versions.tf +++ /dev/null @@ -1,33 +0,0 @@ -terraform { - required_version = ">= 1.0" - - required_providers { - aws = { - source = "hashicorp/aws" - version = ">= 4.47" - } - helm = { - source = "hashicorp/helm" - version = ">= 2.9" - } - kubernetes = { - source = "hashicorp/kubernetes" - version = ">= 2.20" - } - kubectl = { - source = "gavinbunney/kubectl" - version = ">= 1.14" - } - time = { - source = "hashicorp/time" - version = ">= 0.9" - } - } - - # ## Used for end-to-end testing on project; update to suit your needs - # backend "s3" { - # bucket = "terraform-ssp-github-actions-state" - # region = "us-west-2" - # key = "e2e/wireguard-with-cilium/terraform.tfstate" - # } -}
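---

Two quick local checks relevant to the changes above (sketches): the first assumes the repository root as the working directory and that `plan-examples.py` still prints its JSON matrix to stdout (the `print` call sits outside the hunks shown); the second assumes `kubectl` access to a cluster built from the elastic-fabric-adapter pattern, where the device plugin registers the `vpc.amazonaws.com/efa` extended resource:

```sh
# Preview the CI matrix now derived from patterns/**/main.tf
python3 .github/scripts/plan-examples.py

# List allocatable EFA devices per node once the device plugin DaemonSet is running
kubectl get nodes -o custom-columns='NODE:.metadata.name,EFA:.status.allocatable.vpc\.amazonaws\.com/efa'
```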