
Merge branch 'main' into docs
commjoen committed Oct 7, 2022
2 parents ffdc9d4 + 54a0cf0 commit 684bf9f
Showing 17 changed files with 1,854 additions and 1,856 deletions.
6 changes: 6 additions & 0 deletions .github/workflows/test.yml
@@ -6,6 +6,9 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
+     - uses: actions/setup-node@v3
+       with:
+         node-version: 18
      - name: Install Balancer
        run: |
          cd cleaner
@@ -23,6 +26,9 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
+     - uses: actions/setup-node@v3
+       with:
+         node-version: 18
      - name: "Install & Build BalancerUI"
        run: |
          cd wrongsecrets-balancer/ui
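The `actions/setup-node@v3` step added to both jobs pins the workflow to Node 18. A minimal local sanity check, assuming Node and npm are already installed, to confirm your toolchain matches the CI pin:

```sh
# Hypothetical local check mirroring the node-version pinned in CI above.
node --version   # expect v18.x
npm --version
```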
4 changes: 2 additions & 2 deletions aws/README.md
@@ -57,7 +57,7 @@ Are you done playing? Please run `terraform destroy` twice to clean up.
### Test it
When you have completed the installation steps, you can do `kubectl port-forward service/wrongsecrets-balancer 3000:3000` and then go to [http://localhost:3000](http://localhost:3000).

-Want to know how well your cluster is holding up? Check with
+Want to know how well your cluster is holding up? Check with

```sh
kubectl top nodes
@@ -69,7 +69,7 @@ Want to know how well your cluster is holding up? Check with
When you're done:

1. Kill the port forward.
-2. Run the cleanup script: `cleanup-aws-loadbalancing-and-helm.sh`
+2. Run the cleanup script: `cleanup-aws-autoscaling-and-helm.sh`
3. Run `terraform destroy` to clean up the infrastructure.
1. If you've deployed the `shared-state` s3 bucket, also `cd shared-state` and `terraform destroy` there.
4. Run `unset KUBECONFIG` to unset the KUBECONFIG env var.
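The README steps above boil down to a short command sequence; a minimal sketch, assuming the AWS install from this guide and `kubectl`/`terraform` on your PATH:

```sh
# Sketch of the test-and-teardown flow described in the README above.
kubectl port-forward service/wrongsecrets-balancer 3000:3000 &   # then browse to http://localhost:3000
kubectl top nodes                                                # needs metrics-server in the cluster
kill %1                                                          # stop the port-forward when done
./cleanup-aws-autoscaling-and-helm.sh                            # cleanup script referenced above
terraform destroy
unset KUBECONFIG
```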
3 changes: 2 additions & 1 deletion aws/build-an-deploy-aws.sh
@@ -58,6 +58,7 @@ eksctl create iamserviceaccount \
--region=$AWS_REGION \
--namespace=kube-system \
--name=cluster-autoscaler \
+ --role-name=AmazonEKSClusterAutoscalerRole \
--attach-policy-arn=arn:aws:iam::${ACCOUNT_ID}:policy/AmazonEKSClusterAutoscalerPolicy \
--override-existing-serviceaccounts \
--approve
@@ -111,4 +112,4 @@ wait
DEFAULT_PASSWORD=thankyou
#TODO: REWRITE ABOVE, REWRITE THE HARDCODED DEPLOYMENT VALS INTO VALUES AND OVERRIDE THEM HERE!
echo "default password is ${DEFAULT_PASSWORD}"
-helm upgrade --install mj ../helm/wrongsecrets-ctf-party --set="imagePullPolicy=Always" --set="balancer.env.K8S_ENV=aws" --set"balancer.env.IRSA_ROLE=arn:aws:iam::${ACCOUNT_ID}:role/wrongsecrets-secret-manager" --set="balancer.env.REACT_APP_ACCESS_PASSWORD=${DEFAULT_PASSWORD}" --set="balancer.cookie.cookieParserSecret=thisisanewrandomvaluesowecanworkatit" --set="balancer.repository=jeroenwillemsen/wrongsecrets-balancer" --set="balancer.tag=1.0aws" --set="balancer.replicas=4" --set="wrongsecretsCleanup.repository=jeroenwillemsen/wrongsecrets-ctf-cleaner" --set="wrongsecretsCleanup.tag=0.2"
+helm upgrade --install mj ../helm/wrongsecrets-ctf-party --set="imagePullPolicy=Always" --set="balancer.env.K8S_ENV=aws" --set="balancer.env.IRSA_ROLE=arn:aws:iam::${ACCOUNT_ID}:role/wrongsecrets-secret-manager" --set="balancer.env.REACT_APP_ACCESS_PASSWORD=${DEFAULT_PASSWORD}" --set="balancer.cookie.cookieParserSecret=thisisanewrandomvaluesowecanworkatit" --set="balancer.repository=jeroenwillemsen/wrongsecrets-balancer" --set="balancer.tag=1.0aws" --set="balancer.replicas=4" --set="wrongsecretsCleanup.repository=jeroenwillemsen/wrongsecrets-ctf-cleaner" --set="wrongsecretsCleanup.tag=0.2"
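The new line fixes a malformed flag: `--set"balancer.env.IRSA_ROLE=..."` was missing the `=`, so the IRSA role override was not passed as a valid `--set` argument. A quick way to confirm the overrides landed, assuming Helm 3 and the release name `mj` used above:

```sh
# Inspect the release installed by build-an-deploy-aws.sh.
helm status mj        # release health
helm get values mj    # should list balancer.env.IRSA_ROLE among the user-supplied values
```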
@@ -44,4 +44,4 @@ eksctl delete iamserviceaccount \
sleep 5 # Prevents race condition - command below may error out because it's still 'attached'

aws iam delete-policy \
-  --policy-arn arn:aws:iam::${ACCOUNT_ID}:policy/AmazonEKSClusterAutoscalerPolicy
+  --policy-arn arn:aws:iam::${ACCOUNT_ID}:policy/AmazonEKSClusterAutoscalerPolicy
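After the cleanup hunk above deletes the service account and IAM policy, you can verify the policy is really gone; a sketch assuming the AWS CLI is configured and `ACCOUNT_ID` is exported as in the script:

```sh
# Returns an error once the autoscaler policy has been deleted.
aws iam get-policy \
  --policy-arn arn:aws:iam::${ACCOUNT_ID}:policy/AmazonEKSClusterAutoscalerPolicy
```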
6 changes: 3 additions & 3 deletions aws/main.tf
@@ -37,7 +37,7 @@ data "aws_availability_zones" "available" {}

module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 3.14.4"
version = "~> 3.16.0"

name = "${var.cluster_name}-vpc"
cidr = local.vpc_cidr
@@ -62,7 +62,7 @@ module "vpc" {

module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "18.29.0"
version = "18.30.0"

cluster_name = var.cluster_name
cluster_version = var.cluster_version
@@ -87,7 +87,7 @@ module "eks" {
disk_type = "gp3"
disk_throughput = 150
disk_iops = 3000
instance_types = ["t3a.xlarge"]
instance_types = ["t3a.large"]

iam_role_additional_policies = [
"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
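Because the hunks above bump the pinned VPC and EKS module versions, Terraform has to refresh its module and provider lock before the next plan; a minimal sketch, run from the `aws/` directory:

```sh
# Pull the newer module versions pinned above, then review the resulting changes.
terraform init -upgrade
terraform plan
```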
2 changes: 1 addition & 1 deletion aws/variables.tf
@@ -7,7 +7,7 @@ variable "region" {
variable "cluster_version" {
description = "The EKS cluster version to use"
type = string
-default = "1.22"
+default = "1.23"
}

variable "cluster_name" {
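The bumped `cluster_version` default can still be overridden per run without editing the file; a hedged example (the variable name comes from the block above, the value is illustrative):

```sh
# Override the EKS version at plan/apply time instead of relying on the default.
terraform plan -var="cluster_version=1.23"
```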
6 changes: 3 additions & 3 deletions cleaner/Dockerfile
@@ -1,10 +1,10 @@
-FROM node:16-alpine as build
+FROM node:18-alpine as build
RUN mkdir -p /home/app
WORKDIR /home/app
COPY package.json package-lock.json ./
-RUN npm ci --production
+RUN npm ci --omit=dev

-FROM node:16-alpine
+FROM node:18-alpine
RUN addgroup --system --gid 1001 app && adduser app --system --uid 1001 --ingroup app
WORKDIR /home/app/
COPY --from=build --chown=app:app /home/app/node_modules/ ./node_modules/
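Both build stages now use Node 18, and `npm ci --omit=dev` is the newer spelling of the `--production` flag, which npm has deprecated. A quick sanity check of the resulting image; the tag is illustrative and assumes Docker is available:

```sh
# Build the cleaner image and confirm it runs on Node 18.
docker build -t wrongsecrets-ctf-cleaner:dev cleaner/
docker run --rm --entrypoint node wrongsecrets-ctf-cleaner:dev --version   # expect v18.x
```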