diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
index cdfe62cba..f556c7377 100644
--- a/.github/workflows/pre-commit.yml
+++ b/.github/workflows/pre-commit.yml
@@ -10,6 +10,7 @@ env:
   TF_DOCS_VERSION: v0.16.0
   TFLINT_VERSION: v0.44.1
   TF_VERSION: "1.3.4"
+  HELM_DOCS_VERSION: "1.7.0"
 permissions:
   contents: read
 jobs:
@@ -42,6 +43,12 @@ jobs:
           tar -zxvf terraform_docs.tar.gz terraform-docs
           chmod +x terraform-docs
           mv terraform-docs /usr/local/bin/
+      - name: Setup helm-docs
+        run: |
+          cd /tmp
+          wget https://github.com/norwoodj/helm-docs/releases/download/v${{env.HELM_DOCS_VERSION}}/helm-docs_${{env.HELM_DOCS_VERSION}}_Linux_x86_64.tar.gz
+          tar -xvf helm-docs_${{env.HELM_DOCS_VERSION}}_Linux_x86_64.tar.gz
+          sudo mv helm-docs /usr/local/sbin
       - name: Pre-commit checks
         uses: pre-commit/action@v3.0.0
       - name: pre-commit-ci-lite
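For contributors who want to reproduce this check before pushing, the setup step above corresponds to roughly the following local install — a sketch assuming Linux on x86_64 and the same helm-docs release:

```shell
# Local equivalent of the "Setup helm-docs" CI step (assumption: Linux x86_64).
HELM_DOCS_VERSION="1.7.0"
cd /tmp
wget "https://github.com/norwoodj/helm-docs/releases/download/v${HELM_DOCS_VERSION}/helm-docs_${HELM_DOCS_VERSION}_Linux_x86_64.tar.gz"
tar -xvf "helm-docs_${HELM_DOCS_VERSION}_Linux_x86_64.tar.gz"
# The workflow moves the binary to /usr/local/sbin; any directory on PATH works.
sudo mv helm-docs /usr/local/bin/
```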
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index 2f02ea67b..c7e66b940 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -8,6 +8,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
+      - name: Configure Git
+        run: |
+          git config user.name "$GITHUB_ACTOR"
+          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
       - name: "Install yq"
         run: |
           sudo snap install yq
@@ -19,11 +23,44 @@
           RELEASE_VERSION="${RELEASE_VERSION//v}"
           # patch the version & appVersion in the Chart.yaml to the release version
           yq eval -i ".version = \"$RELEASE_VERSION\", .appVersion = \"$RELEASE_VERSION\"" Chart.yaml
-      - uses: J12934/helm-gh-pages-action@v2.0.0
+      - name: install helm-docs
+        env:
+          HELM_DOCS_VERSION: "1.7.0"
+        run: |
+          cd /tmp
+          wget https://github.com/norwoodj/helm-docs/releases/download/v${{env.HELM_DOCS_VERSION}}/helm-docs_${{env.HELM_DOCS_VERSION}}_Linux_x86_64.tar.gz
+          tar -xvf helm-docs_${{env.HELM_DOCS_VERSION}}_Linux_x86_64.tar.gz
+          sudo mv helm-docs /usr/local/sbin
+      - name: Generate and Release Helm Docs
+        run: |
+          helm-docs -t helm/wrongsecrets-ctf-party/README.md.gotmpl -t helm/wrongsecrets-ctf-party/_template.gotmpl -o README.md
+          cp helm/wrongsecrets-ctf-party/README.md /tmp/README.md
+      - name: Update gh-pages branch
+        uses: actions/checkout@v3
+        with:
+          ref: gh-pages
+          fetch-depth: 0
+      - name: Copy README.md to gh-pages
+        run: |
+          cp /tmp/README.md README.md
+      - name: Commit and push changes (if any)
+        run: |
+          if git diff --exit-code; then
+            echo "No changes detected."
+          else
+            echo "Changes detected."
+            git add README.md
+            git commit -m "Update README.md"
+            git push
+          fi
+      - name: Get back to original branch
+        uses: actions/checkout@v3
+      - name: Run chart-releaser
+        uses: helm/chart-releaser-action@v1.5.0
         with:
-          access-token: ${{ secrets.ACCESS_TOKEN }}
-          charts-folder: helm
-          deploy-branch: gh-pages
+          charts_dir: helm
+        env:
+          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
   dockerBuilds:
     name: "Build"
     runs-on: ubuntu-latest
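To make the version patching above concrete: given a pushed tag `v1.2.3`, the shell expansion `${RELEASE_VERSION//v}` strips the `v`, and the `yq eval -i` call then rewrites both fields of Chart.yaml in place. An illustrative before/after, where the "before" values are the chart's current metadata:

```yaml
# Chart.yaml before the release step:
version: 0.1.0-alpha
appVersion: 1.5.9

# Chart.yaml after the step runs with RELEASE_VERSION=1.2.3:
version: 1.2.3
appVersion: 1.2.3
```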
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 87ea50935..56b3248c8 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -31,6 +31,20 @@ repos:
       - "--args=--only=terraform_standard_module_structure"
       - "--args=--only=terraform_workspace_remote"
     - id: terraform_docs
+  - repo: https://github.com/norwoodj/helm-docs
+    rev: v1.2.0
+    hooks:
+      - id: helm-docs
+        args:
+          # Make the tool search for charts only under the `helm` directory
+          - --chart-search-root=helm
+
+          # The `./` makes it relative to the chart-search-root set above
+          - --template-files=./_templates.gotmpl
+
+          # Repeating the flag adds this to the list, now [./_templates.gotmpl, README.md.gotmpl]
+          # A base filename makes it relative to each chart directory found
+          - --template-files=README.md.gotmpl
   - repo: https://github.com/alessandrojcm/commitlint-pre-commit-hook
     rev: v9.4.0
     hooks:
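With the hook registered, the same docs check that CI performs can be run on demand — a minimal sketch, assuming `pre-commit` and `helm-docs` are already installed locally:

```shell
# Run only the helm-docs hook against every file in the repository.
pre-commit run helm-docs --all-files
```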
diff --git a/helm/wrongsecrets-ctf-party/Chart.yaml b/helm/wrongsecrets-ctf-party/Chart.yaml
index dd3326384..0841cec2a 100644
--- a/helm/wrongsecrets-ctf-party/Chart.yaml
+++ b/helm/wrongsecrets-ctf-party/Chart.yaml
@@ -3,7 +3,7 @@ name: wrongsecrets-ctf-party
 description: Run Multi User "Capture the Flags" or Security Trainings with OWASP Wrongsecrets
 icon: https://github.com/OWASP/wrongsecrets/blob/master/icon.png?raw=true
-home: https://github.com/
+home: https://owasp.org/www-project-wrongsecrets/
 sources:
   - https://github.com/OWASP/wrongsecrets-ctf-party
diff --git a/helm/wrongsecrets-ctf-party/README.md b/helm/wrongsecrets-ctf-party/README.md
index 694ac271e..41e7e2438 100644
--- a/helm/wrongsecrets-ctf-party/README.md
+++ b/helm/wrongsecrets-ctf-party/README.md
@@ -1,88 +1,191 @@
-![MultiJuicer, Multi User Juice Shop Platform](https://raw.githubusercontent.com/iteratec/multi-juicer/main/images/multijuicer-cover.svg)
+![WrongSecrets CTF Party, to use WrongSecrets for CTF or online Education](https://raw.githubusercontent.com/OWASP/wrongsecrets/master/icon.png)
+_Powered by MultiJuicer_
 
-Running CTFs and Security Trainings with [OWASP Juice Shop](https://github.com/bkimminich/juice-shop) is usually quite tricky, Juice Shop just isn't intended to be used by multiple users at a time.
-Instructing everybody how to start Juice Shop on their own machine works ok, but takes away too much valuable time.
+Running CTFs and Security Trainings with [OWASP WrongSecrets](https://github.com/OWASP/wrongsecrets) is usually quite tricky: WrongSecrets can be used by multiple users at one time, but this can cause issues when people start fuzzing.
+Instructing everybody how to start WrongSecrets on their own machine works ok, but takes away too much valuable time.
+Next, installing the additional tools required to learn the basics of reverse-engineering might take too much time as well.
 
-MultiJuicer gives you the ability to run separate Juice Shop instances for every participant on a central kubernetes cluster, to run events without the need for local Juice Shop instances.
+WrongSecrets CTF Party gives you the ability to run separate WrongSecrets instances for every participant on a central kubernetes cluster, to run events without the need for local WrongSecrets instances.
 
 **What it does:**
 
-- dynamically create new Juice Shop instances when needed
-- runs on a single domain, comes with a LoadBalancer sending the traffic to the participants Juice Shop instance
+- dynamically create new WrongSecrets instances when needed
+- dynamically create new WrongSecrets virtual desktop instances with all the additional tooling required to do the CTF/training when needed
+- runs on a single domain, comes with a LoadBalancer sending the traffic to the participants WrongSecrets instance
 - backup and auto apply challenge progress in case of Juice Shop container restarts
 - cleanup old & unused instances automatically
 
+It follows the same architecture as MultiJuicer below:
 ![MultiJuicer, High Level Architecture Diagram](https://raw.githubusercontent.com/iteratec/multi-juicer/main/high-level-architecture.svg)
 
-## Configuration
-
-| Key | Type | Default | Description |
-| ------------------------------------------- | ------ | ---------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| balancer.affinity | object | `{}` | Optional Configure kubernetes scheduling affinity for the created JuiceShops (see: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) |
-| balancer.cookie.cookieParserSecret | string | `nil` | Set this to a fixed random alpa-numeric string (recommended length 24 chars). If not set this get randomly generated with every helm upgrade, each rotation invalidates all active cookies / sessions requirering users to login again. |
-| balancer.cookie.name | string | `"balancer"` | Changes the cookies name used to identify teams. Note will automatically be prefixed with "\_\_Secure-" when balancer.cookie.secure is set to `true` |
-| balancer.cookie.secure | bool | `false` | |
-| balancer.metrics.basicAuth.password | string | `"ERzCT4pwBDxfCKRGmfrMa8KQ8sXf8GKy"` | Should be changed when metrics are enabled. |
-| balancer.metrics.basicAuth.username | string | `"prometheus-scraper"` | |
-| balancer.metrics.dashboards.enabled | bool | `false` | if true, creates a Grafana Dashboard Config Map. (also requires metrics.enabled to be true). These will automatically be imported by Grafana when using the Grafana helm chart, see: https://github.com/helm/charts/tree/main/stable/grafana#sidecar-for-dashboards |
-| balancer.metrics.enabled | bool | `true` | enables prometheus metrics for the balancer. If set to true you should change the prometheus-scraper password |
-| balancer.metrics.serviceMonitor.enabled | bool | `false` | If true, creates a Prometheus Operator ServiceMonitor (also requires metrics.enabled to be true). This will also deploy a servicemonitor which monitors metrics from the Juice Shop instances |
-| balancer.replicas | int | `1` | Number of replicas of the wrongsecrets-balancer deployment |
-| balancer.repository | string | `"iteratec/wrongsecrets-balancer"` | |
-| balancer.resources.limits.cpu | string | `"400m"` | |
-| balancer.resources.limits.memory | string | `"256Mi"` | |
-| balancer.resources.requests.cpu | string | `"400m"` | |
-| balancer.resources.requests.memory | string | `"256Mi"` | |
-| balancer.securityContext | object | `{}` | |
-| balancer.service.clusterIP | string | `nil` | internal cluster service IP |
-| balancer.service.externalIPs | string | `nil` | IP address to assign to load balancer (if supported) |
-| balancer.service.loadBalancerIP | string | `nil` | IP address to assign to load balancer (if supported) |
-| balancer.service.loadBalancerSourceRanges | string | `nil` | list of IP CIDRs allowed access to lb (if supported) |
-| balancer.service.type | string | `"ClusterIP"` | Kubernetes service type |
-| balancer.skipOwnerReference | bool | `false` | If set to true this skips setting ownerReferences on the teams JuiceShop Deployment and Services. This lets MultiJuicer run in older kubernetes cluster which don't support the reference type or the app/v1 deployment type |
-| balancer.tag | string | `nil` | |
-| balancer.tolerations | list | `[]` | Optional Configure kubernetes toleration for the created JuiceShops (see: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) |
-| imagePullPolicy | string | `"Always"` | |
-| ingress.annotations | object | `{}` | |
-| ingress.enabled | bool | `false` | |
-| ingress.hosts[0].host | string | `"wrongsecrets-ctf-party.local"` | |
-| ingress.hosts[0].paths[0] | string | `"/"` | |
-| ingress.tls | list | `[]` | |
-| juiceShop.affinity | object | `{}` | Optional Configure kubernetes scheduling affinity for the created JuiceShops (see: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) |
-| juiceShop.config | string | See values.yaml for full details | Specify a custom Juice Shop config.yaml. See the JuiceShop Config Docs for more detail: https://pwning.owasp-juice.shop/part1/customization.html#yaml-configuration-file |
-| juiceShop.ctfKey | string | `"zLp@.-6fMW6L-7R3b!9uR_K!NfkkTr"` | Change the key when hosting a CTF event. This key gets used to generate the challenge flags. See: https://pwning.owasp-juice.shop/part1/ctf.html#overriding-the-ctfkey |
-| juiceShop.env | list | `[]` | Optional environment variables to set for each JuiceShop instance (see: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/) |
-| juiceShop.envFrom | list | `[]` | |
-| juiceShop.image | string | `"bkimminich/juice-shop"` | Juice Shop Image to use |
-| juiceShop.maxInstances | int | `10` | Specifies how many JuiceShop instances MultiJuicer should start at max. Set to -1 to remove the max Juice Shop instance cap |
-| juiceShop.nodeEnv | string | `"multi-juicer"` | Specify a custom NODE_ENV for JuiceShop. If value is changed to something other than 'multi-juicer' it's not possible to set a custom config via `juiceShop.config`. |
-| juiceShop.resources | object | `{"requests":{"cpu":"150m","memory":"200Mi"}}` | Optional resources definitions to set for each JuiceShop instance |
-| juiceShop.securityContext | object | `{}` | |
-| juiceShop.tag | string | `"v12.8.1"` | |
-| juiceShop.tolerations | list | `[]` | Optional Configure kubernetes toleration for the created JuiceShops (see: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) |
-| juiceShop.volumeMounts | list | `[]` | |
-| juiceShop.volumes | list | `[]` | Optional Volumes to set for each JuiceShop instance (see: https://kubernetes.io/docs/concepts/storage/volumes/) |
-| wrongsecretsCleanup.affinity | object | `{}` | Optional Configure kubernetes scheduling affinity for the wrongsecretsCleanup Job(see: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) |
-| wrongsecretsCleanup.cron | string | `"0 * * * *"` | Cron in which the clean up job is run. Defaults to once in an hour. Change this if your grace period if shorter than 1 hour |
-| wrongsecretsCleanup.enabled | bool | `true` | |
-| wrongsecretsCleanup.failedJobsHistoryLimit | int | `1` | |
-| wrongsecretsCleanup.gracePeriod | string | `"1d"` | Specifies when Juice Shop instances will be deleted when unused for that period. |
-| wrongsecretsCleanup.repository | string | `"iteratec/cleaner"` | |
-| wrongsecretsCleanup.resources.limits.memory | string | `"256Mi"` | |
-| wrongsecretsCleanup.resources.requests.memory | string | `"256Mi"` | |
-| wrongsecretsCleanup.securityContext | object | `{}` | |
-| wrongsecretsCleanup.successfulJobsHistoryLimit | int | `1` | |
-| wrongsecretsCleanup.tag | string | `nil` | |
-| wrongsecretsCleanup.tolerations | list | `[]` | Optional Configure kubernetes toleration for the wrongsecretsCleanup Job (see: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) |
-| nodeSelector | object | `{}` | |
-| progressWatchdog.affinity | object | `{}` | Optional Configure kubernetes scheduling affinity for the ProgressWatchdog (see: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) |
-| progressWatchdog.repository | string | `"iteratec/progress-watchdog"` | |
-| progressWatchdog.resources.limits.cpu | string | `"20m"` | |
-| progressWatchdog.resources.limits.memory | string | `"48Mi"` | |
-| progressWatchdog.resources.requests.cpu | string | `"20m"` | |
-| progressWatchdog.resources.requests.memory | string | `"48Mi"` | |
-| progressWatchdog.securityContext | object | `{}` | |
-| progressWatchdog.tag | string | `nil` | |
-| progressWatchdog.tolerations | list | `[]` | Optional Configure kubernetes toleration for the ProgressWatchdog (see: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) |
-| service.port | int | `3000` | |
-| service.type | string | `"ClusterIP"` | |
+## Usage
+
+[Helm](https://helm.sh) must be installed to use the charts. Please refer to
+Helm's [documentation](https://helm.sh/docs) to get started.
+
+Once Helm has been set up correctly, add the repo as follows:
+
+    helm repo add wrongsecrets https://wrongsecrets.github.io/wrongsecrets-ctf-party
+
+If you had already added this repo earlier, run `helm repo update` to retrieve
+the latest versions of the packages. You can then run `helm search repo
+wrongsecrets` to see the charts.
+
+To install the wrongsecrets-ctf-party chart:
+
+    helm install my-wrongsecrets-ctf-party wrongsecrets/wrongsecrets-ctf-party
+
+To uninstall the chart:
+
+    helm delete my-wrongsecrets-ctf-party
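Chart values can also be overridden at install time. A sketch using two parameters from the Values table below; the release name and the chosen values are illustrative:

```shell
# Pin the session cookie secret and cap the number of instances at install time.
helm install my-wrongsecrets-ctf-party wrongsecrets/wrongsecrets-ctf-party \
  --set balancer.cookie.cookieParserSecret="replace-with-24-char-string" \
  --set wrongsecrets.maxInstances=100
```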
+# wrongsecrets-ctf-party
+
+![Version: 0.1.0-alpha](https://img.shields.io/badge/Version-0.1.0--alpha-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.5.9](https://img.shields.io/badge/AppVersion-1.5.9-informational?style=flat-square)
+
+Run Multi User "Capture the Flags" or Security Trainings with OWASP Wrongsecrets
+
+**Homepage:** <https://owasp.org/www-project-wrongsecrets/>
+
+## Source Code
+
+* <https://github.com/OWASP/wrongsecrets-ctf-party>
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| balancer.affinity | object | `{}` | Optional Configure kubernetes scheduling affinity for the created wrongsecrets instances (see: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) |
+| balancer.basicAuth | object | `{"username":"admin"}` | Credentials used in wrongsecrets-balancer-secret to authenticate with the wrongsecrets-api |
+| balancer.basicAuth.username | string | `"admin"` | Username for the basic auth credentials |
+| balancer.containerPort | int | `3000` | Port to expose on the balancer pods which the container listens on |
+| balancer.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | |
+| balancer.containerSecurityContext.capabilities.add[0] | string | `"CAP_NET_ADMIN"` | |
+| balancer.containerSecurityContext.capabilities.add[1] | string | `"CAP_NET_BIND_SERVICE"` | |
+| balancer.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | |
+| balancer.containerSecurityContext.enabled | bool | `true` | If true, sets the securityContext on the created containers. This is required for the podSecurityPolicy to work |
+| balancer.containerSecurityContext.readOnlyRootFilesystem | bool | `true` | |
+| balancer.containerSecurityContext.runAsNonRoot | bool | `true` | |
+| balancer.containerSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
+| balancer.cookie.cookieParserSecret | string | `nil` | Set this to a fixed random alphanumeric string (recommended length: 24 chars). If not set, this gets randomly generated with every helm upgrade; each rotation invalidates all active cookies / sessions, requiring users to log in again. |
+| balancer.cookie.name | string | `"balancer"` | Changes the cookies name used to identify teams. Note: it will automatically be prefixed with "__Secure-" when balancer.cookie.secure is set to `true` |
+| balancer.cookie.secure | bool | `false` | Sets the secure attribute on the cookie so that it is only sent over https |
+| balancer.env.CHALLENGE33_VALUE | string | `"VkJVR2gzd3UvM0kxbmFIajFVZjk3WTBMcThCNS85MnExandwMy9hWVN3SFNKSThXcWRabllMajc4aEVTbGZQUEtmMVpLUGFwNHoyK3IrRzlOUndkRlUvWUJNVFkzY05ndU1tNUM2bDJwVEs5SmhQRm5VemVySXdNcm5odTlHanJxU0ZuL0J0T3ZMblFhL21TZ1hETkpZVU9VOGdDSEZzOUpFZVF2OWhwV3B5eGxCMk5xdTBNSHJQTk9EWTNab2hoa2pXWGF4YmpDWmk5U3BtSHlkVTA2WjdMcVd5RjM5RzZWOENGNkxCUGtkVW4zYUpBVisrRjBROUljU009Cg=="` | |
+| balancer.env.IRSA_ROLE | string | `"arn:aws:iam::233483431651:role/wrongsecrets-secret-manager"` | |
+| balancer.env.K8S_ENV | string | `"k8s"` | |
+| balancer.env.REACT_APP_ACCESS_PASSWORD | string | `""` | |
+| balancer.env.REACT_APP_CREATE_TEAM_HMAC_KEY | string | `"hardcodedkey"` | |
+| balancer.env.REACT_APP_CTFD_URL | string | `"https://ctfd.io"` | |
+| balancer.env.REACT_APP_HEROKU_WRONGSECRETS_URL | string | `"https://wrongsecrets-ctf.herokuapp.com"` | |
+| balancer.env.REACT_APP_MOVING_GIF_LOGO | string | `"https://i.gifer.com/9kGQ.gif"` | |
+| balancer.env.REACT_APP_S3_BUCKET_URL | string | `"s3://funstuff"` | |
+| balancer.env.SECRETS_MANAGER_SECRET_ID_1 | string | `"wrongsecret"` | |
+| balancer.env.SECRETS_MANAGER_SECRET_ID_2 | string | `"wrongsecret-2"` | |
+| balancer.livenessProbe | object | `{"httpGet":{"path":"/balancer/","port":"http"}}` | livenessProbe: Checks if the balancer pod is still alive |
+| balancer.metrics.basicAuth.password | string | `"ERzCT4pwBDxfCKRGmfrMa8KQ8sXf8GKy"` | Should be changed when metrics are enabled. |
+| balancer.metrics.basicAuth.username | string | `"prometheus-scraper"` | |
+| balancer.metrics.dashboards.enabled | bool | `false` | if true, creates a Grafana Dashboard Config Map. (also requires metrics.enabled to be true). These will automatically be imported by Grafana when using the Grafana helm chart, see: https://github.com/helm/charts/tree/main/stable/grafana#sidecar-for-dashboards |
+| balancer.metrics.enabled | bool | `true` | enables prometheus metrics for the balancer. If set to true you should change the prometheus-scraper password |
+| balancer.metrics.serviceMonitor.enabled | bool | `false` | If true, creates a Prometheus Operator ServiceMonitor (also requires metrics.enabled to be true). This will also deploy a servicemonitor which monitors metrics from the Wrongsecrets instances |
+| balancer.metrics.serviceMonitor.path | string | `"/balancer/metrics"` | Path to scrape for metrics |
+| balancer.metrics.serviceMonitor.targetPort | int | `3000` | Target port for the ServiceMonitor to scrape |
+| balancer.podSecurityContext.enabled | bool | `true` | If true, sets the securityContext on the created pods. This is required for the podSecurityPolicy to work |
+| balancer.podSecurityContext.fsGroup | int | `2000` | |
+| balancer.podSecurityContext.runAsGroup | int | `3000` | |
+| balancer.podSecurityContext.runAsUser | int | `1000` | |
+| balancer.podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
+| balancer.readinessProbe | object | `{"httpGet":{"path":"/balancer/","port":"http"}}` | readinessProbe: Checks if the balancer pod is ready to receive traffic |
+| balancer.replicas | int | `2` | Number of replicas of the wrongsecrets-balancer deployment. Changing this in a commit? PLEASE UPDATE THE GITHUB WORKFLOWS THEN! (NUMBER OF "TRUE") |
+| balancer.repository | string | `"jeroenwillemsen/wrongsecrets-balancer"` | |
+| balancer.resources | object | `{"limits":{"cpu":"1000m","memory":"1024Mi"},"requests":{"cpu":"400m","memory":"256Mi"}}` | Resource limits and requests for the balancer pods |
+| balancer.service.clusterIP | string | `nil` | internal cluster service IP |
+| balancer.service.externalIPs | string | `nil` | IP address to assign to load balancer (if supported) |
+| balancer.service.loadBalancerIP | string | `nil` | IP address to assign to load balancer (if supported) |
+| balancer.service.loadBalancerSourceRanges | string | `nil` | list of IP CIDRs allowed access to lb (if supported) |
+| balancer.service.type | string | `"ClusterIP"` | Kubernetes service type |
+| balancer.skipOwnerReference | bool | `false` | If set to true this skips setting ownerReferences on the teams wrongsecrets Deployment and Services. This lets MultiJuicer run in older kubernetes clusters which don't support the reference type or the app/v1 deployment type |
+| balancer.tag | string | `"1.6.6aws"` | |
+| balancer.tolerations | list | `[]` | Optional Configure kubernetes toleration for the created wrongsecrets instances (see: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) |
+| balancer.volumeMounts[0] | object | `{"mountPath":"/home/app/config/","name":"config-volume"}` | If true, creates a volumeMount for the created pods. This is required for the podSecurityPolicy to work |
+| balancer.volumes[0] | object | `{"configMap":{"name":"wrongsecrets-balancer-config"},"name":"config-volume"}` | If true, creates a volume for the created pods. This is required for the podSecurityPolicy to work |
+| imagePullPolicy | string | `"IfNotPresent"` | |
+| ingress.annotations | object | `{}` | Annotations to be added to the ingress object. |
+| ingress.enabled | bool | `false` | If true, Wrongsecrets will create an Ingress object for the balancer service. Useful if you want to expose the balancer service externally for example with a loadbalancer in order to view any webpages that are hosted on the balancer service. |
+| ingress.hosts | list | `[{"host":"wrongsecrets-ctf-party.local","paths":["/"]}]` | Hostnames to your Wrongsecrets balancer installation. |
+| ingress.tls | list | `[]` | TLS configuration for Wrongsecrets balancer |
+| nodeSelector | object | `{}` | |
+| service.port | int | `3000` | |
+| service.portName | string | `"web"` | |
+| service.type | string | `"ClusterIP"` | |
+| vaultContainer.affinity | object | `{}` | |
+| vaultContainer.envFrom | list | `[]` | |
+| vaultContainer.image | string | `"hashicorp/vault"` | Vault image to use |
+| vaultContainer.maxInstances | int | `500` | Specifies how many Vault instances the balancer should start at max. Set to -1 to remove the max instance cap |
+| vaultContainer.repository | string | `"commjoenie/wrongSecrets"` | |
+| vaultContainer.resources.limits.cpu | string | `"1200m"` | |
+| vaultContainer.resources.limits.memory | string | `"256mb"` | |
+| vaultContainer.resources.request.cpu | string | `"50m"` | |
+| vaultContainer.resources.request.memory | string | `"128mb"` | |
+| vaultContainer.runtimeClassName | object | `{}` | |
+| vaultContainer.securityContext.allowPrivilegeEscalation | bool | `false` | |
+| vaultContainer.securityContext.capabilities.drop[0] | string | `"ALL"` | |
+| vaultContainer.securityContext.readOnlyRootFilesystem | bool | `true` | |
+| vaultContainer.securityContext.runAsNonRoot | bool | `true` | |
+| vaultContainer.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
+| vaultContainer.tag | string | `"1.15.1"` | |
+| vaultContainer.tolerations | list | `[]` | |
+| virtualdesktop.affinity | object | `{}` | |
+| virtualdesktop.envFrom | list | `[]` | |
+| virtualdesktop.image | string | `"jeroenwillemsen/wrongsecrets-desktop-k8s"` | Wrongsecrets Image to use |
+| virtualdesktop.maxInstances | int | `500` | Specifies how many Wrongsecrets instances the balancer should start at max. Set to -1 to remove the max Wrongsecrets instance cap |
+| virtualdesktop.repository | string | `"commjoenie/wrongSecrets"` | |
+| virtualdesktop.resources.limits.cpu | string | `"1200m"` | |
+| virtualdesktop.resources.limits.memory | string | `"2GB"` | |
+| virtualdesktop.resources.request.cpu | string | `"50m"` | |
+| virtualdesktop.resources.request.memory | string | `"1GB"` | |
+| virtualdesktop.runtimeClassName | object | `{}` | |
+| virtualdesktop.securityContext.allowPrivilegeEscalation | bool | `false` | |
+| virtualdesktop.securityContext.capabilities.drop[0] | string | `"ALL"` | |
+| virtualdesktop.securityContext.readOnlyRootFilesystem | bool | `true` | |
+| virtualdesktop.securityContext.runAsNonRoot | bool | `true` | |
+| virtualdesktop.securityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
+| virtualdesktop.tag | string | `"1.6.6"` | |
+| virtualdesktop.tolerations | list | `[]` | |
+| wrongsecrets.affinity | object | `{}` | Optional Configure kubernetes scheduling affinity for the created Wrongsecrets instances (see: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) |
+| wrongsecrets.config | string | See values.yaml for full details | Specify a custom Wrongsecrets config.yaml. See the Wrongsecrets Docs for any needed ENVs: https://github.com/OWASP/wrongsecrets |
+| wrongsecrets.ctfKey | string | `"zLp@.-6fMW6L-7R3b!9uR_K!NfkkTr"` | Change the key when hosting a CTF event. This key gets used to generate the challenge flags. See: https://github.com/OWASP/wrongsecrets#ctf |
+| wrongsecrets.env | list | `[{"name":"K8S_ENV","value":"k8s"},{"name":"SPECIAL_K8S_SECRET","valueFrom":{"configMapKeyRef":{"key":"funny.entry","name":"secrets-file"}}},{"name":"SPECIAL_SPECIAL_K8S_SECRET","valueFrom":{"secretKeyRef":{"key":"funnier","name":"funnystuff"}}}]` | Optional environment variables to set for each Wrongsecrets instance (see: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/) |
+| wrongsecrets.envFrom | list | `[]` | Optional mount environment variables from configMaps or secrets (see: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables) |
+| wrongsecrets.image | string | `"jeroenwillemsen/wrongsecrets"` | Wrongsecrets Image to use |
+| wrongsecrets.maxInstances | int | `500` | Specifies how many Wrongsecrets instances should start at max. Set to -1 to remove the max Wrongsecrets instance cap |
+| wrongsecrets.nodeEnv | string | `"wrongsecrets-ctf-party"` | Specify a custom NODE_ENV for Wrongsecrets. If value is changed to something other than 'wrongsecrets-ctf-party' it's not possible to set a custom config via `wrongsecrets-balancer-config`. |
+| wrongsecrets.resources | object | `{"requests":{"cpu":"256Mi","memory":"300Mi"}}` | Optional resources definitions to set for each Wrongsecrets instance |
+| wrongsecrets.runtimeClassName | string | `nil` | Optional Can be used to configure the runtime class for the Wrongsecrets instances pods to add an additional layer of isolation to reduce the impact of potential container escapes. (see: https://kubernetes.io/docs/concepts/containers/runtime-class/) |
+| wrongsecrets.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true,"runAsNonRoot":true,"seccompProfile":{"type":"RuntimeDefault"}}` | Optional securityContext definitions to set for each Wrongsecrets instance |
+| wrongsecrets.tag | string | `"1.6.7RC3-no-vault"` | |
+| wrongsecrets.tolerations | list | `[]` | Optional Configure kubernetes toleration for the created Wrongsecrets instances (see: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) |
+| wrongsecrets.volumes | list | `[]` | Optional Volumes to set for each Wrongsecrets instance (see: https://kubernetes.io/docs/concepts/storage/volumes/) |
+| wrongsecretsCleanup.affinity | object | `{}` | Optional Configure kubernetes scheduling affinity for the wrongsecretsCleanup Job(see: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity) |
+| wrongsecretsCleanup.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | |
+| wrongsecretsCleanup.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | |
+| wrongsecretsCleanup.containerSecurityContext.enabled | bool | `true` | If true, sets the securityContext on the created containers. This is required for the podSecurityPolicy to work |
+| wrongsecretsCleanup.containerSecurityContext.readOnlyRootFilesystem | bool | `true` | |
+| wrongsecretsCleanup.containerSecurityContext.runAsNonRoot | bool | `true` | |
+| wrongsecretsCleanup.containerSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | |
+| wrongsecretsCleanup.cron | string | `"0,15,30,45 * * * *"` | Cron in which the clean up job is run. Defaults to once every quarter of an hour. Change this if your grace period is shorter than 15 minutes. See "https://crontab.guru/#0,15,30,45_*_*_*_*" for more details. |
+| wrongsecretsCleanup.enabled | bool | `true` | |
+| wrongsecretsCleanup.env.MAX_INACTIVE_DURATION | string | `"2d"` | |
+| wrongsecretsCleanup.env.SHOULD_DELETE | bool | `false` | |
+| wrongsecretsCleanup.failedJobsHistoryLimit | int | `1` | |
+| wrongsecretsCleanup.podSecurityContext.enabled | bool | `true` | If true, sets the securityContext on the created pods. This is required for the podSecurityPolicy to work |
+| wrongsecretsCleanup.podSecurityContext.fsGroup | int | `2000` | |
+| wrongsecretsCleanup.podSecurityContext.runAsGroup | int | `3000` | |
+| wrongsecretsCleanup.podSecurityContext.runAsUser | int | `1000` | |
+| wrongsecretsCleanup.repository | string | `"jeroenwillemsen/wrongsecrets-ctf-cleaner"` | |
+| wrongsecretsCleanup.resources.limits.memory | string | `"256Mi"` | |
+| wrongsecretsCleanup.resources.requests.memory | string | `"256Mi"` | |
+| wrongsecretsCleanup.successfulJobsHistoryLimit | int | `1` | |
+| wrongsecretsCleanup.tag | float | `0.4` | |
+| wrongsecretsCleanup.tolerations | list | `[]` | Optional Configure kubernetes toleration for the wrongsecretsCleanup Job (see: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) |
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.7.0](https://github.com/norwoodj/helm-docs/releases/v1.7.0)
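Because this README is autogenerated, textual changes belong in the chart metadata and the `.gotmpl` templates rather than in the README itself; regenerating it locally mirrors the publish workflow's invocation (assuming helm-docs is on PATH and you run from the repository root):

```shell
# Regenerate helm/wrongsecrets-ctf-party/README.md from the chart metadata and templates.
helm-docs -t helm/wrongsecrets-ctf-party/README.md.gotmpl -t helm/wrongsecrets-ctf-party/_template.gotmpl -o README.md
```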
diff --git a/helm/wrongsecrets-ctf-party/README.md.gotmpl b/helm/wrongsecrets-ctf-party/README.md.gotmpl
index fb1e29d30..f55cbb6f2 100644
--- a/helm/wrongsecrets-ctf-party/README.md.gotmpl
+++ b/helm/wrongsecrets-ctf-party/README.md.gotmpl
@@ -18,6 +18,24 @@ WrongSecrets CTF Party gives you the ability to run separate WrongSecrets instan
 It follows the same architecture as MultiJuicer below:
 ![MultiJuicer, High Level Architecture Diagram](https://raw.githubusercontent.com/iteratec/multi-juicer/main/high-level-architecture.svg)
 
-## Configuration
-{{ template "chart.valuesTable" . }}
+## Usage
+
+[Helm](https://helm.sh) must be installed to use the charts. Please refer to
+Helm's [documentation](https://helm.sh/docs) to get started.
+
+Once Helm has been set up correctly, add the repo as follows:
+
+    helm repo add wrongsecrets https://wrongsecrets.github.io/wrongsecrets-ctf-party
+
+If you had already added this repo earlier, run `helm repo update` to retrieve
+the latest versions of the packages. You can then run `helm search repo
+wrongsecrets` to see the charts.
+
+To install the wrongsecrets-ctf-party chart:
+
+    helm install my-wrongsecrets-ctf-party wrongsecrets/wrongsecrets-ctf-party
+
+To uninstall the chart:
+
+    helm delete my-wrongsecrets-ctf-party
diff --git a/readme.md b/readme.md
index ebf38df84..14c5a737a 100644
--- a/readme.md
+++ b/readme.md
@@ -97,14 +97,13 @@ To use the 2 domain setup with CTFD:
 This setup works best if you have Calico installed as your CNI, if you want to use the helm directly, without the AWS Challenges, do:
 
 ```shell
+helm repo add wrongsecrets https://wrongsecrets.github.io/wrongsecrets-ctf-party
 
-helm upgrade --install mj ./helm/wrongsecrets-ctf-party
+helm upgrade --install my-wrongsecrets-ctf-party wrongsecrets/wrongsecrets-ctf-party
 ```
 
-from this repo. We will host the helm chart soon for you.
-
-### Play with Minikube:
+Play with Minikube:
 
 ** NOTE: The below steps require at least minikube version v1.30.1 and yq (https://github.com/mikefarah/yq/) version v4.34.1.
 **
 
@@ -164,13 +163,15 @@ The default ctfd config values are here: [aws/k8s/ctfd-values.yaml](aws/k8s/ctfd
 Download & Save the file and tell helm to use your config file over the default by running:
 
 ```sh
-helm install -f values.yaml wrongsecrets-ctf-party ./wrongsecrets-ctf-party/helm/wrongsecrets-ctf-party/
+helm repo add wrongsecrets https://wrongsecrets.github.io/wrongsecrets-ctf-party
+
+helm install -f values.yaml my-wrongsecrets-ctf-party wrongsecrets/wrongsecrets-ctf-party
 ```
 
 ### Deinstallation
 
 ```sh
-helm delete wrongsecrets-ctf-party
+helm delete my-wrongsecrets-ctf-party
 ```
 
 And if you are running AWS (including CTFd):
@@ -224,7 +225,6 @@ kubectl -n kube-system get pod -l component=kube-apiserver -o=jsonpath="{.items[
 
 Still having trouble to connect to that host at that port? run `./scripts/patch-nsp-for-kubectl.sh` to make sure the NSPs are updated.
 
-
 ## Talk with Us!
 
 You can reach us in the `#project-wrongsecrets` channel of the OWASP Slack Workspace. We'd love to hear any feedback or usage reports you got. If you are not already in the OWASP Slack Workspace, you can join via [this link](https://owasp.slack.com/join/shared_invite/enQtNjExMTc3MTg0MzU4LWQ2Nzg3NGJiZGQ2MjRmNzkzN2Q4YzU1MWYyZTdjYjA2ZTA5M2RkNzE2ZjdkNzI5ZThhOWY5MjljYWZmYmY4ZjM)