From d574a985fe8349b8a28104e59ebdad58727f1816 Mon Sep 17 00:00:00 2001 From: ashnamehrotra Date: Fri, 12 Jan 2024 01:53:46 +0000 Subject: [PATCH] chore: Prepare v1.4.0-beta.0 release Signed-off-by: Sertac Ozercan --- Makefile | 2 +- charts/eraser/Chart.yaml | 4 +- charts/eraser/values.yaml | 15 +- deploy/eraser.yaml | 15 +- .../version-v1.4.0-beta.0/architecture.md | 21 ++ .../version-v1.4.0-beta.0/code-of-conduct.md | 10 + .../version-v1.4.0-beta.0/contributing.md | 14 + .../version-v1.4.0-beta.0/custom-scanner.md | 12 + .../version-v1.4.0-beta.0/customization.md | 230 ++++++++++++++ .../version-v1.4.0-beta.0/exclusion.md | 25 ++ .../version-v1.4.0-beta.0/faq.md | 12 + .../version-v1.4.0-beta.0/installation.md | 15 + .../version-v1.4.0-beta.0/introduction.md | 10 + .../version-v1.4.0-beta.0/manual-removal.md | 59 ++++ .../version-v1.4.0-beta.0/metrics.md | 35 ++ .../version-v1.4.0-beta.0/quick-start.md | 103 ++++++ .../release-management.md | 67 ++++ .../version-v1.4.0-beta.0/releasing.md | 32 ++ .../version-v1.4.0-beta.0/setup.md | 299 ++++++++++++++++++ .../version-v1.4.0-beta.0/trivy.md | 6 + .../version-v1.4.0-beta.0-sidebars.json | 43 +++ docs/versions.json | 1 + manifest_staging/charts/eraser/Chart.yaml | 4 +- manifest_staging/charts/eraser/values.yaml | 8 +- manifest_staging/deploy/eraser.yaml | 8 +- .../gatekeeper/helmify/static/Chart.yaml | 4 +- .../gatekeeper/helmify/static/values.yaml | 8 +- 27 files changed, 1031 insertions(+), 31 deletions(-) create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/architecture.md create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/code-of-conduct.md create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/contributing.md create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/custom-scanner.md create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/customization.md create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/exclusion.md create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/faq.md create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/installation.md create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/introduction.md create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/manual-removal.md create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/metrics.md create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/quick-start.md create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/release-management.md create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/releasing.md create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/setup.md create mode 100644 docs/versioned_docs/version-v1.4.0-beta.0/trivy.md create mode 100644 docs/versioned_sidebars/version-v1.4.0-beta.0-sidebars.json diff --git a/Makefile b/Makefile index 6ec84337be..a3ae8e5fdd 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -VERSION := v1.3.0-beta.0 +VERSION := v1.4.0-beta.0 MANAGER_TAG ?= ${VERSION} TRIVY_SCANNER_TAG ?= ${VERSION} diff --git a/charts/eraser/Chart.yaml b/charts/eraser/Chart.yaml index e07afb0d12..16a9923ecf 100644 --- a/charts/eraser/Chart.yaml +++ b/charts/eraser/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: eraser description: A Helm chart for Eraser type: application -version: 1.3.0-beta.0 -appVersion: v1.3.0-beta.0 +version: 1.4.0-beta.0 +appVersion: v1.4.0-beta.0 home: https://github.com/eraser-dev/eraser sources: - https://github.com/eraser-dev/eraser.git diff --git a/charts/eraser/values.yaml b/charts/eraser/values.yaml index 
9a58c912ab..484d3ca3c2 100644 --- a/charts/eraser/values.yaml +++ b/charts/eraser/values.yaml @@ -1,5 +1,5 @@ runtimeConfig: - apiVersion: eraser.sh/v1alpha2 + apiVersion: eraser.sh/v1alpha3 kind: EraserConfig health: {} # healthProbeBindAddress: :8081 @@ -11,7 +11,9 @@ runtimeConfig: # leaderElect: true # resourceName: e29e094a.k8s.io manager: - runtime: containerd + runtime: + name: containerd + address: unix:///run/containerd/containerd.sock otlpEndpoint: "" logLevel: info scheduling: {} @@ -37,7 +39,7 @@ runtimeConfig: enabled: true image: # repo: "" - tag: "v1.3.0-beta.0" + tag: "v1.4.0-beta.0" request: {} # mem: "" # cpu: "" @@ -48,7 +50,7 @@ runtimeConfig: enabled: true image: # repo: "" - tag: "v1.3.0-beta.0" + tag: "v1.4.0-beta.0" request: {} # mem: "" # cpu: "" @@ -72,13 +74,14 @@ runtimeConfig: # - HIGH # - MEDIUM # - LOW + # ignoredStatuses: # timeout: # total: 23h # perImage: 1h remover: image: # repo: "" - tag: "v1.3.0-beta.0" + tag: "v1.4.0-beta.0" request: {} # mem: "" # cpu: "" @@ -91,7 +94,7 @@ deploy: repo: ghcr.io/eraser-dev/eraser-manager pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. - tag: "v1.3.0-beta.0" + tag: "v1.4.0-beta.0" additionalArgs: [] priorityClassName: "" diff --git a/deploy/eraser.yaml b/deploy/eraser.yaml index 7a5eee8d24..f79dc8a462 100644 --- a/deploy/eraser.yaml +++ b/deploy/eraser.yaml @@ -382,10 +382,12 @@ subjects: apiVersion: v1 data: controller_manager_config.yaml: | - apiVersion: eraser.sh/v1alpha2 + apiVersion: eraser.sh/v1alpha3 kind: EraserConfig manager: - runtime: containerd + runtime: + name: containerd + address: unix:///run/containerd/containerd.sock otlpEndpoint: "" logLevel: info scheduling: @@ -411,7 +413,7 @@ data: enabled: true image: repo: ghcr.io/eraser-dev/collector - tag: v1.3.0-beta.0 + tag: v1.4.0-beta.0 request: mem: 25Mi cpu: 7m @@ -423,7 +425,7 @@ data: enabled: true image: repo: ghcr.io/eraser-dev/eraser-trivy-scanner # supply custom image for custom scanner - tag: v1.3.0-beta.0 + tag: v1.4.0-beta.0 request: mem: 500Mi cpu: 1000m @@ -453,13 +455,14 @@ data: - HIGH - MEDIUM - LOW + ignoredStatuses: timeout: total: 23h perImage: 1h remover: image: repo: ghcr.io/eraser-dev/remover - tag: v1.3.0-beta.0 + tag: v1.4.0-beta.0 request: mem: 25Mi # https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#how-pods-with-resource-limits-are-run @@ -502,7 +505,7 @@ spec: fieldPath: metadata.namespace - name: OTEL_SERVICE_NAME value: eraser-manager - image: ghcr.io/eraser-dev/eraser-manager:v1.3.0-beta.0 + image: ghcr.io/eraser-dev/eraser-manager:v1.4.0-beta.0 livenessProbe: httpGet: path: /healthz diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/architecture.md b/docs/versioned_docs/version-v1.4.0-beta.0/architecture.md new file mode 100644 index 0000000000..0ec0c9f4d1 --- /dev/null +++ b/docs/versioned_docs/version-v1.4.0-beta.0/architecture.md @@ -0,0 +1,21 @@ +--- +title: Architecture +--- +At a high level, Eraser has two main modes of operation: manual and automated. + +Manual image removal involves supplying a list of images to remove; Eraser then +deploys pods to clean up the images you supplied. + +Automated image removal runs on a timer. By default, the automated process +removes images based on the results of a vulnerability scan. The default +vulnerability scanner is Trivy, but others can be provided in its place. 
Or,
+the scanner can be disabled altogether, in which case Eraser acts as a garbage
+collector -- it will remove all non-running images in your cluster.
+
+## Manual image cleanup
+
+
+
+## Automated analysis, scanning, and cleanup
+
+
diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/code-of-conduct.md b/docs/versioned_docs/version-v1.4.0-beta.0/code-of-conduct.md
new file mode 100644
index 0000000000..81bc080653
--- /dev/null
+++ b/docs/versioned_docs/version-v1.4.0-beta.0/code-of-conduct.md
@@ -0,0 +1,10 @@
+---
+title: Code of Conduct
+---
+
+This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
+
+Resources:
+
+- [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
+- [Code of Conduct Reporting](https://github.com/cncf/foundation/blob/main/code-of-conduct.md)
diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/contributing.md b/docs/versioned_docs/version-v1.4.0-beta.0/contributing.md
new file mode 100644
index 0000000000..a74c2389ea
--- /dev/null
+++ b/docs/versioned_docs/version-v1.4.0-beta.0/contributing.md
@@ -0,0 +1,14 @@
+---
+title: Contributing
+---
+
+There are several ways to get involved with Eraser:
+
+- Join the [mailing list](https://groups.google.com/u/1/g/eraser-dev) to get notifications for releases, security announcements, etc.
+- Participate in the [biweekly community meetings](https://docs.google.com/document/d/1Sj5u47K3WUGYNPmQHGFpb52auqZb1FxSlWAQnPADhWI/edit) to discuss development, issues, use cases, etc.
+- Join the `#eraser` channel on the [Kubernetes Slack](https://slack.k8s.io/)
+- View the [development setup instructions](https://eraser-dev.github.io/eraser/docs/development)
+
+This project welcomes contributions and suggestions.
+
+This project has adopted the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
\ No newline at end of file
diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/custom-scanner.md b/docs/versioned_docs/version-v1.4.0-beta.0/custom-scanner.md
new file mode 100644
index 0000000000..d7235abbde
--- /dev/null
+++ b/docs/versioned_docs/version-v1.4.0-beta.0/custom-scanner.md
@@ -0,0 +1,12 @@
+---
+title: Custom Scanner
+---
+
+## Creating a Custom Scanner
+To create a custom scanner for non-compliant images, use the following [template](https://github.com/eraser-dev/eraser-scanner-template/).
+
+To customize your scanner, start by creating a `NewImageProvider()`. The ImageProvider interface can be found [here](../../pkg/scanners/template/scanner_template.go).
+
+The ImageProvider will allow you to retrieve the list of all non-running and non-excluded images from the collector container through the `ReceiveImages()` function. Process these images with your customized scanner and threshold, and use `SendImages()` to pass the images found non-compliant to the eraser container for removal. Finally, complete the scanning process by calling `Finish()`.
+
+When complete, provide your custom scanner image to Eraser in deployment.
diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/customization.md b/docs/versioned_docs/version-v1.4.0-beta.0/customization.md
new file mode 100644
index 0000000000..a05a4cf353
--- /dev/null
+++ b/docs/versioned_docs/version-v1.4.0-beta.0/customization.md
@@ -0,0 +1,230 @@
+---
+title: Customization
+---
+
+## Overview
+
+Eraser uses a configmap to configure its behavior.
The configmap is part of the +deployment and it is not necessary to deploy it manually. Once deployed, the configmap +can be edited at any time: + +```bash +kubectl edit configmap --namespace eraser-system eraser-manager-config +``` + +If an eraser job is already running, the changes will not take effect until the job completes. +The configuration is in yaml. + +## Key Concepts + +### Basic architecture + +The _manager_ runs as a pod in your cluster and manages _ImageJobs_. Think of +an _ImageJob_ as a unit of work, performed on every node in your cluster. Each +node runs a sub-job. The goal of the _ImageJob_ is to assess the images on your +cluster's nodes, and to remove the images you don't want. There are two stages: +1. Assessment +1. Removal. + + +### Scheduling + +An _ImageJob_ can either be created on-demand (see [Manual Removal](https://eraser-dev.github.io/eraser/docs/manual-removal)), +or they can be spawned on a timer like a cron job. On-demand jobs skip the +assessment stage and get right down to the business of removing the images you +specified. The behavior of an on-demand job is quite different from that of +timed jobs. + +### Fault Tolerance + +Because an _ImageJob_ runs on every node in your cluster, and the conditions on +each node may vary widely, some of the sub-jobs may fail. If you cannot +tolerate any failure, set the `manager.imageJob.successRatio` property to +`1.0`. If 75% success sounds good to you, set it to `0.75`. In that case, if +fewer than 75% of the pods spawned by the _ImageJob_ report success, the job as +a whole will be marked as a failure. + +This is mainly to help diagnose error conditions. As such, you can set +`manager.imageJob.cleanup.delayOnFailure` to a long value so that logs can be +captured before the spawned pods are cleaned up. + +### Excluding Nodes + +For various reasons, you may want to prevent Eraser from scheduling pods on +certain nodes. To do so, the nodes can be given a special label. By default, +this label is `eraser.sh/cleanup.filter`, but you can configure the behavior with +the options under `manager.nodeFilter`. The [table](#detailed-options) provides more detail. + +### Configuring Components + +An _ImageJob_ is made up of various sub-jobs, with one sub-job for each node. +These sub-jobs can be broken down further into three stages. +1. Collection (What is on the node?) +1. Scanning (What images conform to the policy I've provided?) +1. Removal (Remove images based on the results of the above) + +Of the above stages, only Removal is mandatory. The others can be disabled. +Furthermore, manually triggered _ImageJobs_ will skip right to removal, even if +Eraser is configured to collect and scan. Collection and Scanning will only +take place when: +1. The collector and/or scanner `components` are enabled, AND +1. The job was *not* triggered manually by creating an _ImageList_. + +### Swapping out components + +The collector, scanner, and remover components can all be swapped out. This +enables you to build and host the images yourself. In addition, the scanner's +behavior can be completely tailored to your needs by swapping out the default +image with one of your own. To specify the images, use the +`components..image.repo` and `components..image.tag`, +where `` is one of `collector`, `scanner`, or `remover`. + +## Universal Options + +The following portions of the configmap apply no matter how you spawn your +_ImageJob_. The values provided below are the defaults. 
For more detail on +these options, see the [table](#detailed-options). + +```yaml +manager: + runtime: + name: containerd + address: unix:///run/containerd/containerd.sock + otlpEndpoint: "" # empty string disables OpenTelemetry + logLevel: info + profile: + enabled: false + port: 6060 + imageJob: + successRatio: 1.0 + cleanup: + delayOnSuccess: 0s + delayOnFailure: 24h + pullSecrets: [] # image pull secrets for collector/scanner/remover + priorityClassName: "" # priority class name for collector/scanner/remover + nodeFilter: + type: exclude # must be either exclude|include + selectors: + - eraser.sh/cleanup.filter + - kubernetes.io/os=windows +components: + remover: + image: + repo: ghcr.io/eraser-dev/remover + tag: v1.0.0 + request: + mem: 25Mi + cpu: 0 + limit: + mem: 30Mi + cpu: 1000m +``` + +## Component Options + +```yaml +components: + collector: + enabled: true + image: + repo: ghcr.io/eraser-dev/collector + tag: v1.0.0 + request: + mem: 25Mi + cpu: 7m + limit: + mem: 500Mi + cpu: 0 + scanner: + enabled: true + image: + repo: ghcr.io/eraser-dev/eraser-trivy-scanner + tag: v1.0.0 + request: + mem: 500Mi + cpu: 1000m + limit: + mem: 2Gi + cpu: 0 + config: | + # this is the schema for the provided 'trivy-scanner'. custom scanners + # will define their own configuration. see the below + remover: + image: + repo: ghcr.io/eraser-dev/remover + tag: v1.0.0 + request: + mem: 25Mi + cpu: 0 + limit: + mem: 30Mi + cpu: 1000m +``` + +## Scanner Options + +These options can be provided to `components.scanner.config`. They will be +passed through as a string to the scanner container and parsed there. If you +want to configure your own scanner, you must provide some way to parse this. + +Below are the values recognized by the provided `eraser-trivy-scanner` image. +Values provided below are the defaults. + +```yaml +cacheDir: /var/lib/trivy # The file path inside the container to store the cache +dbRepo: ghcr.io/aquasecurity/trivy-db # The container registry from which to fetch the trivy database +deleteFailedImages: true # if true, remove images for which scanning fails, regardless of why it failed +deleteEOLImages: true # if true, remove images that have reached their end-of-life date +vulnerabilities: + ignoreUnfixed: true # consider the image compliant if there are no known fixes for the vulnerabilities found. + types: # a list of vulnerability types. for more info, see trivy's documentation. + - os + - library + securityChecks: # see trivy's documentation for more information + - vuln + severities: # in this case, only flag images with CRITICAL vulnerability for removal + - CRITICAL + ignoredStatuses: # a list of trivy statuses to ignore. See https://aquasecurity.github.io/trivy/v0.44/docs/configuration/filtering/#by-status. +timeout: + total: 23h # if scanning isn't completed before this much time elapses, abort the whole scan + perImage: 1h # if scanning a single image exceeds this time, scanning will be aborted +``` + +## Detailed Options + +| Option | Description | Default | +| --- | --- | --- | +| manager.runtime.name | The runtime to use for the manager's containers. Must be one of containerd, crio, or dockershim. It is assumed that your nodes are all using the same runtime, and there is currently no way to configure multiple runtimes. | containerd | +| manager.runtime.address | The runtime socket address to use for the containers. Can provide a custom address for containerd and dockershim runtimes, but not for crio due to Trivy restrictions. 
| unix:///run/containerd/containerd.sock | +| manager.otlpEndpoint | The endpoint to send OpenTelemetry data to. If empty, data will not be sent. | "" | +| manager.logLevel | The log level for the manager's containers. Must be one of debug, info, warn, error, dpanic, panic, or fatal. | info | +| manager.scheduling.repeatInterval | Use only when collector ando/or scanner are enabled. This is like a cron job, and will spawn an _ImageJob_ at the interval provided. | 24h | +| manager.scheduling.beginImmediately | If set to true, the fist _ImageJob_ will run immediately. If false, the job will not be spawned until after the interval (above) has elapsed. | true | +| manager.profile.enabled | Whether to enable profiling for the manager's containers. This is for debugging with `go tool pprof`. | false | +| manager.profile.port | The port on which to expose the profiling endpoint. | 6060 | +| manager.imageJob.successRatio | The ratio of successful image jobs required before a cleanup is performed. | 1.0 | +| manager.imageJob.cleanup.delayOnSuccess | The amount of time to wait after a successful image job before performing cleanup. | 0s | +| manager.imageJob.cleanup.delayOnFailure | The amount of time to wait after a failed image job before performing cleanup. | 24h | +| manager.pullSecrets | The image pull secrets to use for collector, scanner, and remover containers. | [] | +| manager.priorityClassName | The priority class to use for collector, scanner, and remover containers. | "" | +| manager.nodeFilter.type | The type of node filter to use. Must be either "exclude" or "include". | exclude | +| manager.nodeFilter.selectors | A list of selectors used to filter nodes. | [] | +| components.collector.enabled | Whether to enable the collector component. | true | +| components.collector.image.repo | The repository containing the collector image. | ghcr.io/eraser-dev/collector | +| components.collector.image.tag | The tag of the collector image. | v1.0.0 | +| components.collector.request.mem | The amount of memory to request for the collector container. | 25Mi | +| components.collector.request.cpu | The amount of CPU to request for the collector container. | 7m | +| components.collector.limit.mem | The maximum amount of memory the collector container is allowed to use. | 500Mi | +| components.collector.limit.cpu | The maximum amount of CPU the collector container is allowed to use. | 0 | +| components.scanner.enabled | Whether to enable the scanner component. | true | +| components.scanner.image.repo | The repository containing the scanner image. | ghcr.io/eraser-dev/eraser-trivy-scanner | +| components.scanner.image.tag | The tag of the scanner image. | v1.0.0 | +| components.scanner.request.mem | The amount of memory to request for the scanner container. | 500Mi | +| components.scanner.request.cpu | The amount of CPU to request for the scanner container. | 1000m | +| components.scanner.limit.mem | The maximum amount of memory the scanner container is allowed to use. | 2Gi | +| components.scanner.limit.cpu | The maximum amount of CPU the scanner container is allowed to use. | 0 | +| components.scanner.config | The configuration to pass to the scanner container, as a YAML string. | See YAML below | +| components.remover.image.repo | The repository containing the remover image. | ghcr.io/eraser-dev/remover | +| components.remover.image.tag | The tag of the remover image. | v1.0.0 | +| components.remover.request.mem | The amount of memory to request for the remover container. 
| 25Mi | +| components.remover.request.cpu | The amount of CPU to request for the remover container. | 0 | diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/exclusion.md b/docs/versioned_docs/version-v1.4.0-beta.0/exclusion.md new file mode 100644 index 0000000000..b43425d147 --- /dev/null +++ b/docs/versioned_docs/version-v1.4.0-beta.0/exclusion.md @@ -0,0 +1,25 @@ +--- +title: Exclusion +--- + +## Excluding registries, repositories, and images +Eraser can exclude registries (example, `docker.io/library/*`) and also specific images with a tag (example, `docker.io/library/ubuntu:18.04`) or digest (example, `sha256:80f31da1ac7b312ba29d65080fd...`) from its removal process. + +To exclude any images or registries from the removal, create configmap(s) with the label `eraser.sh/exclude.list=true` in the eraser-system namespace with a JSON file holding the excluded images. + +```bash +$ cat > sample.json <<"EOF" +{ + "excluded": [ + "docker.io/library/*", + "ghcr.io/eraser-dev/test:latest" + ] +} +EOF + +$ kubectl create configmap excluded --from-file=sample.json --namespace=eraser-system +$ kubectl label configmap excluded eraser.sh/exclude.list=true -n eraser-system +``` + +## Exempting Nodes from the Eraser Pipeline +Exempting nodes from cleanup was added in v1.0.0. When deploying Eraser, you can specify whether there is a list of nodes you would like to `include` or `exclude` from the cleanup process using the configmap. For more information, see the section on [customization](https://eraser-dev.github.io/eraser/docs/customization). diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/faq.md b/docs/versioned_docs/version-v1.4.0-beta.0/faq.md new file mode 100644 index 0000000000..d0fb4f99e8 --- /dev/null +++ b/docs/versioned_docs/version-v1.4.0-beta.0/faq.md @@ -0,0 +1,12 @@ +--- +title: FAQ +--- +## Why am I still seeing vulnerable images? +Eraser currently targets **non-running** images, so any vulnerable images that are currently running will not be removed. In addition, the default vulnerability scanning with Trivy removes images with `CRITICAL` vulnerabilities. Any images with lower vulnerabilities will not be removed. This can be configured using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#scanner-options). + +## How is Eraser different from Kubernetes garbage collection? +The native garbage collection in Kubernetes works a bit differently than Eraser. By default, garbage collection begins when disk usage reaches 85%, and stops when it gets down to 80%. More details about Kubernetes garbage collection can be found in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/architecture/garbage-collection/), and configuration options can be found in the [Kubelet documentation](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/). 
+ +There are a couple core benefits to using Eraser for image cleanup: +* Eraser can be configured to use image vulnerability data when making determinations on image removal +* By interfacing directly with the container runtime, Eraser can clean up images that are not managed by Kubelet and Kubernetes diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/installation.md b/docs/versioned_docs/version-v1.4.0-beta.0/installation.md new file mode 100644 index 0000000000..dbf84f7645 --- /dev/null +++ b/docs/versioned_docs/version-v1.4.0-beta.0/installation.md @@ -0,0 +1,15 @@ +--- +title: Installation +--- + +## Manifest + +To install Eraser with the manifest file, run the following command: + +```bash +kubectl apply -f https://raw.githubusercontent.com/eraser-dev/eraser/v1.1.0-beta.0/deploy/eraser.yaml +``` + +## Helm + +If you'd like to install and manage Eraser with Helm, follow the install instructions [here](https://github.com/eraser-dev/eraser/blob/main/charts/eraser/README.md) diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/introduction.md b/docs/versioned_docs/version-v1.4.0-beta.0/introduction.md new file mode 100644 index 0000000000..623ce143bb --- /dev/null +++ b/docs/versioned_docs/version-v1.4.0-beta.0/introduction.md @@ -0,0 +1,10 @@ +--- +title: Introduction +slug: / +--- + +# Introduction + +When deploying to Kubernetes, it's common for pipelines to build and push images to a cluster, but it's much less common for these images to be cleaned up. This can lead to accumulating bloat on the disk, and a host of non-compliant images lingering on the nodes. + +The current garbage collection process deletes images based on a percentage of load, but this process does not consider the vulnerability state of the images. **Eraser** aims to provide a simple way to determine the state of an image, and delete it if it meets the specified criteria. diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/manual-removal.md b/docs/versioned_docs/version-v1.4.0-beta.0/manual-removal.md new file mode 100644 index 0000000000..3cb9d0ef51 --- /dev/null +++ b/docs/versioned_docs/version-v1.4.0-beta.0/manual-removal.md @@ -0,0 +1,59 @@ +--- +title: Manual Removal +--- + +Create an `ImageList` and specify the images you would like to remove. In this case, the image `docker.io/library/alpine:3.7.3` will be removed. + +```shell +cat < `ImageList` is a cluster-scoped resource and must be called imagelist. `"*"` can be specified to remove all non-running images instead of individual images. + +Creating an `ImageList` should trigger an `ImageJob` that will deploy Eraser pods on every node to perform the removal given the list of images. + +```shell +$ kubectl get pods -n eraser-system +eraser-system eraser-controller-manager-55d54c4fb6-dcglq 1/1 Running 0 9m8s +eraser-system eraser-kind-control-plane 1/1 Running 0 11s +eraser-system eraser-kind-worker 1/1 Running 0 11s +eraser-system eraser-kind-worker2 1/1 Running 0 11s +``` + +Pods will run to completion and the images will be removed. + +```shell +$ kubectl get pods -n eraser-system +eraser-system eraser-controller-manager-6d6d5594d4-phl2q 1/1 Running 0 4m16s +eraser-system eraser-kind-control-plane 0/1 Completed 0 22s +eraser-system eraser-kind-worker 0/1 Completed 0 22s +eraser-system eraser-kind-worker2 0/1 Completed 0 22s +``` + +The `ImageList` custom resource status field will contain the status of the last job. The success and failure counts indicate the number of nodes the Eraser agent was run on. 
+ +```shell +$ kubectl describe ImageList imagelist +... +Status: + Failed: 0 + Success: 3 + Timestamp: 2022-02-25T23:41:55Z +... +``` + +Verify the unused images are removed. + +```shell +$ docker exec kind-worker ctr -n k8s.io images list | grep alpine +``` + +If the image has been successfully removed, there will be no output. diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/metrics.md b/docs/versioned_docs/version-v1.4.0-beta.0/metrics.md new file mode 100644 index 0000000000..4de1b47f62 --- /dev/null +++ b/docs/versioned_docs/version-v1.4.0-beta.0/metrics.md @@ -0,0 +1,35 @@ +--- +title: Metrics +--- + +To view Eraser metrics, you will need to deploy an Open Telemetry collector in the 'eraser-system' namespace, and an exporter. An example collector with a Prometheus exporter is [otelcollector.yaml](https://github.com/eraser-dev/eraser/blob/main/test/e2e/test-data/otelcollector.yaml), and the endpoint can be specified using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#universal-options). In this example, we are logging the collected data to the otel-collector pod, and exporting metrics through Prometheus at 'http://localhost:8889/metrics', but a separate exporter can also be configured. + +Below is the list of metrics provided by Eraser per run: + +#### Eraser +```yaml +- count + - name: images_removed_run_total + - description: Total images removed by eraser +``` + + #### Scanner + ```yaml +- count + - name: vulnerable_images_run_total + - description: Total vulnerable images detected + ``` + + #### ImageJob + ```yaml + - count + - name: imagejob_run_total + - description: Total ImageJobs scheduled + - name: pods_completed_run_total + - description: Total pods completed + - name: pods_failed_run_total + - description: Total pods failed +- summary + - name: imagejob_duration_run_seconds + - description: Total time for ImageJobs scheduled to complete +``` diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/quick-start.md b/docs/versioned_docs/version-v1.4.0-beta.0/quick-start.md new file mode 100644 index 0000000000..601e474830 --- /dev/null +++ b/docs/versioned_docs/version-v1.4.0-beta.0/quick-start.md @@ -0,0 +1,103 @@ +--- +title: Quick Start +--- + +This tutorial demonstrates the functionality of Eraser and validates that non-running images are removed succesfully. + +## Deploy a DaemonSet + +After following the [install instructions](installation.md), we'll apply a demo `DaemonSet`. For illustrative purposes, a DaemonSet is applied and deleted so the non-running images remain on all nodes. The alpine image with the `3.7.3` tag will be used in this example. This is an image with a known critical vulnerability. 
+ +First, apply the `DaemonSet`: + +```shell +cat < 45m v1.24.0 +kind-worker2 Ready 44m v1.24.0 +``` + +List the images then filter for `alpine`: + +```shell +$ docker exec kind-worker ctr -n k8s.io images list | grep alpine +docker.io/library/alpine:3.7.3 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed +docker.io/library/alpine@sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 application/vnd.docker.distribution.manifest.list.v2+json sha256:8421d9a84432575381bfabd248f1eb56f3aa21d9d7cd2511583c68c9b7511d10 2.0 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm64/v8,linux/ppc64le,linux/s390x io.cri-containerd.image=managed + +``` + +## Automatically Cleaning Images + +After deploying Eraser, it will automatically clean images in a regular interval. This interval can be set using the `manager.scheduling.repeatInterval` setting in the [configmap](https://eraser-dev.github.io/eraser/docs/customization#detailed-options). The default interval is 24 hours (`24h`). Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + +Eraser will schedule eraser pods to each node in the cluster, and each pod will contain 3 containers: collector, scanner, and remover that will run to completion. + +```shell +$ kubectl get pods -n eraser-system +NAMESPACE NAME READY STATUS RESTARTS AGE +eraser-system eraser-kind-control-plane-sb789 0/3 Completed 0 26m +eraser-system eraser-kind-worker-j84hm 0/3 Completed 0 26m +eraser-system eraser-kind-worker2-4lbdr 0/3 Completed 0 26m +eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 26m +``` + +The collector container sends the list of all images to the scanner container, which scans and reports non-compliant images to the remover container for removal of images that are non-running. Once all pods are completed, they will be automatically cleaned up. + +> If you want to remove all the images periodically, you can skip the scanner container by setting the `components.scanner.enabled` value to `false` using the [configmap](https://eraser-dev.github.io/eraser/docs/customization#detailed-options). In this case, each collector pod will hold 2 containers: collector and remover. + +```shell +$ kubectl get pods -n eraser-system +NAMESPACE NAME READY STATUS RESTARTS AGE +eraser-system eraser-kind-control-plane-ksk2b 0/2 Completed 0 50s +eraser-system eraser-kind-worker-cpgqc 0/2 Completed 0 50s +eraser-system eraser-kind-worker2-k25df 0/2 Completed 0 50s +eraser-system eraser-controller-manager-86cdb4cbf9-x8d7q 1/1 Running 0 55s +``` diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/release-management.md b/docs/versioned_docs/version-v1.4.0-beta.0/release-management.md new file mode 100644 index 0000000000..02fecdee4e --- /dev/null +++ b/docs/versioned_docs/version-v1.4.0-beta.0/release-management.md @@ -0,0 +1,67 @@ +# Release Management + +## Overview + +This document describes Eraser project release management, which includes release versioning, supported releases, and supported upgrades. + +## Legend + +- **X.Y.Z** refers to the version (git tag) of Eraser that is released. This is the version of the Eraser images and the Chart version. 
+- **Breaking changes** refer to schema changes, flag changes, and behavior changes of Eraser that may require a clean installation during upgrade, and it may introduce changes that could break backward compatibility. +- **Milestone** should be designed to include feature sets to accommodate 2 months release cycles including test gates. GitHub's milestones are used by maintainers to manage each release. PRs and Issues for each release should be created as part of a corresponding milestone. +- **Patch releases** refer to applicable fixes, including security fixes, may be backported to support releases, depending on severity and feasibility. +- **Test gates** should include soak tests and upgrade tests from the last minor version. + +## Release Versioning + +All releases will be of the form _vX.Y.Z_ where X is the major version, Y is the minor version and Z is the patch version. This project strictly follows semantic versioning. + +The rest of the doc will cover the release process for the following kinds of releases: + +**Major Releases** + +No plan to move to 2.0.0 unless there is a major design change like an incompatible API change in the project + +**Minor Releases** + +- X.Y.0-alpha.W, W >= 0 (Branch : main) + - Released as needed before we cut a beta X.Y release + - Alpha release, cut from master branch +- X.Y.0-beta.W, W >= 0 (Branch : main) + - Released as needed before we cut a stable X.Y release + - More stable than the alpha release to signal users to test things out + - Beta release, cut from master branch +- X.Y.0-rc.W, W >= 0 (Branch : main) + - Released as needed before we cut a stable X.Y release + - soak for ~ 2 weeks before cutting a stable release + - Release candidate release, cut from master branch +- X.Y.0 (Branch: main) + - Released every ~ 3 months + - Stable release, cut from master when X.Y milestone is complete + +**Patch Releases** + +- Patch Releases X.Y.Z, Z > 0 (Branch: release-X.Y, only cut when a patch is needed) + - No breaking changes + - Applicable fixes, including security fixes, may be cherry-picked from master into the latest supported minor release-X.Y branches. + - Patch release, cut from a release-X.Y branch + +## Supported Releases + +Applicable fixes, including security fixes, may be cherry-picked into the release branch, depending on severity and feasibility. Patch releases are cut from that branch as needed. + +We expect users to stay reasonably up-to-date with the versions of Eraser they use in production, but understand that it may take time to upgrade. We expect users to be running approximately the latest patch release of a given minor release and encourage users to upgrade as soon as possible. + +We expect to "support" n (current) and n-1 major.minor releases. "Support" means we expect users to be running that version in production. For example, when v1.2.0 comes out, v1.0.x will no longer be supported for patches, and we encourage users to upgrade to a supported version as soon as possible. + +## Supported Kubernetes Versions + +Eraser is assumed to be compatible with the [current Kubernetes Supported Versions](https://kubernetes.io/releases/patch-releases/#detailed-release-history-for-active-branches) per [Kubernetes Supported Versions policy](https://kubernetes.io/releases/version-skew-policy/). 
+ +For example, if Eraser _supported_ versions are v1.2 and v1.1, and Kubernetes _supported_ versions are v1.22, v1.23, v1.24, then all supported Eraser versions (v1.2, v1.1) are assumed to be compatible with all supported Kubernetes versions (v1.22, v1.23, v1.24). If Kubernetes v1.25 is released later, then Eraser v1.2 and v1.1 will be assumed to be compatible with v1.25 if those Eraser versions are still supported at that time. + +If you choose to use Eraser with a version of Kubernetes that it does not support, you are using it at your own risk. + +## Acknowledgement + +This document builds on the ideas and implementations of release processes from projects like Kubernetes and Helm. \ No newline at end of file diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/releasing.md b/docs/versioned_docs/version-v1.4.0-beta.0/releasing.md new file mode 100644 index 0000000000..67fc98fb08 --- /dev/null +++ b/docs/versioned_docs/version-v1.4.0-beta.0/releasing.md @@ -0,0 +1,32 @@ +--- +title: Releasing +--- + +## Create Release Pull Request + +1. Go to `create_release_pull_request` workflow under actions. +2. Select run workflow, and use the workflow from your branch. +3. Input release version with the semantic version identifying the release. +4. Click run workflow and review the PR created by github-actions. + +# Releasing + +5. Once the PR is merged to `main`, tag that commit with release version and push tags to remote repository. + + ``` + git checkout + git pull origin + git tag -a -m '' + git push origin + ``` +6. Pushing the release tag will trigger GitHub Actions to trigger `release` job. + This will build the `ghcr.io/eraser-dev/remover`, `ghcr.io/eraser-dev/eraser-manager`, `ghcr.io/eraser-dev/collector`, and `ghcr.io/eraser-dev/eraser-trivy-scanner` images automatically, then publish the new release tag. + +## Publishing + +1. GitHub Action will create a new release, review and edit it at https://github.com/eraser-dev/eraser/releases + +## Notifying + +1. Send an email to the [Eraser mailing list](https://groups.google.com/g/eraser-dev) announcing the release, with links to GitHub. +2. Post a message on the [Eraser Slack channel](https://kubernetes.slack.com/archives/C03Q8KV8YQ4) with the same information. \ No newline at end of file diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/setup.md b/docs/versioned_docs/version-v1.4.0-beta.0/setup.md new file mode 100644 index 0000000000..2fdfa7d412 --- /dev/null +++ b/docs/versioned_docs/version-v1.4.0-beta.0/setup.md @@ -0,0 +1,299 @@ +--- +title: Setup +--- + +# Development Setup + +This document describes the steps to get started with development. +You can either utilize [Codespaces](https://docs.github.com/en/codespaces/overview) or setup a local environment. + +## Local Setup + +### Prerequisites: + +- [go](https://go.dev/) with version 1.17 or later. +- [docker](https://docs.docker.com/get-docker/) +- [kind](https://kind.sigs.k8s.io/) +- `make` + +### Get things running + +- Get dependencies with `go get` + +- This project uses `make`. You can utilize `make help` to see available targets. For local deployment make targets help to build, test and deploy. + +### Making changes + +Please refer to [Development Reference](#development-reference) for more details on the specific commands. + +To test your changes on a cluster: + +```bash +# generate necessary api files (optional - only needed if changes to api folder). 
+make generate + +# build applicable images +make docker-build-manager MANAGER_IMG=eraser-manager:dev +make docker-build-remover REMOVER_IMG=remover:dev +make docker-build-collector COLLECTOR_IMG=collector:dev +make docker-build-trivy-scanner TRIVY_SCANNER_IMG=eraser-trivy-scanner:dev + +# make sure updated image is present on cluster (e.g., see kind example below) +kind load docker-image \ + eraser-manager:dev \ + eraser-trivy-scanner:dev \ + remover:dev \ + collector:dev + +make manifests +make deploy + +# to remove the deployment +make undeploy +``` + +To test your changes to manager locally: + +```bash +make run +``` + +Example Output: + +``` +you@local:~/eraser$ make run +docker build . \ + -t eraser-tooling \ + -f build/tooling/Dockerfile +[+] Building 7.8s (8/8) FINISHED + => => naming to docker.io/library/eraser-tooling 0.0s +docker run -v /home/eraser/config:/config -w /config/manager \ + registry.k8s.io/kustomize/kustomize:v3.8.9 edit set image controller=eraser-manager:dev +docker run -v /home/eraser:/eraser eraser-tooling controller-gen \ + crd \ + rbac:roleName=manager-role \ + webhook \ + paths="./..." \ + output:crd:artifacts:config=config/crd/bases +rm -rf manifest_staging +mkdir -p manifest_staging/deploy +docker run --rm -v /home/eraser:/eraser \ + registry.k8s.io/kustomize/kustomize:v3.8.9 build \ + /eraser/config/default -o /eraser/manifest_staging/deploy/eraser.yaml +docker run -v /home/eraser:/eraser eraser-tooling controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..." +go fmt ./... +go vet ./... +go run ./main.go +{"level":"info","ts":1652985685.1663408,"logger":"controller-runtime.metrics","msg":"Metrics server is starting to listen","addr":":8080"} +... +``` + +## Development Reference + +Eraser is using tooling from [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder). For Eraser this tooling is containerized into the `eraser-tooling` image. The `make` targets can use this tooling and build the image when necessary. + +You can override the default configuration using environment variables. Below you can find a reference of targets and configuration options. + +### Common Configuration + +| Environment Variable | Description | +| -------------------- | --------------------------------------------------------------------------------------------- | +| VERSION | Specifies the version (i.e., the image tag) of eraser to be used. | +| MANAGER_IMG | Defines the image url for the Eraser manager. Used for tagging, pulling and pushing the image | +| REMOVER_IMG | Defines the image url for the Eraser. Used for tagging, pulling and pushing the image | +| COLLECTOR_IMG | Defines the image url for the Collector. Used for tagging, pulling and pushing the image | + +### Linting + +- `make lint` + +Lints the go code. + +| Environment Variable | Description | +| -------------------- | ------------------------------------------------------- | +| GOLANGCI_LINT | Specifies the go linting binary to be used for linting. | + +### Development + +- `make generate` + +Generates necessary files for the k8s api stored under `api/v1alpha1/zz_generated.deepcopy.go`. See the [kubebuilder docs](https://book.kubebuilder.io/cronjob-tutorial/other-api-files.html) for details. + +- `make manifests` + +Generates the eraser deployment yaml files under `manifest_staging/deploy`. 
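As an illustration, the images referenced in the generated manifests can be overridden through the environment variables listed under Configuration Options; the `dev` tags below are placeholders:

```bash
# Illustrative invocation: regenerate the deployment YAML with locally built image tags.
make manifests \
  MANAGER_IMG=eraser-manager:dev \
  REMOVER_IMG=remover:dev
```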
+ +Configuration Options: + +| Environment Variable | Description | +| -------------------- | -------------------------------------------------- | +| REMOVER_IMG | Defines the image url for the Eraser. | +| MANAGER_IMG | Defines the image url for the Eraser manager. | +| KUSTOMIZE_VERSION | Define Kustomize version for generating manifests. | + +- `make test` + +Runs the unit tests for the eraser project. + +Configuration Options: + +| Environment Variable | Description | +| -------------------- | ----------------------------------------------------------- | +| ENVTEST | Specifies the envtest setup binary. | +| ENVTEST_K8S_VERSION | Specifies the Kubernetes version for envtest setup command. | + +- `make e2e-test` + +Runs e2e tests on a cluster. + +Configuration Options: + +| Environment Variable | Description | +| -------------------- | ------------------------------------------------------------------------------------------------------------- | +| REMOVER_IMG | Eraser image to be used for e2e test. | +| MANAGER_IMG | Eraser manager image to be used for e2e test. | +| KUBERNETES_VERSION | Kubernetes version for e2e test. | +| TEST_COUNT | Sets repetition for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. | +| TIMEOUT | Sets timeout for test. Please refer to [go docs](https://pkg.go.dev/cmd/go#hdr-Testing_flags) for details. | +| TESTFLAGS | Sets additional test flags | + +### Build + +- `make build` + +Builds the eraser manager binaries. + +- `make run` + +Runs the eraser manager on your local machine. + +- `make docker-build-manager` + +Builds the docker image for the eraser manager. + +Configuration Options: + +| Environment Variable | Description | +| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). | +| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). | +| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). | +| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). | +| MANAGER_IMG | Specifies the target repository, image name and tag for building image. | + +- `make docker-push-manager` + +Builds the docker image for the eraser manager. + +Configuration Options: + +| Environment Variable | Description | +| -------------------- | ----------------------------------------------------------------------- | +| MANAGER_IMG | Specifies the target repository, image name and tag for building image. | + +- `make docker-build-remover` + +Builds the docker image for eraser remover. + +Configuration Options: + +| Environment Variable | Description | +| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). 
| +| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). | +| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). | +| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). | +| REMOVER_IMG | Specifies the target repository, image name and tag for building image. | + +- `make docker-push-remover` + +Builds the docker image for the eraser remover. + +Configuration Options: + +| Environment Variable | Description | +| -------------------- | ----------------------------------------------------------------------- | +| REMOVER_IMG | Specifies the target repository, image name and tag for building image. | + +- `make docker-build-collector` + +Builds the docker image for the eraser collector. + +Configuration Options: + +| Environment Variable | Description | +| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------ | +| CACHE_FROM | Sets the target of the buildx --cache-from flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-from). | +| CACHE_TO | Sets the target of the buildx --cache-to flag [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#cache-to). | +| PLATFORM | Sets the target platform for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#platform). | +| OUTPUT_TYPE | Sets the output for buildx [see buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_build/#output). | +| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. | + +- `make docker-push-collector` + +Builds the docker image for the eraser collector. + +Configuration Options: + +| Environment Variable | Description | +| -------------------- | ----------------------------------------------------------------------- | +| COLLECTOR_IMG | Specifies the target repository, image name and tag for building image. | + +### Deployment + +- `make install` + +Install CRDs into the K8s cluster specified in ~/.kube/config. + +Configuration Options: + +| Environment Variable | Description | +| -------------------- | ---------------------------------------------------------------- | +| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. | + +- `make uninstall` + +Uninstall CRDs from the K8s cluster specified in ~/.kube/config. + +Configuration Options: + +| Environment Variable | Description | +| -------------------- | ---------------------------------------------------------------- | +| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. | + +- `make deploy` + +Deploys eraser to the cluster specified in ~/.kube/config. + +Configuration Options: + +| Environment Variable | Description | +| -------------------- | -------------------------------------------------------------------- | +| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources for deployment. | +| MANAGER_IMG | Specifies the eraser manager image version to be used for deployment | + +- `make undeploy` + +Undeploy controller from the K8s cluster specified in ~/.kube/config. 
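A minimal local iteration, sketched here with a placeholder image tag, pairs the deploy and undeploy targets:

```bash
# Illustrative only: deploy a locally built manager image, then remove the deployment.
make deploy MANAGER_IMG=eraser-manager:dev
make undeploy
```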
+ +Configuration Options: + +| Environment Variable | Description | +| -------------------- | ------------------------------------------------------------------------- | +| KUSTOMIZE_VERSION | Kustomize version used to generate k8s resources that need to be removed. | + +### Release + +- `make release-manifest` + +Generates k8s manifests files for a release. + +Configuration Options: + +| Environment Variable | Description | +| -------------------- | ------------------------------------ | +| NEWVERSION | Sets the new version in the Makefile | + +- `make promote-staging-manifest` + +Promotes the k8s deployment yaml files to release. diff --git a/docs/versioned_docs/version-v1.4.0-beta.0/trivy.md b/docs/versioned_docs/version-v1.4.0-beta.0/trivy.md new file mode 100644 index 0000000000..d19e9777db --- /dev/null +++ b/docs/versioned_docs/version-v1.4.0-beta.0/trivy.md @@ -0,0 +1,6 @@ +--- +title: Trivy +--- + +## Trivy Provider Options +The Trivy provider is used in Eraser for image scanning and detecting vulnerabilities. See [Customization](https://eraser-dev.github.io/eraser/docs/customization#scanner-options) for more details on configuring the scanner. diff --git a/docs/versioned_sidebars/version-v1.4.0-beta.0-sidebars.json b/docs/versioned_sidebars/version-v1.4.0-beta.0-sidebars.json new file mode 100644 index 0000000000..ec01a569c5 --- /dev/null +++ b/docs/versioned_sidebars/version-v1.4.0-beta.0-sidebars.json @@ -0,0 +1,43 @@ +{ + "sidebar": [ + "introduction", + "installation", + "quick-start", + "architecture", + { + "type": "category", + "label": "Topics", + "collapsible": true, + "collapsed": false, + "items": [ + "manual-removal", + "exclusion", + "customization", + "metrics" + ] + }, + { + "type": "category", + "label": "Development", + "collapsible": true, + "collapsed": false, + "items": [ + "setup", + "releasing" + ] + }, + { + "type": "category", + "label": "Scanning", + "collapsible": true, + "collapsed": false, + "items": [ + "custom-scanner", + "trivy" + ] + }, + "faq", + "contributing", + "code-of-conduct" + ] +} diff --git a/docs/versions.json b/docs/versions.json index c094f2ab68..e7295a559a 100644 --- a/docs/versions.json +++ b/docs/versions.json @@ -1,4 +1,5 @@ [ + "v1.4.0-beta.0", "v1.2.x", "v1.1.x", "v1.0.x", diff --git a/manifest_staging/charts/eraser/Chart.yaml b/manifest_staging/charts/eraser/Chart.yaml index e07afb0d12..16a9923ecf 100644 --- a/manifest_staging/charts/eraser/Chart.yaml +++ b/manifest_staging/charts/eraser/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: eraser description: A Helm chart for Eraser type: application -version: 1.3.0-beta.0 -appVersion: v1.3.0-beta.0 +version: 1.4.0-beta.0 +appVersion: v1.4.0-beta.0 home: https://github.com/eraser-dev/eraser sources: - https://github.com/eraser-dev/eraser.git diff --git a/manifest_staging/charts/eraser/values.yaml b/manifest_staging/charts/eraser/values.yaml index 6053e45f96..484d3ca3c2 100644 --- a/manifest_staging/charts/eraser/values.yaml +++ b/manifest_staging/charts/eraser/values.yaml @@ -39,7 +39,7 @@ runtimeConfig: enabled: true image: # repo: "" - tag: "v1.3.0-beta.0" + tag: "v1.4.0-beta.0" request: {} # mem: "" # cpu: "" @@ -50,7 +50,7 @@ runtimeConfig: enabled: true image: # repo: "" - tag: "v1.3.0-beta.0" + tag: "v1.4.0-beta.0" request: {} # mem: "" # cpu: "" @@ -81,7 +81,7 @@ runtimeConfig: remover: image: # repo: "" - tag: "v1.3.0-beta.0" + tag: "v1.4.0-beta.0" request: {} # mem: "" # cpu: "" @@ -94,7 +94,7 @@ deploy: repo: ghcr.io/eraser-dev/eraser-manager pullPolicy: 
IfNotPresent # Overrides the image tag whose default is the chart appVersion. - tag: "v1.3.0-beta.0" + tag: "v1.4.0-beta.0" additionalArgs: [] priorityClassName: "" diff --git a/manifest_staging/deploy/eraser.yaml b/manifest_staging/deploy/eraser.yaml index 90fe701322..f79dc8a462 100644 --- a/manifest_staging/deploy/eraser.yaml +++ b/manifest_staging/deploy/eraser.yaml @@ -413,7 +413,7 @@ data: enabled: true image: repo: ghcr.io/eraser-dev/collector - tag: v1.3.0-beta.0 + tag: v1.4.0-beta.0 request: mem: 25Mi cpu: 7m @@ -425,7 +425,7 @@ data: enabled: true image: repo: ghcr.io/eraser-dev/eraser-trivy-scanner # supply custom image for custom scanner - tag: v1.3.0-beta.0 + tag: v1.4.0-beta.0 request: mem: 500Mi cpu: 1000m @@ -462,7 +462,7 @@ data: remover: image: repo: ghcr.io/eraser-dev/remover - tag: v1.3.0-beta.0 + tag: v1.4.0-beta.0 request: mem: 25Mi # https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#how-pods-with-resource-limits-are-run @@ -505,7 +505,7 @@ spec: fieldPath: metadata.namespace - name: OTEL_SERVICE_NAME value: eraser-manager - image: ghcr.io/eraser-dev/eraser-manager:v1.3.0-beta.0 + image: ghcr.io/eraser-dev/eraser-manager:v1.4.0-beta.0 livenessProbe: httpGet: path: /healthz diff --git a/third_party/open-policy-agent/gatekeeper/helmify/static/Chart.yaml b/third_party/open-policy-agent/gatekeeper/helmify/static/Chart.yaml index e07afb0d12..16a9923ecf 100644 --- a/third_party/open-policy-agent/gatekeeper/helmify/static/Chart.yaml +++ b/third_party/open-policy-agent/gatekeeper/helmify/static/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: eraser description: A Helm chart for Eraser type: application -version: 1.3.0-beta.0 -appVersion: v1.3.0-beta.0 +version: 1.4.0-beta.0 +appVersion: v1.4.0-beta.0 home: https://github.com/eraser-dev/eraser sources: - https://github.com/eraser-dev/eraser.git diff --git a/third_party/open-policy-agent/gatekeeper/helmify/static/values.yaml b/third_party/open-policy-agent/gatekeeper/helmify/static/values.yaml index 6053e45f96..484d3ca3c2 100644 --- a/third_party/open-policy-agent/gatekeeper/helmify/static/values.yaml +++ b/third_party/open-policy-agent/gatekeeper/helmify/static/values.yaml @@ -39,7 +39,7 @@ runtimeConfig: enabled: true image: # repo: "" - tag: "v1.3.0-beta.0" + tag: "v1.4.0-beta.0" request: {} # mem: "" # cpu: "" @@ -50,7 +50,7 @@ runtimeConfig: enabled: true image: # repo: "" - tag: "v1.3.0-beta.0" + tag: "v1.4.0-beta.0" request: {} # mem: "" # cpu: "" @@ -81,7 +81,7 @@ runtimeConfig: remover: image: # repo: "" - tag: "v1.3.0-beta.0" + tag: "v1.4.0-beta.0" request: {} # mem: "" # cpu: "" @@ -94,7 +94,7 @@ deploy: repo: ghcr.io/eraser-dev/eraser-manager pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. - tag: "v1.3.0-beta.0" + tag: "v1.4.0-beta.0" additionalArgs: [] priorityClassName: ""