diff --git a/pkg/buildinfo/version.go b/pkg/buildinfo/version.go index 3324e382c..0736bef7e 100644 --- a/pkg/buildinfo/version.go +++ b/pkg/buildinfo/version.go @@ -21,7 +21,7 @@ package buildinfo // Version is the current version of Sonobuoy, set by the go linker's -X flag at build time. // Ensure that this is bumped manually as well so that Sonobuoy, as a library, also has the right version. -var Version = "v0.56.4" +var Version = "v0.56.5" // GitSHA is the actual commit that is being built, set by the go linker's -X flag at build time. var GitSHA string diff --git a/site/config.yaml b/site/config.yaml index a63a55cfd..23e4166bf 100644 --- a/site/config.yaml +++ b/site/config.yaml @@ -32,9 +32,10 @@ params: docs_search_index_name: index_name docs_search_api_key: api_key docs_versioning: true - docs_latest: v0.56.4 + docs_latest: v0.56.5 docs_versions: - main + - v0.56.5 - v0.56.4 - v0.56.3 - v0.56.2 diff --git a/site/content/docs/v0.56.5/_index.md b/site/content/docs/v0.56.5/_index.md new file mode 100644 index 000000000..259e68248 --- /dev/null +++ b/site/content/docs/v0.56.5/_index.md @@ -0,0 +1,266 @@ +--- +version: v0.56.5 +cascade: + layout: docs + gh: https://github.com/vmware-tanzu/sonobuoy/tree/v0.56.5 +--- +# ![Sonobuoy Logo](img/sonobuoy-logo.png) + +[![Test](https://github.com/vmware-tanzu/sonobuoy/actions/workflows/ci-test.yaml/badge.svg)](https://github.com/vmware-tanzu/sonobuoy/actions/workflows/ci-test.yaml/badge.svg) +[![Lint](https://github.com/vmware-tanzu/sonobuoy/actions/workflows/ci-lint.yaml/badge.svg)](https://github.com/vmware-tanzu/sonobuoy/actions/workflows/ci-lint.yaml/badge.svg) + +## [Overview][oview] + +Sonobuoy is a diagnostic tool that makes it easier to understand the state of a Kubernetes cluster by running a set of +plugins (including [Kubernetes][k8s] conformance tests) in an accessible and non-destructive manner. 
It is a +customizable, extendable, and cluster-agnostic way to generate clear, informative reports about your cluster. + +Its selective data dumps of Kubernetes resource objects and cluster nodes allow for the following use cases: + +* Integrated end-to-end (e2e) [conformance-testing][e2ePlugin] +* Workload debugging +* Custom data collection via extensible plugins + +Starting v0.20, Sonobuoy supports Kubernetes v1.17 or later. Sonobuoy releases will be independent of Kubernetes +release, while ensuring that new releases continue to work functionally across different versions of Kubernetes. Read +more about the new release cycles in [our blog][decoupling-sonobuoy-k8s]. + +> Note: You can skip this version enforcement by running Sonobuoy with the `--skip-preflight` flag. + +## Prerequisites + +* Access to an up-and-running Kubernetes cluster. If you do not have a cluster, we recommend either: + * following the [AWS Quickstart for Kubernetes][quickstart] instructions. + * setting up a local cluster using [KinD][kind] + +* An admin `kubeconfig` file, and the KUBECONFIG environment variable set. + +* For some advanced workflows it may be required to have `kubectl` installed. + See [installing via Homebrew (MacOS)][brew] or [building the binary (Linux)][linux]. + +* The `sonobuoy images` subcommand requires [Docker](https://www.docker.com) to be installed. + See [installing Docker][docker]. + +## Installation + +1. Download the [latest release][releases] for your client platform. +2. Extract the tarball: + + ``` + tar -xvf .tar.gz + ``` + + Move the extracted `sonobuoy` executable to somewhere on your `PATH`. + +## Getting Started + +To launch conformance tests (ensuring [CNCF][cncf] conformance) and wait until they are finished run: + +```bash +sonobuoy run --wait +``` + +> Note: Using `--mode quick` will significantly shorten the runtime of Sonobuoy. It runs just a single test, helping to quickly validate your Sonobuoy and Kubernetes configuration. 
+ +Get the results from the plugins (e.g. e2e test results): + +```bash +results=$(sonobuoy retrieve) +``` + +Inspect results for test failures. This will list the number of tests failed and their names: + +```bash +sonobuoy results $results +``` + +> Note: The `results` command has lots of useful options for various situations. See the [results page][results] for more details. + +You can also extract the entire contents of the file to get much more [detailed data][snapshot] about your cluster. + +Sonobuoy creates a few resources in order to run and expects to run within its own namespace. + +Deleting Sonobuoy entails removing its namespace as well as a few cluster scoped resources. + +```bash +sonobuoy delete --wait +``` + +> Note: The --wait option ensures the Kubernetes namespace is deleted, avoiding conflicts if another Sonobuoy run is started quickly. + +If you have an issue with permissions in your cluster but you still want to run Sonobuoy, you can use `--aggregator-permissions` flag. Read more details about it [here][aggregator-permissions]. + +### Other Tests + +By default, `sonobuoy run` runs the Kubernetes conformance tests but this can easily be configured. The same plugin that +has the conformance tests has all the Kubernetes end-to-end tests which include other tests such as: + +* tests for specific storage features +* performance tests +* scaling tests +* provider specific tests +* and many more + +To modify which tests you want to run, checkout our page on the [e2e plugin][e2ePlugin]. + +If you want to run other tests or tools which are not a part of the Kubernetes end-to-end suite, refer to our +documentation on [custom plugins][customPlugins]. 
+ +### Monitoring Sonobuoy during a run + +You can check on the status of each of the plugins running with: + +```bash +sonobuoy status +``` + +You can also inspect the logs of all Sonobuoy containers: + +```bash +sonobuoy logs +``` + +## Troubleshooting + +If you encounter any problems that the documentation does not address, [file an issue][issue]. + +## Docker Hub rate limit + +This year, Docker has started rate limiting image pulls from Docker Hub. We're planning a future release with a better +user interface to work around this. Until then, this is the recommended approach. + +### Sonobuoy Pod + +Sonobuoy by default pulls from Docker Hub for [`sonobuoy/sonobuoy` image](https://hub.docker.com/r/sonobuoy/sonobuoy). +If you're encountering rate limit on this, you can use VMware-provided mirror with: + +```bash +sonobuoy run --sonobuoy-image projects.registry.vmware.com/sonobuoy/sonobuoy: +``` + +### Conformance + +Kubernetes end-to-end conformance test pulls several images from Docker Hub as part of testing. To override this, you +will need to create a registry manifest file locally (e.g. `conformance-image-config.yaml`) containing the following: + +```yaml +dockerLibraryRegistry: mirror.gcr.io/library +``` + +Then on running conformance: + +```bash +sonobuoy run --sonobuoy-image projects.registry.vmware.com/sonobuoy/sonobuoy: --e2e-repo-config conformance-image-config.yaml +``` + +Technically `dockerGluster` is also a registry pulling from Docker Hub, but it's not part of Conformance test suite at +the moment, so overriding `dockerLibraryRegistry` should be enough. + +## Known Issues + +### Leaked End-to-end namespaces + +There are some Kubernetes e2e tests that may leak resources. 
Sonobuoy can help clean those up as well by deleting all +namespaces prefixed with `e2e`: + +```bash +sonobuoy delete --all +``` + +### Run on Google Cloud Platform (GCP) + +Sonobuoy requires admin permissions which won't be automatic if you are running via Google Kubernetes Engine (GKE) +cluster. You must first create an admin role for the user under which you run Sonobuoy: + +```bash +kubectl create clusterrolebinding --clusterrole=cluster-admin --user= +``` + +### Run on Kubernetes for Docker Desktop + +We don't recommend running via a cluster set up via Docker Desktop. Known issues include: + +- `kubectl logs` will not function +- `sonobuoy logs` will not function +- `sonobuoy retrieve` will not function +- `systemd-logs` plugin will hang + +Most of these issues revolve around issues with kube-proxy on Docker Desktop so if you know of how to resolve these +issues, let us know. + +### Certified-Conformance bug (versions v0.53.0 and v0.53.1) + +These versions of Sonobuoy have a bug that runs the wrong set of tests without additional actions. See more +details [here][issue1388]. The simplest way to avoid this is to update your version of Sonobuoy to >= v0.53.2. + +## Strategy Document + +See our current [strategy document][strategy] for context on what our highest priority use cases and work items +will be. Feel free to make comments on Github or start conversations in Slack. + +## Contributing + +Thanks for taking the time to join our community and start contributing! We welcome pull requests. Feel free to dig +through the [issues][issue] and jump in. + +### Before you start + +* Please familiarize yourself with the [Code of Conduct][coc] before contributing. +* See [CONTRIBUTING.md][contrib] for instructions on the developer certificate of origin that we require. +* There is a [Slack channel][slack] if you want to interact with other members of the community + +## Changelog + +See [the list of releases][releases] to find out about feature changes. 
+ +[decoupling-sonobuoy-k8s]: https://sonobuoy.io/decoupling-sonobuoy-and-kubernetes + +[airgap]: airgap + +[brew]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-with-homebrew-on-macos + +[cncf]: https://github.com/cncf/k8s-conformance#certified-kubernetes + +[coc]: https://github.com/vmware-tanzu/sonobuoy/blob/main/CODE_OF_CONDUCT.md + +[contrib]: https://github.com/vmware-tanzu/sonobuoy/blob/main/CONTRIBUTING.md + +[docker]: https://docs.docker.com/get-docker/ + +[docs]: https://sonobuoy.io/docs/v0.56.5 + +[e2ePlugin]: e2eplugin + +[customPlugins]: plugins + +[gen]: gen + +[issue]: https://github.com/vmware-tanzu/sonobuoy/issues + +[issue1388]: issue1388 + +[k8s]: https://github.com/kubernetes/kubernetes + +[kind]: https://github.com/kubernetes-sigs/kind + +[linux]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#tabset-1 + +[oview]: https://youtu.be/8QK-Hg2yUd4 + +[plugins]: plugins + +[quickstart]: https://aws.amazon.com/quickstart/architecture/vmware-kubernetes/ + +[releases]: https://github.com/vmware-tanzu/sonobuoy/releases + +[results]: results + +[slack]: https://kubernetes.slack.com/messages/sonobuoy + +[snapshot]:snapshot + +[sonobuoyconfig]: sonobuoy-config + +[strategy]: strategy + +[aggregator-permissions]: aggregator-permissions diff --git a/site/content/docs/v0.56.5/aggregator-permissions.md b/site/content/docs/v0.56.5/aggregator-permissions.md new file mode 100644 index 000000000..b9e2c28fb --- /dev/null +++ b/site/content/docs/v0.56.5/aggregator-permissions.md @@ -0,0 +1,25 @@ +# Aggregator Permissions + +By default, the Sonobuoy aggregator is given very elevated permissions in order to successfully run the Kubernetes end-to-end tests. In some situations you may want to (or need to) limit the permissions of the aggregator so that the aggregator and the pods that it creates do not have such wide-reaching permissions. 
You can always customize the exact permissions of the ServiceAccount via editing `sonobuoy gen` output manually, but Sonobuoy also provides useful presets via the CLI flag, `--aggregator-permissions`. + +## Type of Aggregator Permissions + +Allowable values are `[namespaced, clusterAdmin, clusterRead]`, `clusterAdmin` is default value. + +### clusterAdmin + +- `clusterAdmin` is the default value. With this value Sonobuoy can do pretty much everything in the run, it does not implement any restrictions. Most of these are required for the e2e conformance tests to work since they create/destroy namespaces, pods etc. + +### namespaceAdmin + +namespaceAdmin is the most restrictive preset permissions Sonobuoy provides and ensures that Sonobuoy and its plugins do not impact other namespaces at all. + +Due to these limitations there are a number of things to note: + - Sonobuoy does not create the namespace so it needs to already exist + - You must provide `--skip-preflight` to avoid Sonobuoy from complaining about the preexisting namespace + - The `e2e` plugin (conformance tests) will not work in this mode and won't even start up due to severely limited permissions + - Daemonset plugins will not work in this mode because Sonobuoy monitors them on a per-node basis. Since Sonobuoy can't query the list of nodes in the cluster, it can't properly monitor or gather results from them. At this time, Daemonset plugins will simply be ignored. + +### clusterRead + +`clusterRead` is a compromise between `namespaceAdmin` and `clusterAdmin`. It adds ability to GET any resource from the API so that the Sonobuoy queries work OK, it is able to get nodes so daemonsets run fine, and e2e tests can technically start. Sonobuoy can't create namespaces so e2e tests can't run in this mode in any useful manner either. However, this may be a more reasonable mode to run less intrusive, custom plugins in. 
In this mode Sonobuoy don't create the namespace either so it has to be created first and sonobuoy run with the `--skip-preflight` flag. diff --git a/site/content/docs/v0.56.5/airgap.md b/site/content/docs/v0.56.5/airgap.md new file mode 100644 index 000000000..76675d6f5 --- /dev/null +++ b/site/content/docs/v0.56.5/airgap.md @@ -0,0 +1,132 @@ +# Custom registries and air-gapped testing + +In air-gapped deployments where there is no access to the public Docker registries Sonobuoy supports running the end-to-end tests with custom registries. +This enables you to test your air-gapped deployment once you've loaded the necessary images into a registry that is reachable by your cluster. + +You will need to make the Sonobuoy image available as well as the images for any plugins you wish to run. +Below, you will find the details of how to use the Sonobuoy image, as well as the images for the `e2e` and `systemd-logs` plugins in this kind of deployment. + +## Sonobuoy Image +To run any Sonobuoy plugin in an air-gapped deployment, you must ensure that the Sonobuoy image is available in a registry that is reachable by your cluster. +You will need to pull, tag, and then push the image as follows: + +``` +PRIVATE_REG= +SONOBUOY_VERSION= + +docker pull sonobuoy/sonobuoy:$SONOBUOY_VERSION +docker tag sonobuoy/sonobuoy:$SONOBUOY_VERSION $PRIVATE_REG/sonobuoy:$SONOBUOY_VERSION +docker push $PRIVATE_REG/sonobuoy:$SONOBUOY_VERSION +``` + +By default, Sonobuoy will attempt to use the image available in the public registry. +To use the image in your own registry, you will need to override it when using the `gen` or `run` command with the `--sonobuoy-image` flag as follows: + +``` +sonobuoy run --sonobuoy-image $PRIVATE_REG/sonobuoy:$SONOBUOY_VERSION +``` + +## E2E Plugin + +To use the `e2e` plugin, the conformance test image and the images the tests use must be available in your registry. 
+ +### Conformance Image +The process for making the conformance image available in your registry is the same as the Sonobuoy image. +You need to pull, tag, and then push the image. +To ensure you use the correct version of the conformance image, check your server version using `kubectl version`. + + +``` +PRIVATE_REG= +CLUSTER_VERSION= + +docker pull k8s.gcr.io/conformance:$CLUSTER_VERSION +docker tag k8s.gcr.io/conformance:$CLUSTER_VERSION $PRIVATE_REG/conformance:$CLUSTER_VERSION +docker push $PRIVATE_REG/conformance:$CLUSTER_VERSION +``` + +To use the conformance image in your registry, you will need to override the default when using the `gen` or `run` commands with the `--kube-conformance-image` flag as follows: + +``` +sonobuoy run --kube-conformance-image $PRIVATE_REG/conformance:$CLUSTER_VERSION +``` + +### Test Images + +The end-to-end tests use a number of different images across multiple registries. +When running the `e2e` plugin, you must provide a mapping that details which custom registries should be used instead of the public registries. + +If you need only a single, custom registry, use the `--e2e-repo` flag to specify that all test registry should be set to the same, given value. + +If you need multiple registries, you must provide a mapping that the upstream Kubernetes tests understand. It is provided via a YAML file which maps the registry category to the corresponding registry URL. +The keys in this file are specified in the Kubernetes test framework. +The tests for each minor version of Kubernetes use a different set of registries so the mapping you create will depend on which Kubernetes version you are testing against. + +To create this mapping, you can use the `gen default-image-config` command to provide the mapping with the default registry values for your cluster version. 
+The following is an example of using this command with a v1.16 cluster: + +``` +$ sonobuoy gen default-image-config +dockerLibraryRegistry: docker.io/library +e2eRegistry: gcr.io/kubernetes-e2e-test-images +gcRegistry: k8s.gcr.io +googleContainerRegistry: gcr.io/google-containers +sampleRegistry: gcr.io/google-samples +``` + +You can save this output to a file and modify it to specify your own registries instead. +You can modify all of the registry values or just a subset. +If you specify only a subset, the defaults will be used instead. + +Sonobuoy provides the command `images` to help you easily pull the test images and push them to your own custom registries. +First, you must pull the images to your local machine using the following command: + +``` +sonobuoy images pull +``` + +> **NOTE:** Some versions of Kubernetes reference images that do not exist or cannot be pulled without authentication. +> You may see these errors when running the above command. This is expected behaviour. +> These images are referenced by some end-to-end tests, but **not** by the conformance tests. + +To push the images, you must provide the mapping using the `--e2e-repo` or `--e2e-repo-config` flag as follows: + +``` +sonobuoy images push --e2e-repo +// OR +sonobuoy images push --e2e-repo-config +``` + +If you are pushing to a single registry; use the first flag; if you need fine-grained controle +over which images go to which registry, use the second. + +When running the `e2e` plugin, you will need to provide this information using the same flag as follows: + +``` +sonobuoy run --e2e-repo +// OR +sonobuoy run --e2e-repo-config +``` + +## systemd-logs plugin + +If you want to run the `systemd-logs` plugin you will again need to pull, tag, and push the image. 
+ + +``` +PRIVATE_REG= + +docker pull gcr.io/heptio-images/sonobuoy-plugin-systemd-logs:latest +docker tag gcr.io/heptio-images/sonobuoy-plugin-systemd-logs:latest $PRIVATE_REG/sonobuoy-plugin-systemd-logs:latest +docker push $PRIVATE_REG/sonobuoy-plugin-systemd-logs:latest +``` + +To use the image in your own registry, you will need to override the default when using the `gen` or `run` commands with the `--systemd-logs-image` flag as follows: + +``` +sonobuoy run --systemd-logs-image $PRIVATE_REG/sonobuoy-plugin-systemd-logs:latest +``` + +If you do not wish to run this plugin, you can remove it from the list of [plugins][plugins] to be run within the manifest, or you can explicitly specify which plugin you with to run with the `--plugin` flag. + +[plugins]: plugins.md#choosing-which-plugins-to-run diff --git a/site/content/docs/v0.56.5/dryRun-listGenerator.md b/site/content/docs/v0.56.5/dryRun-listGenerator.md new file mode 100644 index 000000000..3845c3958 --- /dev/null +++ b/site/content/docs/v0.56.5/dryRun-listGenerator.md @@ -0,0 +1,63 @@ +# Plugins used to help create test lists + +To get the lists of tests for a version, we need to first gather the list of tests for each of those versions. + +There are too many releases to get _all_ k8s releases so I used the following as a guide: + +```bash +git tag -l --sort=-creatordate|grep -v "alpha\|beta\|rc" |head -n75 +``` + +This gets the latest 75 releases that aren't alpha/beta/rc releases. We will use this list +to create a Sonobuoy plugin for each release. + +First I generate a list of the versions: +```bash +# From my kubernetes/kubernetes repo directory +rm ./tmpversions.txt +git fetch --all --tags +git tag -l --sort=-creatordate | + grep -v "alpha\|beta\|rc" | + head -n75|sort|xargs -t -I % sh -c \ + 'echo % >> ~/go/src/github.com/vmware-tanzu/sonobuoy/tmpversions.txt' +``` + +After trial and error I realized we need to trim that list a bit since +older versions will not have E2E_DRYRUN at all. 
Manually removing values from the versions list +before v1.14.0 (if there are any). + +Since we already have some versions data, we only need to find the new ones. To see the new versions: +``` +ls cmd/sonobuoy/app/e2e/testLists|cut -f 1-3 -d '.' > existingversions.txt +diff tmpversions.txt existingversions.txt +``` + +You should expect to see the v0.0.0 as a difference (a test value) but then modify the tmpVersions.txt to only include the new versions. + +Then, using xargs and sonobuoy I generate the plugin for the releases of k8s. I need to modify the default e2e plugin in two ways: + - make the name unique + - remove E2E_EXTRA_ARGS since some of the older versions dont have the progress URL flag. + +```bash +# From the sonobuoy directory +rm ./tmpplugins/p* +cat tmpversions.txt|xargs -t -I % sh -c \ + 'sonobuoy gen plugin e2e --plugin-env=e2e.E2E_EXTRA_ARGS= --plugin-env=e2e.E2E_DRYRUN=true --kubernetes-version=% | sed "s/plugin-name: e2e/plugin-name: e2e%/" > ./tmpplugins/p%.yaml' +``` + +Now, when I run sonobuoy I can run with each of those plugins, get the results, and gzip them to save space: + +```bash +# From the root of this project +sonobuoy run -p ./tmpplugins --wait +sonobuoy retrieve -f output.tar.gz +cat tmpversions.txt | xargs -t -I % sh -c \ + "sonobuoy results output.tar.gz -p e2e% --mode=detailed | jq .name -r | sort > ./cmd/sonobuoy/app/e2e/testLists/%" +gzip * +# Any older ones archives will just need you to say not to overwrite. TODO(jschnake) script this better to avoid answer 'n' over and over. +``` + +**DEBUG** +Server could run out of space for more conformance images (e.g. 
"no space left on device") + - Clear docker cache with `docker image prune -a` + - Add more disc space to docker (60GB to 300GB) \ No newline at end of file diff --git a/site/content/docs/v0.56.5/e2eplugin.md b/site/content/docs/v0.56.5/e2eplugin.md new file mode 100644 index 000000000..47ade33cb --- /dev/null +++ b/site/content/docs/v0.56.5/e2eplugin.md @@ -0,0 +1,74 @@ +# The Kubernetes End-To-End Testing Plugin + +The Kubernetes end-to-end testing plugin (the e2e plugin) is used to run tests which are maintained by the upstream Kubernetes community in the [kubernetes/kubernetes][kubernetesRepo] repo. + +There are numerous ways to run this plugin in order to meet your testing needs. + +## Choosing Which Tests To Run + +The most common point of customization is changing the set of tests to run. This is controlled by two environment variables the test image recognizes: + +* E2E_FOCUS +* E2E_SKIP + +Each of these is a regular expression describing which tests to run or skip. The "E2E_FOCUS" value is applied first and the "E2E_SKIP" value then further restricts that list. These can be set using Sonobuoy flags: + +``` +sonobuoy run \ + --e2e-focus= \ + --e2e-skip= +``` + +> Note: These flags are just special cases of the more general flag `--plugin-env`. For instance, you could set the env vars by using the flag `--plugin-env e2e.E2E_SKIP=` + +# Built-In Configurations + +There are a few commonly run configurations which Sonobuoy hard-codes for convenience: + +* non-disruptive-conformance + +This is the default mode and will run all the tests in the `e2e` plugin which are marked `Conformance` which are known to not be disruptive to other workloads in your cluster. This mode is ideal for checking that an existing cluster continues to behave is conformant manner. 
+ +> NOTE: The length of time it takes to run conformance can vary based on the size of your cluster---the timeout can be adjusted in the Server.timeoutseconds field of the Sonobuoy `config.json` or on the CLI via the `--timeout` flag. + +* quick + +This mode will run a single test from the `e2e` test suite which is known to be simple and fast. Use this mode as a quick check that the cluster is responding and reachable. + +* certified-conformance + +This mode runs all of the `Conformance` tests and is the mode used when applying for the [Certified Kubernetes Conformance Program](https://www.cncf.io/certification/software-conformance). Some of these tests may be disruptive to other workloads so it is not recommended that you run this mode on production clusters. In those situations, use the default "non-disruptive-conformance" mode. + +> NOTE: The length of time it takes to run conformance can vary based on the size of your cluster---the timeout can be adjusted in the Server.timeoutseconds field of the Sonobuoy `config.json` or on the CLI via the `--timeout` flag. + +## Dry Run + +When specifying your own focus/skip values, it may be useful to set the run to operate in dry run mode: + +``` +sonobuoy run \ + --plugin-env e2e.E2E_FOCUS=pods \ + --plugin-env e2e.E2E_DRYRUN=true +``` + +By setting `E2E_DRYRUN`, the run will execute and produce results like normal except that the actual test code won't execute, just the test selection. Each test that _would have been run_ will be reported as passing. This can help you fine-tune your focus/skip values to target just the tests you want without wasting hours on test runs which target unnecessary tests. + +## Why Conformance Matters + +With such a [wide array][configs] of Kubernetes distributions available, *conformance tests* help ensure that a Kubernetes cluster meets the minimal set of features. They are a subset of end-to-end (e2e) tests that should pass on any Kubernetes cluster. 
+ +A conformance-passing cluster provides the following guarantees: + +* **Best practices**: Your Kubernetes is properly configured. This is useful to know whether you are running a distribution out of the box or handling your own custom setup. + +* **Predictability**: All your cluster behavior is well-documented. Available features in the official Kubernetes documentation can be taken as a given. Unexpected bugs should be rare, because distribution-specific issues are weeded out during the conformance tests. + +* **Interoperability**: Workloads from other conforming clusters can be ported into your cluster, or vice versa. This standardization of Kubernetes is a key advantage of open source software, and allows you to avoid vendor lock-in. + +Individual Kubernetes distributions may offer additional features beyond conformance testing, but if you change distributions, these features can't be expected to be provided. + +See the [official documentation][conformanceDocs] for Kubernetes's existing conformance tests. + +[configs]: https://docs.google.com/spreadsheets/d/1LxSqBzjOxfGx3cmtZ4EbB_BGCxT_wlxW_xgHVVa23es/edit#gid=0 +[conformanceDocs]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-testing/e2e-tests.md#conformance-tests +[kubernetesRepo]: https://github.com/kubernetes/kubernetes/tree/master/test/conformance/image diff --git a/site/content/docs/v0.56.5/faq.md b/site/content/docs/v0.56.5/faq.md new file mode 100644 index 000000000..c3897aa8d --- /dev/null +++ b/site/content/docs/v0.56.5/faq.md @@ -0,0 +1,221 @@ +# Frequently Asked Questions + +## Kubernetes Conformance and end-to-end testing + +### Why were so many tests skipped? + +When running the `e2e` plugin on Sonobuoy, you will notice that a large number of tests are skipped by default. The +reason for this is that the image used by Sonobuoy to run the Kubernetes conformance tests contains all the end-to-end +tests for Kubernetes. 
However, only a subset of those tests are required to check conformance. For example, the v1.16 +Kubernetes test image contains over 4000 tests however only 215 of those are conformance tests. + +The default mode for the e2e plugin (`non-disruptive-conformance`) will run all tests which contain the +tag `[Conformance]` and exclude those that with the `[Disruptive]` tag. This is to help prevent you from accidentally +running tests which may disrupt workloads on your cluster. To run all the conformance tests, use +the `certified-conformance` mode. + +Please refer to our [documentation for the `e2e` plugin][e2ePlugin] for more details of the built-in configurations or +our blog [post][testsuiteblog] on the Kubernetes test suite. + +### How do I determine why my tests failed? + +Before debugging test failures, we recommend isolating any failures to verify that they are genuine and are not spurious +or transient. Unfortunately, such failures can be common in complex, distributed systems. To do this, you can make use +of the `--e2e-focus` flag when using the `run` command. This flag accepts a regex which will be used to find and run +only the tests matching that regex. For example, you can provide the name of a test to run only that test: + +``` +sonobuoy run --e2e-focus "should update pod when spec was updated and update strategy is RollingUpdate" +``` + +If the test continues to fail and it appears to be a genuine failure, the next step would be to read the logs to +understand why the test failed. To read the logs for a test failure, you can find the log file within the results +tarball from Sonobuoy (`plugins/e2e/results/global/e2e.log`) or you can use the `results` command to show details of +test failures. For example, the following commands retrieve the results tarball and then use [jq][jq] to return an +object for each test failure with the failure message and the associated stdout. 
+ +``` +outfile=$(sonobuoy retrieve) && \ + sonobuoy results --mode detailed --plugin e2e $outfile | jq '. | select(.status == "failed") | .details' +``` + +Carefully read the test logs to see if anything stands out which could be the cause of the failure. For example: Were +there difficulties when contacting a particular service? Are there any commonalities in the failed tests due to a +particular feature? Often, the test logs will provide enough detail to allow you to determine why a test failed. + +If you need more information, Sonobuoy also queries the cluster upon completion of plugins. The details collected allow +you to see the state of the cluster and whether there were any issues. For example: Did any of the nodes have memory +pressure? Did the scheduler pod go down? + +As a final resort, you can also read the upstream test code to determine what actions were being performed at the point +when the test failed. If you decide to take this approach, you must ensure that you are reading the version of the test +code that corresponds to your test image. You can verify which version of the test image was used by inspecting the +plugin definition which is available in the results tarball in `plugins/e2e/definition.json` under the +key `Definition.spec.image`. For example, if the test image was `k8s.gcr.io/conformance:v1.15.3`, you should read the +code at the corresponding [v1.15.3 tag in GitHub][kubernetes-1.15.3]. All the tests can be found within the `test/e2e` +directory in the Kubernetes repository. + +### How can I run the E2E tests with certain test framework options set? What are the available options? + +How you provide options to the E2E test framework and determining which options you can set depends on which version of +Kubernetes you are testing. 
+ +To view the available options that you can set when running the tests, you can run the test executable for the +conformance image you will be using as follows: + +``` +KUBE_VERSION= +docker run -it k8s.gcr.io/conformance:$KUBE_VERSION ./e2e.test --help +``` + +You can also view the definitions of these test framework flags in the [Kubernetes repository][framework-flags]. + +If you are running Kubernetes v1.16.0 or greater, a new feature was included in this release which makes it easier to +specify your own options. This new feature allows arbitrary options to be specified when the tests are invoked. To use +this, you must ensure the environment variable `E2E_USE_GO_RUNNER=true` is set. This is the default behavior from +Sonobuoy v0.16.1 in the CLI and only needs to be manually set if working with a Sonobuoy manifest generated by an +earlier version. If this is enabled, then you can provide your options with the flag `--plugin-env=e2e.E2E_EXTRA_ARGS`. +For example, the following allows you set provider specific flags for running on GCE: + +``` +sonobuoy run --plugin-env=e2e.E2E_USE_GO_RUNNER=true \ + --plugin-env=e2e.E2E_PROVIDER=gce \ + --plugin-env=e2e.E2E_EXTRA_ARGS="--gce-zone=foo --gce-region=bar" +``` + +Before this version, it was necessary to build your own custom image which could execute the tests with the desired +options. + +For details on the two different approaches that you can take, please refer to [our blog post][custom-e2e-image] which +describes in more detail how to use the new v1.16.0 Go test runner and how to build your own custom images. + +### Some of the registries required for the tests are blocked with my test infrastructure. Can I still run the tests? + +Yes! Sonobuoy can be configured to use custom registries so that you can run the tests in airgapped environments. + +For more information and details on how to configure your environment, please refer +to [our documentation for custom registries and air-gapped environments][airgap]. 
+ +### We have some nodes with custom taints in our cluster and the tests won't start. How can I run the tests? + +Although Sonobuoy plugins can be adapted to use [custom Kubernetes PodSpecs][custom-podspecs] where tolerations for +custom taints can be specified, these settings do not apply to workloads started by the Kubernetes end-to-end testing +framework as part of running the `e2e` plugin. + +The end-to-end test framework checks the status of the cluster before beginning to run the tests. One of the checks that +it runs, is checking that all of the nodes are schedulable and ready to accept workloads. This check deems any nodes +with a taint other than the master node taint (`node-role.kubernetes.io/master`) to be unschedulable. This means that +any node with a different taint will not be considered ready for testing and will block the tests from starting. + +With the release of Kubernetes v1.17.0, you will be able to provide a list of allowed node taints so that any node with +an allowed taint will be deemed schedulable as part of the pre-test checks. This will ensure that these nodes will not +block the tests from starting. If you are running Kubernetes v1.17.0 or greater, you will be able to specify the taints +to allow using the flag `--non-blocking-taints` which takes a comma-separated list of taints. To find out how to set +this flag via Sonobuoy, please refer to our previous answer on how to set test framework options. + +This solution does not enable workloads created by the tests to run on these nodes. This is still +an [open issue in Kubernetes][support-custom-taints]. The workloads created by the end-to-end tests will continue to run +only on untainted nodes. + +For all versions of Kubernetes prior to v1.17.0, there are two approaches that you may be able to take to allow the +tests to run. + +The first is adjusting the number of nodes the test framework allows to be "not-ready". By default, the test framework +will wait for all nodes to be ready. 
However, if only a subset of your nodes are tainted and the rest are otherwise +suitable for accepting test workloads, you could provide the test framework flag `--allowed-not-ready-nodes` specifying +the number of tainted nodes you have. By setting this, the test framework will allow for your tainted nodes to be in a " +not-ready" state. This does not guarantee that your tests will start however as a node in your cluster may not be ready +for another reason. Also, this approach will only work if there are untainted nodes as some will still need to be +available for the tests to run on. + +The only other approach is to untaint the nodes for the purposes of testing. + +### What tests can I run? How can I figure out what tests/tags I can select? + +The `e2e` plugin has a number of preconfigured modes for running tests, with the default mode running all conformance +tests which are non-disruptive. It is possible to [configure the plugin][e2ePlugin] to provide a specific set of E2E +tests to run instead. + +Which tests you can run depends on the version of Kubernetes you are testing as the list of tests changes with each +release. + +A list of the conformance tests is maintained in the [Kubernetes repository][kubernetes-conformance]. Within the GitHub +UI, you can change the branch to the tag that matches your Kubernetes version to see all the tests for that version. +This list provides each test name as well where you can find the test in the repository. You can include these test +names in the `E2E_FOCUS` or `E2E_SKIP` environment variables when [running the plugin][e2ePlugin]. + +Although the default behavior is to run the Conformance tests, you can run any of the other Kubernetes E2E tests with +Sonobuoy. These are not required for checking that your cluster is conformant and we only recommend running these if +there is specific behavior you wish to check. + +There are a large number of E2E tests available (over 4000 as of v1.16.0). 
Many of these tests have "tags" which show
+that they belong to a specific group, or have a particular trait. There isn't a definitive list of these tags, however
+below are some of the most commonly seen tags:
+
+- Conformance
+- NodeConformance
+- Slow
+- Serial
+- Disruptive
+- Flaky
+- LinuxOnly
+- Feature:* (there are numerous feature tags)
+
+There are also specific tags for tests that belong to a particular [Special Interest Group (SIG)][sig-list]. The
+following SIG tags exist within the E2E tests:
+
+- [sig-api-machinery]
+- [sig-apps]
+- [sig-auth]
+- [sig-autoscaling]
+- [sig-cli]
+- [sig-cloud-provider]
+- [sig-cloud-provider-gcp]
+- [sig-cluster-lifecycle]
+- [sig-instrumentation]
+- [sig-network]
+- [sig-node]
+- [sig-scheduling]
+- [sig-service-catalog]
+- [sig-storage]
+- [sig-ui]
+- [sig-windows]
+
+### The Sonobuoy aggregator won't start on my Windows node. Why not?
+
+If the Sonobuoy aggregator may land on a Windows node, you need to add the `--security-context-mode=none` flag when
+invoking Sonobuoy. This is because Windows nodes currently do not support fields such as `runAsUser`, which causes
+problems for the pod when it starts up. The node tries to start the pod and `chown` certain files but that process
+errors out on Windows, causing the pod to be unable to properly start up. 
+ +[kubernetes-podspec]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.16/#podspec-v1-core + +[custom-e2e-image]: https://sonobuoy.io/custom-e2e-image/ + +[custom-podspecs]: https://sonobuoy.io/customizing-plugin-podspecs/ + +[sig-list]: https://github.com/kubernetes/community/blob/master/sig-list.md + +[jq]: https://stedolan.github.io/jq/ + +[kubernetes-1.15.3]: https://github.com/kubernetes/kubernetes/tree/v1.15.3 + +[kubernetes-conformance]: https://github.com/kubernetes/kubernetes/blob/master/test/conformance/testdata/conformance.yaml + +[airgap]: airgap.md + +[e2ePlugin]: e2eplugin.md + +[customPlugins]: plugins.md + +[support-custom-taints]: https://github.com/kubernetes/kubernetes/issues/83329 + +[framework-flags]: https://github.com/kubernetes/kubernetes/blob/master/test/e2e/framework/test_context.go + +[testsuiteblog]: https://sonobuoy.io/understanding-e2e-tests + +[sonobuoy-query]: sonobuoy-query.md + +### The information gathered on the cluster is useful for me, but do I have to run a plugin to obtain it? + +No, you can run the cluster queries via the command `sonobuoy query`. Read more details about it [here][sonobuoy-query]. diff --git a/site/content/docs/v0.56.5/gen.md b/site/content/docs/v0.56.5/gen.md new file mode 100644 index 000000000..a3958bbd0 --- /dev/null +++ b/site/content/docs/v0.56.5/gen.md @@ -0,0 +1,24 @@ +# Customization + +Sonobuoy provides many flags to customize your run but sometimes you have a special use case that isn't supported yet. For these cases, Sonobuoy provides `sonobuoy gen`. + +The command `sonobuoy gen` will print the YAML for your run to stdout instead of actually creating it. It accepts all of the relevant flags for customizing the run just like `sonobuoy run` would. You can then edit it yourself and apply it as if Sonobuoy had run it. 
+ +Output the YAML Sonobuoy would create to a file: + +``` +sonobuoy gen --e2e-focus="sig-networking" --e2e-skip="Alpha" > sonobuoy.yaml +``` + +Then manually modify it as necessary. Maybe you need special options for plugins or want your own sidecar to be running with the images. + +Finally, create the resources yourself via kubectl. + +``` +sonobuoy run -f sonobuoy.yaml +``` + +> Note: If you find that you need this flow to accomplish your work, talk to us about it in our [Slack][slack] channel or file an [issue][issue] in Github. Others may have the same need and we'd love to help support you. + +[slack]: https://kubernetes.slack.com/messages/sonobuoy +[issue]: https://github.com/vmware-tanzu/sonobuoy/issues diff --git a/site/content/docs/v0.56.5/img/README.md b/site/content/docs/v0.56.5/img/README.md new file mode 100644 index 000000000..f44575760 --- /dev/null +++ b/site/content/docs/v0.56.5/img/README.md @@ -0,0 +1 @@ +Some of these diagrams (for instance sonobuoy-plugins.png), have been created on [draw.io](https://www.draw.io), using the "Include a copy of my diagram" option. If you want to make changes to these diagrams, try importing them into draw.io, and you should have access to the original shapes/text that went into the originals. 
diff --git a/site/content/docs/v0.56.5/img/plugin-contract.png b/site/content/docs/v0.56.5/img/plugin-contract.png new file mode 100755 index 000000000..d18ec66d2 Binary files /dev/null and b/site/content/docs/v0.56.5/img/plugin-contract.png differ diff --git a/site/content/docs/v0.56.5/img/scanner.png b/site/content/docs/v0.56.5/img/scanner.png new file mode 100644 index 000000000..e3c5ebd6d Binary files /dev/null and b/site/content/docs/v0.56.5/img/scanner.png differ diff --git a/site/content/docs/v0.56.5/img/snapshot-00-overview.png b/site/content/docs/v0.56.5/img/snapshot-00-overview.png new file mode 100644 index 000000000..379331dfb Binary files /dev/null and b/site/content/docs/v0.56.5/img/snapshot-00-overview.png differ diff --git a/site/content/docs/v0.56.5/img/snapshot-10-resources.png b/site/content/docs/v0.56.5/img/snapshot-10-resources.png new file mode 100644 index 000000000..c3c4ec24f Binary files /dev/null and b/site/content/docs/v0.56.5/img/snapshot-10-resources.png differ diff --git a/site/content/docs/v0.56.5/img/snapshot-20-hosts.png b/site/content/docs/v0.56.5/img/snapshot-20-hosts.png new file mode 100644 index 000000000..166f2a2a9 Binary files /dev/null and b/site/content/docs/v0.56.5/img/snapshot-20-hosts.png differ diff --git a/site/content/docs/v0.56.5/img/snapshot-30-podlogs.png b/site/content/docs/v0.56.5/img/snapshot-30-podlogs.png new file mode 100644 index 000000000..9de295723 Binary files /dev/null and b/site/content/docs/v0.56.5/img/snapshot-30-podlogs.png differ diff --git a/site/content/docs/v0.56.5/img/snapshot-40-plugins.png b/site/content/docs/v0.56.5/img/snapshot-40-plugins.png new file mode 100644 index 000000000..dd1b59169 Binary files /dev/null and b/site/content/docs/v0.56.5/img/snapshot-40-plugins.png differ diff --git a/site/content/docs/v0.56.5/img/snapshot-50-meta.png b/site/content/docs/v0.56.5/img/snapshot-50-meta.png new file mode 100644 index 000000000..efbb9be92 Binary files /dev/null and 
b/site/content/docs/v0.56.5/img/snapshot-50-meta.png differ diff --git a/site/content/docs/v0.56.5/img/sonobuoy-logo.png b/site/content/docs/v0.56.5/img/sonobuoy-logo.png new file mode 100644 index 000000000..edd5379b6 Binary files /dev/null and b/site/content/docs/v0.56.5/img/sonobuoy-logo.png differ diff --git a/site/content/docs/v0.56.5/img/sonobuoy-plugins.png b/site/content/docs/v0.56.5/img/sonobuoy-plugins.png new file mode 100644 index 000000000..0fcea8160 Binary files /dev/null and b/site/content/docs/v0.56.5/img/sonobuoy-plugins.png differ diff --git a/site/content/docs/v0.56.5/img/sonobuoy-query-1.png b/site/content/docs/v0.56.5/img/sonobuoy-query-1.png new file mode 100644 index 000000000..dd8ad24a4 Binary files /dev/null and b/site/content/docs/v0.56.5/img/sonobuoy-query-1.png differ diff --git a/site/content/docs/v0.56.5/img/sonobuoy-query-2.png b/site/content/docs/v0.56.5/img/sonobuoy-query-2.png new file mode 100644 index 000000000..1286a6311 Binary files /dev/null and b/site/content/docs/v0.56.5/img/sonobuoy-query-2.png differ diff --git a/site/content/docs/v0.56.5/index-frontmatter.yaml b/site/content/docs/v0.56.5/index-frontmatter.yaml new file mode 100644 index 000000000..5cf8e00f9 --- /dev/null +++ b/site/content/docs/v0.56.5/index-frontmatter.yaml @@ -0,0 +1,6 @@ +--- +version: v0.56.5 +cascade: + layout: docs + gh: https://github.com/vmware-tanzu/sonobuoy/tree/v0.56.5 +--- diff --git a/site/content/docs/v0.56.5/issue1388.md b/site/content/docs/v0.56.5/issue1388.md new file mode 100644 index 000000000..e5a6a2468 --- /dev/null +++ b/site/content/docs/v0.56.5/issue1388.md @@ -0,0 +1,25 @@ +# Issue Regarding Certified-Conformance Mode + +### Versions of Sonobuoy Impacted + - v0.53.0 + - v0.53.1 + +### Description + +When running `sonobuoy run --mode=certified-conformance` the `E2E_SKIP` value is not properly cleared, leading to `disruptive` tests being skipped. 
In certified-conformance mode, all tests must be run to be valid for submission to the CNCF so this bug would invalidate your results. + +### Work-around #1 + +You can manually work-around this issue by adding an extra flag at the end of your command: +``` +sonobuoy run --mode=certified-conformance --plugin-env e2e.E2E_SKIP +``` +This will set the focus value to conformance as expected and then remove the E2E_SKIP value. + +### Work-around #2 + +Use a patched version of Sonobuoy. After this bug was reported and patched, we released v0.53.2. + +### Original issue + +Thanks to [BobyMCbobs](https://github.com/BobyMCbobs) for reporting the original issue: https://github.com/vmware-tanzu/sonobuoy/issues/1388 \ No newline at end of file diff --git a/site/content/docs/v0.56.5/plugins.md b/site/content/docs/v0.56.5/plugins.md new file mode 100644 index 000000000..a0755a692 --- /dev/null +++ b/site/content/docs/v0.56.5/plugins.md @@ -0,0 +1,205 @@ +# Sonobuoy Plugins + +## Overview + +The main function of Sonobuoy is running plugins; each plugin may run tests or gather data in the cluster. + +When you first run Sonobuoy, an aggregator pod is created in the cluster which reads the configuration you've chosen and launches each plugin. + +The aggregator then waits for each plugin to report results back to it. If the plugin fails to launch correctly or does not report results within the timeout period, an error is recorded. 
+ +Below are the useful resources to understand what plugins are and how they work, what plugins are available, +how you can customize them, and how you can build your own: + + - [Plugin Types](#plugin-types) + - [Built-in Plugins](#built-in-plugins) + - [Specifying Which Plugins To Run](#specifying-which-plugins-to-run) + - [How Plugins Work](#how-plugins-work) + - [Writing your own plugin](#writing-your-own-plugin) + - [Plugin Result Types](#plugin-result-types) + - [Customizing PodSpec options](#customizing-podspec-options) + - [Providing your own PodSpec](#providing-your-own-podspec) + - [Plugin Installation (Experimental Feature)](#plugin-installation-experimental-feature) + - [Existing Plugins][examplePlugins] + +## Plugin Types + +There are two types of plugins: + +* Job plugins + +Job plugins are plugins which only need to run once. The Sonobuoy aggregator will create a single pod for this type of plugin. The Kubernetes E2E plugin is a job-type plugin. + +* Daemonset plugins + +Daemonset plugins are plugins which need to run on every node, even control-plane nodes. The systemd-logs gatherer is a daemonset-type plugin. + +## Built-in Plugins + +Two plugins are included in the Sonobuoy source code by default: + +* Kubernetes end-to-end tests (the e2e plugin) + +The upstream Kubernetes repo contains the code for this [image][conformance]. The test image includes all the pieces necessary to run the end-to-end tests (which includes, but is not limited to, the conformance tests). This is the most common plugin run by Sonobuoy and can be tweaked in numerous ways to run the set of tests that you need. See more details about how to use this plugin [here][e2ePlugin]. + +* systemd-logs gathering plugin + +Gathers the latest system logs from each node, using systemd's `journalctl` command. The image this plugin uses is built from the [heptio/sonobuoy-plugin-systemd-logs][systemd-repo] repo. 
+
+## Specifying Which Plugins To Run
+
+By default, both the `e2e` and `systemd-logs` plugins are run.
+
+Otherwise, you can specify the plugins to run (including custom plugins) by using the `--plugin` flag. This can accept the path to a plugin definition file or the name of a built-in plugin. For example:
+
+```
+# Run just the e2e plugin
+$ sonobuoy run --plugin e2e
+
+# Run your custom plugin and the systemd-logs gatherer
+$ sonobuoy run --plugin customPlugin.yaml --plugin systemd-logs
+```
+
+> Note: All of the CLI options impact the generated YAML. If you would like to edit the YAML directly or see the impact your options have on the YAML, use `sonobuoy gen`.
+
+## How Plugins Work
+
+A plugin consists of two parts:
+
+* the core logic which runs tests/gathers data (typically a single container)
+* a sidecar added by Sonobuoy which reports the data back to the aggregator
+
+After your container completes its work, it needs to signal to Sonobuoy that
+it's done by writing out the name of the results file into a "done file". The default
+value is `/tmp/results/done`, which you can configure with the `ResultsDir` value
+in the Sonobuoy config.
+
+The Sonobuoy sidecar waits for the `done` file to be present, then transmits the indicated
+file back to the aggregator.
+
+![sonobuoy plugins diagram][diagram]
+
+[diagram]: /img/plugin-contract.png
+
+### Writing your own plugin
+
+Use the `sonobuoy gen plugin` command to help generate the YAML for your plugin definition. Once you've saved that YAML locally, you can run your plugin via:
+
+```
+sonobuoy run --plugin myPlugin.yaml
+```
+
+For a thorough walkthrough of how to build a custom plugin from scratch, see our [blog post][customPluginsBlog] and our [existing plugins][examplePlugins]. 
+
+## Plugin Result Types
+
+When results get transmitted back to the aggregator, Sonobuoy inspects the results in order
+to present results metadata to the end user such as the number of passed/failed tests or
+the number of files gathered.
+
+This inspection process is informed by the YAML that describes the plugin definition. The
+`result-type` field can be set to either `raw`, `junit`, `gojson`, or `manual`.
+
+When set to `junit`, Sonobuoy will look for XML files and process them as junit test results.
+
+When set to `gojson`, Sonobuoy will look for JSON files and process them as JSON output from `go test`. [See details here.](https://golang.org/cmd/test2json/)
+
+When set to `raw`, Sonobuoy will simply inspect all the files and record the number of files generated.
+
+When set to `manual`, Sonobuoy will process files that use the Sonobuoy results metadata format.
+This option allows you to specify your own results metadata directly, rather than having Sonobuoy generate it.
+To use this option, the files to process must be specified directly in the `result-files` array field in the plugin definition, or the plugin must write a `sonobuoy_results.yaml` file.
+To find out more about using this format, see the [results][results] page.
+
+The data that Sonobuoy gathers during this step makes it possible for a user to do a few different tasks:
+
+* get high-level results without even downloading the results tarball via `sonobuoy status --json`
+* get summary information about the run via `sonobuoy results $tarball`
+* get detailed information about each test/file via `sonobuoy results $tarball --mode=detailed`
+
+To see more information about how Sonobuoy can process and present your results, see the [results][results] page and our previous [blog post][resultsBlog].
+
+### Customizing PodSpec options
+
+By default, Sonobuoy will determine how to create and run the resources required for your plugin. 
+When creating your own plugins however, you may want additional control over how the plugin is run within your cluster.
+To enable this, you can customize the [PodSpec][kubernetes-podspecs] used by Sonobuoy when creating the plugin's Pods or DaemonSets by supplying a `podSpec` object within your plugin definition.
+The `podSpec` object corresponds directly to a Kubernetes [PodSpec][kubernetes-podspecs] so any fields that are available there can be set by your plugins.
+
+If a `podSpec` is provided, Sonobuoy will use it as is, only adding what is necessary for Sonobuoy to run your plugin (such as a Sonobuoy worker container).
+Sonobuoy will only ever _add_ to your `podSpec` definition; it will not remove or override settings within it.
+If you don't need to provide any additional settings, you can omit this object and Sonobuoy will use the defaults.
+
+#### Providing your own PodSpec
+We recommend starting with the default `podSpec` used by Sonobuoy and then making any necessary modifications.
+To view the default `podSpec`, you can use the flag `--show-default-podspec` with the `gen` and `gen plugin` commands.
+
+When creating a new plugin, you can include the default `podSpec` in the generated definition as follows:
+
+```
+sonobuoy gen plugin --show-default-podspec -n my-plugin -i my-plugin:latest
+```
+
+This will produce the following plugin definition:
+
+```yaml
+podSpec:
+  containers: []
+  restartPolicy: Never
+  serviceAccountName: sonobuoy-serviceaccount
+  tolerations:
+  - effect: NoSchedule
+    key: node-role.kubernetes.io/master
+    operator: Exists
+  - key: CriticalAddonsOnly
+    operator: Exists
+sonobuoy-config:
+  driver: Job
+  plugin-name: my-plugin
+  result-type: my-plugin
+spec:
+  command:
+  - ./run.sh
+  image: my-plugin:latest
+  name: plugin
+  resources: {}
+  volumeMounts:
+  - mountPath: /tmp/results
+    name: results
+```
+
+You are then free to make modifications to the `podSpec` object as necessary. 
+
+If you already have an existing plugin which you would like to customize, you can take the default `podSpec`, add it to your plugin definition and use it as the basis for customization.
+
+> **NOTE:** The default `podSpec` differs for Job and DaemonSet plugins.
+To be sure you are using the appropriate defaults as your starting point, be sure to provide the `--type` flag when using `sonobuoy gen plugin`.
+
+You can also modify the `podSpec` from within a Sonobuoy manifest.
+By providing the flag `--show-default-podspec` to `sonobuoy gen`, the default `podSpec` for each plugin will be included within the `sonobuoy-plugins-cm` ConfigMap in the manifest.
+
+> **NOTE:** Modifications to the `podSpec` are only persisted within that generated manifest.
+If you generate a new manifest by running `sonobuoy gen` again, you will need to reapply any changes made.
+We recommend adding your desired customizations to the plugin definition itself.
+
+#### Plugin Installation (Experimental Feature)
+
+When you select which plugins to run, currently you have to specify a local file or a URL where the plugin definition exists. Keeping track of the absolute paths and URLs creates a burden to plugin adoption so we've implemented a new feature to help: plugin installation.
+
+To [enable][featureGates], set `SONOBUOY_PLUGIN_INSTALLATION=true`. Then proceed to use Sonobuoy normally. With this new functionality you can install a plugin for repeated use by executing the commands:
+
+```
+$ sonobuoy plugin install myPlugin
+$ sonobuoy run -p myPlugin
+```
+
+The plugin definition will be saved into `~/.sonobuoy` (configurable via the environment variable `SONOBUOY_DIR`). If Sonobuoy can't find the plugin in the installation directory, it will search the pwd just like current behavior. 
+ +[systemd-repo]: https://github.com/vmware-tanzu/sonobuoy-plugins/tree/main/systemd-logs +[conformance]: https://github.com/kubernetes/kubernetes/tree/master/test/conformance/image +[e2ePlugin]: e2eplugin.md +[kubernetes-podspecs]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.15/#podspec-v1-core +[customPluginsBlog]: https://blogs.vmware.com/cloudnative/2019/04/30/sonobuoy-plugins-custom-testing/ +[examplePlugins]: https://github.com/vmware-tanzu/sonobuoy-plugins +[results]: results.md +[resultsBlog]: https://sonobuoy.io/simplified-results-reporting-with-sonobuoy/ diff --git a/site/content/docs/v0.56.5/pullsecrets.md b/site/content/docs/v0.56.5/pullsecrets.md new file mode 100644 index 000000000..1659d00a9 --- /dev/null +++ b/site/content/docs/v0.56.5/pullsecrets.md @@ -0,0 +1,42 @@ +# Using a Private Sonobuoy Image with ImagePullSecrets + +This document describes how to use the ImagePullSecrets option in order to run Sonobuoy using a private Sonobuoy image. + +## Setting ImagePullSecrets + +The name of the secret to use when pulling the image can be set easily in the configuration file passed to `sonobuoy run` or `sonobuoy gen`: + +``` +echo '{"ImagePullSecrets":"mysecret"}' > secretconfig.json +sonobuoy gen --config secretconfig.json +``` + +Doing this properly passes the value and places it into the YAML for the Sonobuoy aggregator pod and all the pods for each plugin. + +## Creating the Secret + +The main complication for this flow is that secrets can only be referenced from within their own namespace. As a result we need to create the secret at the same time we create the initial resources. 
+
+Sonobuoy does not have built-in support for this, but it can be manually achieved via the following process:
+ - Manually create the YAML for the secret
+ - Insert the YAML into the output from `sonobuoy gen --config secretconfig.json`
+ - Run with `sonobuoy run -f ...`
+
+As an example of how to create the secret you can follow the instructions [here][dockersecret] in order to create a secret in the default namespace.
+
+Then get a copy of its YAML via:
+
+```
+kubectl get secret <secret-name> -o yaml > secret.yaml
+```
+
+Manually edit the file and remove/adjust the metadata as appropriate. The namespace should be adjusted to your desired Sonobuoy namespace (default: sonobuoy) and the following fields can be removed:
+ - annotations
+ - creationTimestamp
+ - resourceVersion
+ - selfLink
+ - uid
+
+Then just insert that YAML into the output from `sonobuoy gen` and run with `sonobuoy run -f ...`
+
+[dockersecret]: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
diff --git a/site/content/docs/v0.56.5/release.md b/site/content/docs/v0.56.5/release.md
new file mode 100644
index 000000000..facda33ab
--- /dev/null
+++ b/site/content/docs/v0.56.5/release.md
@@ -0,0 +1,121 @@
+# Release
+
+## Preparing release version
+
+1. Update the `version` defined in the code to the new version number.
+   As of the time of writing, the version is defined in `pkg/buildinfo/version.go`.
+
+## Kubernetes release
+Has there been a Kubernetes release since the last Sonobuoy release? If so, apply the following steps:
+
+### Conformance images
+Ensure the upstream conformance script is working appropriately:
+ * Update the `kind-config.yaml` file with the new image version [here](https://github.com/vmware-tanzu/sonobuoy/blob/main/kind-config.yaml).
+ * Run conformance tests
+
+## Updating the versioned docs
+Explicit doc changes (if any) should be made to the appropriate files in directory `site/docs/main`.
+
+Next, generate a set of versioned docs for v0.x.y. 
For instance, the new docs can be generated by running the command:
+
+```
+./scripts/update_docs.sh v0.20.0
+```
+
+This will copy the current `main` docs into the version given and update
+a few of the links in the README to be correct. It will also update
+the website config to add the new version and consider it the newest
+version of the docs.
+
+## Create PR
+1. Commit previous changes and open a new PR
+2. Ensure your commits are signed
+3. Follow PR build progress in circleci.com
+4. Once all checks pass, merge accordingly
+
+## Tag release
+This step will tag the code and trigger a release.
+
+1. From your local branch, create an annotated `tag` for the commit that was merged:
+
+   ```
+   git tag -a v0.x.y -m "Release v0.x.y"
+   ```
+
+   > NOTE: Tag the new tip of `main`, not the branch you just merged.
+
+1. Push the `tag` to the [`github.com/vmware-tanzu/sonobuoy`](https://github.com/vmware-tanzu/sonobuoy/) repository.
+2. To ensure that the tag is pushed to the correct repository, check which remote corresponds to that repository using the following command:
+
+   ```
+   git remote -v
+   ```
+   The output of this command should include at least two configured remotes, typically `origin`, which refers to your personal fork, and `upstream` which refers to the upstream Sonobuoy repository.
+   For example:
+
+   ```
+   origin    git@github.com:<username>/sonobuoy.git (fetch)
+   origin    git@github.com:<username>/sonobuoy.git (push)
+   upstream    https://github.com/vmware-tanzu/sonobuoy (fetch)
+   upstream    https://github.com/vmware-tanzu/sonobuoy (push)
+   ```
+   For the following steps, use the remote configured for the `vmware-tanzu/sonobuoy` repository.
+   The following instructions will use `upstream`.
+1. Push the tag with the following command.
+   > NOTE: This will push all tags. 
+
+   ```
+   git push upstream --tags
+   ```
+   To push just one tag, use the following command format (replacing `v0.x.y` with the tag created in the previous step):
+
+   ```
+   git push upstream refs/tags/v0.x.y
+   ```
+   If there is a problem and you need to remove the tag, run the following commands:
+
+   ```
+   git tag -d v0.x.y
+   git push upstream :refs/tags/v0.x.y
+   ```
+   > NOTE: The `:` preceding the tag ref is necessary to delete the tag from the remote repository.
+   > Git refspecs have the format `<+><src>:<dst>`.
+   > By pushing an empty `src` to the remote `dst`, it makes the destination ref empty, effectively deleting it.
+   > For more details, see the [`git push` documentation](https://git-scm.com/docs/git-push) or [this concise explanation on Stack Overflow](https://stackoverflow.com/a/7303710).
+
+
+## Release Validation
+1. Open a browser tab and go to: https://github.com/vmware-tanzu/sonobuoy/actions and verify go releaser for tag v0.x.y completes successfully.
+1. Upon successful completion of build job above, check the [releases tab of Sonobuoy](https://github.com/vmware-tanzu/sonobuoy/releases) and verify the artifacts and changelog were published correctly.
+1. Run the following command to make sure the image was pushed correctly to [Docker Hub][dockerhub]:
+
+   ```
+   docker run -it sonobuoy/sonobuoy:v0.x.y /sonobuoy version
+   ```
+   The `Sonobuoy Version` in the output should match the release tag above.
+1. Go to the [GitHub release page](https://github.com/vmware-tanzu/sonobuoy/releases) and download the release binaries and make sure the version matches the expected values.
+2. Run a [Kind](https://github.com/kubernetes-sigs/kind) cluster locally and ensure that you can run `sonobuoy run --mode quick`. 
+ If this release corresponds to a new Kubernetes release as well, ensure: + + * you're using the correct Kubernetes context by checking the output from: + + ``` + kubectl config current-context + ``` + + and verifying that it is set to the context for the Kind cluster just created (`kind-kind` or `kind-`) + * you're testing with the new Kind images by checking the output from: + + ``` + kubectl version --short + ``` + + and verifying that the server version matches the intended Kubernetes version. + * you can run `sonobuoy images` and get a list of test images as expected +2. Update the release notes if desired on GitHub by editing the newly created release. + +### Notes +1. Before releasing, ensure all parties are available to resolve any issues that come up. If not, just bump the release. + +[gendocs]: #generating-a-new-set-of-versioned-docs +[dockerhub]: https://cloud.docker.com/u/sonobuoy/repository/docker/sonobuoy/sonobuoy/tags diff --git a/site/content/docs/v0.56.5/results.md b/site/content/docs/v0.56.5/results.md new file mode 100644 index 000000000..7e3423e73 --- /dev/null +++ b/site/content/docs/v0.56.5/results.md @@ -0,0 +1,169 @@ +# Viewing Plugin Results + +The `sonobuoy results` command can be used to print the results of a plugin without first having to extract the files from the tarball. + +## Canonical Data Format + +Plugin results undergo post-processing on the server to produce a tree-like file which contains information about the tests run (or files generated) by the plugin. This is the file which enables `sonobuoy results` to present reports to the user and navigate the tarball effectively. + +Currently, plugins are specified as either producing `junit` results (like the `e2e` plugin), `raw` results (like the `systemd-logs` plugin), or you can specify your own results file in the format used by Sonobuoy by specifying the option `manual`. 
+ +To see this file directly you can either open the tarball and look for `plugins//sonobuoy_results.yaml` or run: + +``` +sonobuoy results $tarball --mode=dump +``` + +## Working with any Plugin + +By default, the command produces a human-readable report corresponding to the `e2e` plugin. However, you can specify other plugins by name. For example: + +``` +$ sonobuoy results $tarball --plugin systemd-logs +Plugin: systemd-logs +Status: passed +Total: 1 +Passed: 1 +Failed: 0 +Skipped: 0 +``` + +> In the above output, notice that even though the `systemd-logs` plugin doesn't run "tests" per se, each file produced by the plugin is reported on: a readable file is reported as a success. + +## Detailed Results + +If you would like to view or script around the individual tests/files, use the `--mode detailed` flag. In the case of junit tests, it will write a list of json objects which can be piped to other commands or saved to another file. + +To see the passed tests, one approach would be: + +``` +$ sonobuoy results $tarball --mode=detailed | jq 'select(.status=="passed")' +``` + +To list the conformance tests, one approach would be: + +``` +$ sonobuoy results $tarball --mode=detailed|jq 'select(.name | contains("[Conformance]"))' +``` + +When dealing with non-junit plugins, the `--mode detailed` results will print the file output with a prefix that reports on the nature/location of the file: + +``` +$ sonobuoy results $tarball --mode=detailed --plugin systemd-logs|head -n1 +systemd-logs|kind-control-plane|systemd_logs {"_HOSTNAME":"kind-control-plane",...} +``` + +The prefix is telling you that this result came from the "systemd-logs" plugin, was from the "kind-control-plane" node, and the filename was "systemd_logs". + +If you had multiple nodes, you could look at just one by adding the `--node` flag. 
It walks the result tree and will return only results rooted from the given node: + +``` +$ sonobuoy results $tarball --mode=detailed --plugin systemd-logs --node=kind-control-plane|head -n1 +kind-control-plane|systemd_logs {"_HOSTNAME":"kind-control-plane",...} +``` + +Now if you wanted to script around the actual file output (in this case it is json), you wouldn't want to keep that prefix around. Just add the `--skip-prefix` flag to get only the raw file output so that you can manipulate it easily: + +``` +$ sonobuoy results $tarball --mode=detailed --plugin systemd-logs --node=kind-control-plane --skip-prefix|head -n1|jq .MESSAGE +{"_HOSTNAME":"kind-control-plane",...} +``` + +## Providing results manually + +When creating a plugin, you can choose to have your plugin write its results in the same format as the Sonobuoy results metadata. +This allows you to take advantage of the `sonobuoy results` workflow even if your plugin doesn't produce output in one of the other supported formats. + +When using this option, Sonobuoy will process files in the Sonobuoy result format and perform any necessary aggregation to produce a single report for your plugin. +How these results are aggregated depends on how many result files your plugin produces and whether or not the plugin is a `Job` or `DaemonSet` plugin. + +To use this feature, you must set the `result-type` to `manual` in the plugin definition. +When gathering the results files to aggregate, Sonobuoy will look for files listed in the `result-files` array entry in the plugin definition, or if no files are provided, it will look for a `sonobuoy_results.yaml` file in the results directory. +When using this mode, any files written to the results directory will still be available in the results tarball however only the plugin `result-files` or the `sonobuoy_results.yaml` file will be used when generating the results metadata. 
+ +The following is an example of a plugin definition using manual results: + +```yaml +sonobuoy-config: + driver: Job + plugin-name: manual-results-plugin + result-format: manual + result-files: + - manual-results-1.yaml + - manual-results-2.yaml +spec: + command: + - ./run.sh + image: custom-image:latest + name: plugin + resources: {} + volumeMounts: + - mountPath: /tmp/results + name: results +``` + +### Manual results format + +The format for manual results is the same as the format used by Sonobuoy when writing its results metadata. +It is a tree-like recursive data structure, where child nodes are the same type as the parent node, allowing nesting of items. +The definition for this format can be found [here](https://github.com/vmware-tanzu/sonobuoy/blob/v0.18.0/pkg/client/results/processing.go#L91-L100). + +Each result `item` comprises: + + * `name`: string + * `status`: string + * `meta`: map of string to string + * `details`: map of string to interface{} + * `items`: array of `item` + +An example of this format is given below: + +```yaml +name: manual-results-plugin +status: custom-status +meta: + type: summary +items: +- name: Manual test suite + status: complete + items: + - name: Manual test + status: custom-status-1 + details: + stdout: "stdout from the test" + messages: + - message from the test + - another message + - name: Another manual test + status: custom-status-2 + details: + stderr: "stderr from the test" +``` + +The format is flexible, with no restrictions on the values used for each of the fields. + +### Manual results aggregation + +Sonobuoy will aggregate the results from any manual results files that it processes. +Like other plugins, it will aggregate all the results that it processes into a single results metadata file. + +Each manual result file processed by Sonobuoy will be collected to form the `items` entry in the aggregated results file. 
+In the case of a `DaemonSet` plugin, any manual result files will be grouped by the node from which they were retrieved. + +The aggregated `status` for a plugin will be based on the `status` reported within each manual result file. +In the case where the same status is found across all result files, that will be the reported status for the plugin. +Where a plugin produces multiple results files and multiple different statuses are reported, the aggregate `status` for the plugin will be the `status` from each file grouped by count into a single human readable format. +It will take the form of `status: count, status: count, ...`. +For `DaemonSet` plugins, where results files will be generated for each node, the status will be aggregated for each node in addition to the overall summary level. + +## Summary + + - `sonobuoy results` can show you results of a plugin without extracting the tarball + - Plugins are either `junit`, `gojson`, `raw` or `manual` type currently + - When viewing `junit` results, json data is dumped for each test + - When viewing `gojson` results, json data is dumped for each test + - When viewing `raw` results, file contents are dumped directly + - When viewing `manual` results, results are included as provided by the plugin + - Use the `--mode` flag to see either report, detail, or dump level data + - Use the `--node` flag to view results rooted at a different location + - Use the `--skip-prefix` flag to print only file output diff --git a/site/content/docs/v0.56.5/snapshot.md b/site/content/docs/v0.56.5/snapshot.md new file mode 100644 index 000000000..c904f1fad --- /dev/null +++ b/site/content/docs/v0.56.5/snapshot.md @@ -0,0 +1,131 @@ +# Sonobuoy Snapshot Layout + +- [Retrieving results](#retrieving-results) +- [Filename](#filename) +- [Contents](#contents) + - [/hosts](#hosts) + - [/meta](#meta) + - [/plugins](#plugins) + - [/podlogs](#podlogs) + - [/resources](#resources) + - [/servergroups.json](#servergroups.json) + - 
[/serverversion.json](#serverversionjson) + +This document describes retrieving the Sonobuoy results tarball, its layout, how it is formatted, and how data is named and laid out. + +## Retrieving results + +To view the output, copy the output directory from the aggregator Sonobuoy pod to +your local machine (and save the name of the file to a variable for reference): + +``` +output=$(sonobuoy retrieve) +``` + +The results of plugins can be inspected without being extracted. By default, it will give you a human-readable report about the tests but also has options to list detailed information and even print raw files generated by the plugin. See the [results page][results] for more details. + +``` +sonobuoy results $output [--plugin ] [--mode report|detailed|dump] +``` + +You can also extract the output locally so that you can view the other +information Sonobuoy gathered as well: + - detailed plugin results + - pod logs + - query results about the contents/state of your cluster + +``` +$ sonobuoy retrieve --extract + +OR + +$ output=$(sonobuoy retrieve --extract) +$ mkdir ./results; tar xzf $output -C ./results +``` + +## Filename + +A Sonobuoy snapshot is a gzipped tarball, named `YYYYmmDDHHMM_sonobuoy_.tar.gz`. + +where YYYYmmDDHHMM is a timestamp containing the year, month, day, hour, and minute of the run. The `` string is an RFC4122 UUID, consisting of lowercase hexadecimal characters and dashes (e.g. "dfe30ebc-f635-42f4-9608-8abcd6311916"). This UUID should match the UUID from the snapshot's [meta/config.json][1], stored at the root of the tarball. + +## Contents + +The top-level directories in the results tarball look like this: + +![tarball overview screenshot][3] + +### /hosts + +The `/hosts` directory contains the information gathered about each host in the system by directly querying their HTTP endpoints. 
+This is different from what you find in `/resources/cluster/Nodes.json` -- it contains items that aren't part of the Kubernetes API objects: + +- `/hosts//configz.json` - Contains the output of querying the `/configz` endpoint for this host -- that is, the component configuration for the host. +- `/hosts//healthz.json` - Contains a json-formatted representation of the result of querying `/healthz` for this host, for example `{"status":200}` + +This looks like the following: + +![tarball hosts screenshot][5] + +### /meta + +The `/meta` directory contains metadata about this Sonobuoy run, including configuration and query runtime. + +- `/meta/query-time.json` - Contains metadata about how long each query took, example: `{"queryobj":"Pods","time":"12.345ms"}` +- `/meta/config.json` - A copy of the Sonobuoy configuration that was set up when this run was created, but with unspecified values filled in with explicit defaults, and with a `UUID` field in the root JSON, set to a randomly generated UUID created for that Sonobuoy run. + +This looks like the following: + +![tarball meta screenshot][8] + +### /plugins + +The `/plugins` directory contains output for each plugin selected for this Sonobuoy run: + +- `/plugins//results/` - For plugins that run on an arbitrary node to collect cluster-wide data, for example using the Job driver. Contains the results for the plugin. + +- `/plugins//results//` - For plugins that run once on every node to collect node-specific data, for example using the DaemonSet driver. Contains the results for the plugin, for each node. + +- `/plugins//sonobuoy_results.yaml` - A file generated by the server by post-processing the plugin results. This is the file that `sonobuoy results` relies on. + +This looks like the following: + +![tarball plugins screenshot][7] + +### /podlogs + +The `/podlogs` directory contains logs for each pod found during the Sonobuoy run, similar to what you get with `kubectl logs -n `.
+ +- `/podlogs///.log` - Contains the logs for each container, for each pod in each namespace. + +This looks like the following: + +![tarball podlogs screenshot][6] + +### /resources + +The `/resources` directory lists JSON-serialized Kubernetes objects, taken from querying the Kubernetes REST API. The directory has the following structure: + +- `/resources/ns//.json` - For all resources that belong to a namespace, where `` is the namespace of that resource (eg. `kube-system`), and `` is the type of resource, pluralized (eg. `Pods`). +- `/resources/cluster/.json` - For all resources that don't belong to a namespace, where `` is the type of resource, pluralized (eg. `Nodes`). + +This looks like the following: + +![tarball resources screenshot][4] + +### /servergroups.json + +`/servergroups.json` lists the Kubernetes APIs that the cluster supports. + +### /serverversion.json + +`/serverversion.json` contains the output from querying the server's version, including the major and minor version, git commit, etc. + +[1]: #meta +[3]: /img/snapshot-00-overview.png +[4]: /img/snapshot-10-resources.png +[5]: /img/snapshot-20-hosts.png +[6]: /img/snapshot-30-podlogs.png +[7]: /img/snapshot-40-plugins.png +[8]: /img/snapshot-50-meta.png +[results]: results.md diff --git a/site/content/docs/v0.56.5/sonobuoy-config.md b/site/content/docs/v0.56.5/sonobuoy-config.md new file mode 100644 index 000000000..dc4d2d1c7 --- /dev/null +++ b/site/content/docs/v0.56.5/sonobuoy-config.md @@ -0,0 +1,86 @@ +# Sonobuoy Config + +The commands "run" and "gen" both accept a parameter for a Sonobuoy config file which allows you to customize multiple aspects of the run. + +We've provided a command to generate the JSON file necessary so that it is easier to edit for your runs. Run the command: + +``` +sonobuoy gen config +``` + +and you will see the default configuration. Below is a description of each of the values. 
+ +## General options + +`Description`: A string which provides consumers a way to add extra context to a configuration that may be in memory or saved to disk. Unused by Sonobuoy itself. + +`UUID`: A unique identifier used to identify the run of this configuration. Used in a few places including the name of the results file. + +`Namespace`: The namespace in which to run Sonobuoy. + +`WorkerImage`: The image for the Sonobuoy worker container which runs as a sidecar along the plugins. Responsible for reporting results back to the Sonobuoy aggregator. + +`ImagePullPolicy`: The image pull policy to set on the Sonobuoy worker sidecars as well as each of the plugins. + +`ResultsDir`: The location on the Sonobuoy aggregator where the results are placed. + +`Version`: The version of Sonobuoy which created the configuration file. + + +## Plugin options + +`Plugins`: An array of plugin selection objects of the plugins you want to run. When running custom plugins (or avoiding running a particular plugin) this value needs modified. + +`PluginSearchPath`: The aggregator pod looks for plugin configurations in these locations. You shouldn't need to edit this unless you are doing development work on the aggregator itself. + +## Query options + +`Resources`: A list of resources which Sonobuoy will query for in every namespace in which it runs queries. In the namespace in which Sonobuoy is running, `PodLogs`, `Events`, and `HorizontalPodAutoscalers` are also added. + +`Filters`: Options for filtering which resources queries should be run against. + + * `Namespace`: A regexp which specifies which namespaces to run queries against. + * `LabelSelector`: A Kubernetes [label selector][labelselector] which will be added to every query run. + +`Limits`: Options for limiting the scope of response. + + * `PodLogs`: limits the scope when getting logs from pods. The supported parameters are: + + * `Namespaces`: string + + * A regular expression for the targeted namespaces. 
+ * Default is empty string + * To get logs from all namespaces use ".*" + * `SonobuoyNamespace`: bool + + * If set to true, get pod logs from the namespace Sonobuoy is running in. Can be set along with a `Namespaces` field or on its own. + * Default value is true + * `FieldSelectors`: []string + + * A list of field selectors, with OR logic. + For example, to get logs from two specified namespaces `FieldSelectors = ["metadata.namespace=default","metadata.namespace=heptio-sonobuoy"]` + * Each field selector contains one or more chained operators, with AND logic + For example, to get logs from a specified pod `FieldSelectors = ["metadata.namespace=default,metadata.name=pod1"]` + * Each field selector follows the same format as a [Kubernetes Field Selector][fieldselector]. + * Can be set along with the `Namespaces` or `SonobuoyNamespace` field or on its own. + * `LabelSelector`: string + + * Filters candidate pods by their labels, using the same format as a [Kubernetes Label Selector][labelselector]. + For example: `LabelSelector = "app=nginx,layer in (frontend, backend)"` + * When set together with other fields, the scope of pods is defined by: + ``` + (Namespaces OR SonobuoyNamespace OR FieldSelectors) AND LabelSelector + ``` + + * For each candidate pod, the format and size of logs are defined by other fields. These will be passed onto Kubernetes [PodLogOptions][podlogopts]: + * `Previous`: bool + * `SinceSeconds`: int + * `SinceTime`: string. RFC3339 format. 
+ * `Timestamps`: bool + * `TailLines`: int + * `LimitBytes`: int + + +[fieldselector]: https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/ +[labelselector]: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +[podlogopts]: https://godoc.org/k8s.io/api/core/v1#PodLogOptions diff --git a/site/content/docs/v0.56.5/sonobuoy-query.md b/site/content/docs/v0.56.5/sonobuoy-query.md new file mode 100644 index 000000000..09ab40cf4 --- /dev/null +++ b/site/content/docs/v0.56.5/sonobuoy-query.md @@ -0,0 +1,20 @@ +# `sonobuoy query` command + +## Intro + +`sonobuoy query` command runs queries against your cluster and helps in debugging by providing useful information about the cluster which helps us to find bugs and fix them before they can cause any severe harm to the cluster. These queries were run from the aggregator pod (made through `sonobuoy run`) in the past. Now you can run them locally without running Sonobuoy or launching pods. Just run sonobuoy query which is much faster and convenient. + +![sonobuoy query](img/sonobuoy-query-1.png) + +- The logs are generated via logrus (https://github.com/sirupsen/logrus), which contains three parts + `time, level, msg`. + +- `msg` provides information about the cluster which helps us to find bugs in our clusters and tackle them. + +- `level` specifies log level. We can specify it via the level flag and valid values are `{panic, fatal, error, warn, info, debug, trace}` and the default is `info`. + + ![sonobuoy query --level panic](img/sonobuoy-query-2.png) + +- By default, Sonobuoy creates a temporary directory in the current directory to store the query information. You can capture this directory for scripting (it's the only value output to stdout) or you can specify your own directory via the `--output` flag. + +- While logging we can also specify **sonobuoy config file** in order to specify query configuration values.
This allows you to tweak things like which namespaces to get pod logs for or what API resources to query. In most cases, the default is appropriate (gets logs for kube-system and queries all non-secret resources). diff --git a/site/content/docs/v0.56.5/strategy.md b/site/content/docs/v0.56.5/strategy.md new file mode 100644 index 000000000..52b02c2b5 --- /dev/null +++ b/site/content/docs/v0.56.5/strategy.md @@ -0,0 +1,36 @@ +# Sonobuoy Strategy + +## The problem +Users of Kubernetes value its ability to automate running complex systems at scale. However, little Kubernetes cluster validation is automated. As a result, skilled Kubernetes operators spend needless time performing manual tasks. Just as automated unit tests frontload the work of testing software by having the developer codify how to check that their code is functional, Kubernetes would benefit from automated test suites that allow skilled Kubernetes operators and developers to codify once how to test cluster capabilities and then have those checks run by whichever Kubernetes user needs this. + +Examples: + - Kubernetes experts still perform many manual tasks: + - A security auditor works with a Kubernetes expert to determine whether a Kubernetes cluster is CIS compliant. + - After a Kubernetes cluster is restored, the backup admin spends time running manual tests to confirm the cluster is functional. + - A support engineer manually checks different areas of functionality to diagnose a Kubernetes cluster issue while on a Zoom call with a customer. + - To install developer tooling, for example a cloud native runtime like Knative, a Kubernetes expert from the platform team is needed to validate that the user has the required installation privileges. + +Worse, these steps may be skipped, leading to wasted time, technical debt, or even risk of operational failure: + + - The CIS audit is skipped, leaving security vulnerabilities in the Kubernetes cluster.
+ - The backup admin doesn’t check the restored cluster and only realizes later through user-reported bugs that the container registry didn’t correctly re-attach to its S3 image store. + - The support engineer tries reading logs to diagnose the problem, but because of the complexity of the issue, the logs lead in the wrong direction. + - The platform operator installing the developer tooling (Cloud Native Runtime) doesn’t know how to check if the Kubernetes cluster has a load balancer, so they either spend significant extra time researching how to check this or skip the step, but then encounter myriad problems later when the tooling doesn’t install correctly. + +## Who would benefit from solving this problem? + - Users of these automated test suites reduce risk to the business, since alternatives are to use less reliable manual testing or skipping such testing altogether. Users also potentially save time, and thus money, that would have been spent in manual testing. + - Skilled Kubernetes operators can spend their time on innovative work that only they can do, rather than repetitive manual checks. This, of course, can translate into increased revenue or decreased costs for their organization through prioritization of experts’ time. + - Organizations can more reliably get started with Kubernetes, given the existence of automated validation checks alongside their deployment and operation processes. In this way, they can slowly grow their platform teams to have Kubernetes knowledge, rather than having to make a big up front investment in operators skilled in Kubernetes. + +## The solution +A tool exists that solves this exact problem. Sonobuoy, well-known in the Kubernetes community as the CNCF-recommended way to run the Kubernetes conformance tests, has an underused pluggable architecture that allows users to build automated test suites and run them in Kubernetes clusters.
Teams can use the Sonobuoy plugin skeleton to easily and quickly create customized test suites that suit their and other users’ needs. + +## Call to Action + +Please help us increase the number of Sonobuoy use cases! +The Sonobuoy team has started pairing with teams, to help them develop the suites they need. +While teams can indeed create the suites themselves, we want to understand users of the plugin skeleton so we can better encourage adoption. +We are learning about each teams' use cases and how to make it as easy as possible to create and run Sonobuoy customized test suites. +Once we have several functional, often-used test suites, we will explore ways to organically grow the number of teams and Kubernetes users using this Sonobuoy feature, such as promoting this Sonobuoy feature to the larger Kubernetes community. + +Be in touch if you are using Sonobuoy beyond conformance testing - we want to learn more. And we will do our best to help you if you are brainstorming a new, creative application of Sonobuoy's automated cluster validation. 
\ No newline at end of file diff --git a/site/data/docs/toc-mapping.yml b/site/data/docs/toc-mapping.yml index 619ebf771..baac85842 100644 --- a/site/data/docs/toc-mapping.yml +++ b/site/data/docs/toc-mapping.yml @@ -5,6 +5,7 @@ # Below is a commented out example of what this may look like: main: main-toc +v0.56.5: v0-56-5toc v0.56.4: v0-56-4toc v0.56.3: v0-56-3toc v0.56.2: v0-56-2toc diff --git a/site/data/docs/v0-56-5toc.yml b/site/data/docs/v0-56-5toc.yml new file mode 100644 index 000000000..6bb061bbb --- /dev/null +++ b/site/data/docs/v0-56-5toc.yml @@ -0,0 +1,32 @@ +toc: + - title: Basics + subfolderitems: + - page: Overview + url: /index.html + - page: Checking Results + url: /results + - title: Plugins + subfolderitems: + - page: Overview + url: /plugins + - page: E2E & Conformance + url: /e2eplugin + - page: Examples + url: https://github.com/vmware-tanzu/sonobuoy-plugins/ + absolute: true + - title: Advanced + subfolderitems: + - page: Detailed result contents + url: /snapshot + - page: Configuration Options + url: /sonobuoy-config + - page: Custom Registries & Airgap Testing + url: /airgap + - page: Using Private Images + url: /pullsecrets + - page: Advanced Customization + url: /gen + - title: Resources + subfolderitems: + - page: Frequently Asked Questions + url: /faq