diff --git a/helmcharts/bootstrapper/values.yaml b/helmcharts/bootstrapper/values.yaml
index 2eb76fe5..c296369f 100644
--- a/helmcharts/bootstrapper/values.yaml
+++ b/helmcharts/bootstrapper/values.yaml
@@ -24,6 +24,7 @@ namespaces:
 - volume-autoscaler
 - web-console
 - config-api
+- otel
 
 global:
   image:
diff --git a/helmcharts/global-resource-values.yaml b/helmcharts/global-resource-values.yaml
index 8f2f0bed..09a7ee2e 100644
--- a/helmcharts/global-resource-values.yaml
+++ b/helmcharts/global-resource-values.yaml
@@ -679,3 +679,12 @@ hms:
     requests:
       cpu: 100m
       memory: 1024Mi
+
+opentelemetry-collector:
+  resources:
+    limits:
+      cpu: 250m
+      memory: 512Mi
+    requests:
+      cpu: 250m
+      memory: 512Mi
diff --git a/helmcharts/global-values.yaml b/helmcharts/global-values.yaml
index 0219cf2d..e06fa0a5 100644
--- a/helmcharts/global-values.yaml
+++ b/helmcharts/global-values.yaml
@@ -27,7 +27,8 @@ defaults:
   volume_autoscaler_namespace: &volume-autoscaler-namespace "volume-autoscaler"
   hms_namespace: &hms_namespace "hms"
   trino_namespace: &trino_namespace "trino"
-
+  otel_namespace: &otel-namespace "otel"
+
 postgres:
   pghost: &pghost "postgresql-hl.postgresql.svc.cluster.local"
   obsrv_username: &psql-obsrv-user "obsrv"
@@ -204,6 +205,9 @@ druid: &druid
   port: 8888
   supervisorEndpoint: "indexer/v1/supervisor"
 
+opentelemetry-collector:
+  namespace: *otel-namespace
+
 redis-denorm: &redis_denorm
   namespace: *redis-namespace
   host: redis-denorm-headless.redis.svc.cluster.local
diff --git a/helmcharts/images.yaml b/helmcharts/images.yaml
index 23e8a2a4..3f21b7c2 100644
--- a/helmcharts/images.yaml
+++ b/helmcharts/images.yaml
@@ -337,6 +337,10 @@ images: &images
     repository: lakehouse-connector
     tag: 1.1-RC
 
+  opentelemetry-collector: &opentelemetry-collector
+    repository: otel/opentelemetry-collector-contrib
+    tag: "0.113.0"
+    digest: ""
 # Public charts like bitnami etc
 # Modyfying these charts involves many changes
 # Hence we are keeping their 
original desgin
@@ -537,6 +541,11 @@ internal: &internal
     image:
       <<: *lakehouse-connector
 
+  opentelemetry-collector:
+    image:
+      <<: *opentelemetry-collector
+    imagePullSecrets: *imagePullSecrets
+
 ## Sourcing internal as root element,
 ## should need arise
diff --git a/helmcharts/kitchen/install.sh b/helmcharts/kitchen/install.sh
index d3b8d77e..f206653f 100755
--- a/helmcharts/kitchen/install.sh
+++ b/helmcharts/kitchen/install.sh
@@ -71,6 +71,13 @@ hudi)
   cp -rf ../services/{hms,trino,lakehouse-connector} hudi/charts/
   helm $cmd hudi ./hudi -n obsrv -f global-resource-values.yaml -f global-values.yaml -f images.yaml -f $cloud_file_name --debug
   ;;
+otel)
+  rm -rf opentelemetry-collector
+  cp -rf ../obsrv opentelemetry-collector
+  cp -rf ../services/opentelemetry-collector opentelemetry-collector/charts/
+  helm $cmd opentelemetry-collector ./opentelemetry-collector -n obsrv -f global-resource-values.yaml -f global-values.yaml -f images.yaml -f $cloud_file_name --debug
+  ;;
+
 obsrvtools)
   rm -rf obsrvtools
   cp -rf ../obsrv obsrvtools
@@ -104,6 +111,7 @@ all)
   bash $0 hudi
   bash $0 obsrvtools
   bash $0 additional
+  bash $0 otel
   ;;
 reset)
   helm uninstall additional -n obsrv
@@ -115,6 +123,7 @@ reset)
   helm uninstall migrations -n obsrv
   helm uninstall coredb -n obsrv
   helm uninstall obsrv-bootstrap -n obsrv
+  helm uninstall opentelemetry-collector -n obsrv
   ;;
 *)
   if [ ! 
-d "../services/$1" ]; then diff --git a/helmcharts/services/flink/values.yaml b/helmcharts/services/flink/values.yaml index 06b23438..a6f78078 100644 --- a/helmcharts/services/flink/values.yaml +++ b/helmcharts/services/flink/values.yaml @@ -352,6 +352,8 @@ baseconfig: | host = "{{ .Values.global.cassandra.host }}" port = "{{ .Values.global.cassandra.port }}" } + + otel.collector.endpoint="http://opentelemetry-collector.otel.svc.cluster.local:4317" diff --git a/helmcharts/services/opentelemetry-collector/.helmignore b/helmcharts/services/opentelemetry-collector/.helmignore new file mode 100644 index 00000000..7e03f4bc --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/.helmignore @@ -0,0 +1,27 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ + +# Ignore unittest +tests/ +*/__snapshot__/* diff --git a/helmcharts/services/opentelemetry-collector/CONTRIBUTING.md b/helmcharts/services/opentelemetry-collector/CONTRIBUTING.md new file mode 100644 index 00000000..aa0bf266 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/CONTRIBUTING.md @@ -0,0 +1,12 @@ +# Collector Chart Contributing Guide + +All changes to the chart require a bump to the version in `chart.yaml`. See the [Contributing Guide](https://github.com/open-telemetry/opentelemetry-helm-charts/blob/main/CONTRIBUTING.md#versioning) for our versioning requirements. + +Once the chart version is bumped, the examples must be regenerated. You can regenerate examples by running `make generate-examples CHARTS=opentelemetry-collector`. + +## Bumping Default Collector Version + +1. Increase the minor version of the chart by one and set the patch version to zero. +2. 
Update the chart's `appVersion` to match the new collector version. This version will be used as the image tag by default. +3. Review the corresponding release notes in [Collector Core](https://github.com/open-telemetry/opentelemetry-collector/releases), [Collector Contrib](https://github.com/open-telemetry/opentelemetry-collector-contrib/releases), and [Collector Releases](https://github.com/open-telemetry/opentelemetry-collector-releases/releases). If any changes affect the helm charts, adjust the helm chart accordingly. +4. Run `make generate-examples CHARTS=opentelemetry-collector`. diff --git a/helmcharts/services/opentelemetry-collector/Chart.yaml b/helmcharts/services/opentelemetry-collector/Chart.yaml new file mode 100644 index 00000000..a64abedd --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v2 +name: opentelemetry-collector +version: 0.109.0 +description: OpenTelemetry Collector Helm chart for Kubernetes +type: application +home: https://opentelemetry.io/ +sources: + - https://github.com/open-telemetry/opentelemetry-collector + - https://github.com/open-telemetry/opentelemetry-collector-contrib +maintainers: + - name: dmitryax + - name: jaronoff97 + - name: TylerHelmuth +icon: https://opentelemetry.io/img/logos/opentelemetry-logo-nav.png +appVersion: 0.113.0 diff --git a/helmcharts/services/opentelemetry-collector/README.md b/helmcharts/services/opentelemetry-collector/README.md new file mode 100644 index 00000000..a085a887 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/README.md @@ -0,0 +1,251 @@ +# OpenTelemetry Collector Helm Chart + +The helm chart installs [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector) +in kubernetes cluster. 
+ +## Prerequisites + +- Kubernetes 1.24+ +- Helm 3.9+ + +## Installing the Chart + +Add OpenTelemetry Helm repository: + +```console +helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts +``` + +To install the chart with the release name my-opentelemetry-collector, run the following command: + +```console +helm install my-opentelemetry-collector open-telemetry/opentelemetry-collector --set mode= --set image.repository="otel/opentelemetry-collector-k8s" --set command.name="otelcol-k8s" +``` + +Where the `mode` value needs to be set to one of `daemonset`, `deployment` or `statefulset`. + +For an in-depth walk through getting started in Kubernetes using this helm chart, see [OpenTelemetry Kubernetes Getting Started](https://opentelemetry.io/docs/kubernetes/getting-started/). + +## Upgrading + +See [UPGRADING.md](UPGRADING.md). + +## Security Considerations + +OpenTelemetry Collector recommends to bind receivers' servers to addresses that limit connections to authorized users. +For this reason, by default the chart binds all the Collector's endpoints to the pod's IP. + +More info is available in the [Security Best Practices docummentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks) + +Some care must be taken when using `hostNetwork: true`, as then OpenTelemetry Collector will listen on all the addresses in the host network namespace. + +## Configuration + +### Default configuration + +By default this chart will deploy an OpenTelemetry Collector with three pipelines (logs, metrics and traces) +and debug exporter enabled by default. The collector can be installed either as daemonset (agent), deployment or stateful set. + +*Example*: Install collector as a deployment. + +```yaml +mode: deployment +``` + +By default collector has the following receivers enabled: + +- **metrics**: OTLP and prometheus. 
Prometheus is configured only for scraping collector's own metrics. +- **traces**: OTLP, zipkin and jaeger (thrift and grpc). +- **logs**: OTLP (to enable container logs, see [Configuration for Kubernetes container logs](#configuration-for-kubernetes-container-logs)). + +### Basic Top Level Configuration + +The Collector's configuration is set via the `config` section. Default components can be removed with `null`. Remember that lists in helm are not merged, so if you want to modify any default list you must specify all items, including any default items you want to keep. + +*Example*: Disable metrics and logs pipelines and non-otlp receivers: + +```yaml +config: + receivers: + jaeger: null + prometheus: null + zipkin: null + service: + pipelines: + traces: + receivers: + - otlp + metrics: null + logs: null +``` + +The chart also provides several presets, detailed below, to help configure important Kubernetes components. For more details on each component, see [Kubernetes Collector Components](https://opentelemetry.io/docs/kubernetes/collector/components/). + +### Configuration for Kubernetes Container Logs + +The collector can be used to collect logs sent to standard output by Kubernetes containers. +This feature is disabled by default. It has the following requirements: + +- It needs agent collector to be deployed. +- It requires the [Filelog receiver](https://opentelemetry.io/docs/kubernetes/collector/components/#filelog-receiver) to be included in the collector, such as [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image. + +To enable this feature, set the `presets.logsCollection.enabled` property to `true`. +Here is an example `values.yaml`: + +```yaml +mode: daemonset + +presets: + logsCollection: + enabled: true + includeCollectorLogs: true +``` + +The way this feature works is it adds a `filelog` receiver on the `logs` pipeline. 
This receiver is preconfigured +to read the files where Kubernetes container runtime writes all containers' console output to. + +#### :warning: Warning: Risk of looping the exported logs back into the receiver, causing "log explosion" + +The container logs pipeline uses the `debug` exporter by default. +Paired with the default `filelog` receiver that receives all containers' console output, +it is easy to accidentally feed the exported logs back into the receiver. + +Also note that using the `--verbosity=detailed` option for the `debug` exporter causes it to output +multiple lines per single received log, which when looped, would amplify the logs exponentially. + +To prevent the looping, the default configuration of the receiver excludes logs from the collector's containers. + +If you want to include the collector's logs, make sure to replace the `debug` exporter +with an exporter that does not send logs to collector's standard output. + +Here's an example `values.yaml` file that replaces the default `debug` exporter on the `logs` pipeline +with an `otlphttp` exporter that sends the container logs to `https://example.com:55681` endpoint. +It also clears the `filelog` receiver's `exclude` property, for collector logs to be included in the pipeline. + +```yaml +mode: daemonset + +presets: + logsCollection: + enabled: true + includeCollectorLogs: true + +config: + exporters: + otlphttp: + endpoint: https://example.com:55681 + service: + pipelines: + logs: + exporters: + - otlphttp +``` + +### Configuration for Kubernetes Attributes Processor + +The collector can be configured to add Kubernetes metadata, such as pod name and namespace name, as resource attributes to incoming logs, metrics and traces. + +This feature is disabled by default. 
It has the following requirements: + +- It requires the [Kubernetes Attributes processor](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor) to be included in the collector, such as [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image. + +To enable this feature, set the `presets.kubernetesAttributes.enabled` property to `true`. +Here is an example `values.yaml`: + +```yaml +mode: daemonset +presets: + kubernetesAttributes: + enabled: true + # You can also configure the preset to add all of the associated pod's labels and annotations to you telemetry. + # The label/annotation name will become the resource attribute's key. + extractAllPodLabels: true + extractAllPodAnnotations: true +``` + +### Configuration for Retrieving Kubelet Metrics + +The collector can be configured to collect node, pod, and container metrics from the API server on a kubelet. + +This feature is disabled by default. It has the following requirements: + +- It requires the [Kubeletstats receiver](https://opentelemetry.io/docs/kubernetes/collector/components/#kubeletstats-receiver) to be included in the collector, such as [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image. + +To enable this feature, set the `presets.kubeletMetrics.enabled` property to `true`. +Here is an example `values.yaml`: + +```yaml +mode: daemonset +presets: + kubeletMetrics: + enabled: true +``` + +### Configuration for Kubernetes Cluster Metrics + +The collector can be configured to collects cluster-level metrics from the Kubernetes API server. A single instance of this receiver can be used to monitor a cluster. + +This feature is disabled by default. 
It has the following requirements: + +- It requires the [Kubernetes Cluster receiver](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-cluster-receiver) to be included in the collector, such as [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image. +- It requires statefulset or deployment mode with a single replica. + +To enable this feature, set the `presets.clusterMetrics.enabled` property to `true`. + +Here is an example `values.yaml`: + +```yaml +mode: deployment +replicaCount: 1 +presets: + clusterMetrics: + enabled: true +``` + +### Configuration for Retrieving Kubernetes Events + +The collector can be configured to collect Kubernetes events. + +This feature is disabled by default. It has the following requirements: + +- It requires [Kubernetes Objects receiver](https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-objects-receiver) to be included in the collector, such as [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image. + +To enable this feature, set the `presets.kubernetesEvents.enabled` property to `true`. +Here is an example `values.yaml`: + +```yaml +mode: deployment +replicaCount: 1 +presets: + kubernetesEvents: + enabled: true +``` + +### Configuration for Host Metrics + +The collector can be configured to collect host metrics for Kubernetes nodes. + +This feature is disabled by default. It has the following requirements: + +- It requires [Host Metrics receiver](https://opentelemetry.io/docs/kubernetes/collector/components/#host-metrics-receiver) to be included in the collector, such as [k8s](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) version of the collector image. 
+ +To enable this feature, set the `presets.hostMetrics.enabled` property to `true`. +Here is an example `values.yaml`: + +```yaml +mode: daemonset +presets: + hostMetrics: + enabled: true +``` + +## CRDs + +At this time, Prometheus CRDs are supported but other CRDs are not. + +### Other configuration options + +The [values.yaml](./values.yaml) file contains information about all other configuration +options for this chart. + +For more examples see [Examples](examples). diff --git a/helmcharts/services/opentelemetry-collector/UPGRADING.md b/helmcharts/services/opentelemetry-collector/UPGRADING.md new file mode 100644 index 00000000..83102433 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/UPGRADING.md @@ -0,0 +1,419 @@ +# Upgrade guidelines + +These upgrade guidelines only contain instructions for version upgrades which require manual modifications on the user's side. +If the version you want to upgrade to is not listed here, then there is nothing to do for you. +Just upgrade and enjoy. + +## 0.97.2 to 0.98.0 + +> [!WARNING] +> Critical content demanding immediate user attention due to potential risks. + +The deprecated memory ballast extension has been removed from the default config. If you depend on this component you must manually configure `config.extensions` and `config.service.extensions` to include the memory ballast extension. Setting `useGOMEMLIMIT` to `false` will no longer keep the memory ballast extension in the rendered collector config. + +## 0.88.0 to 0.89.0 + +> [!WARNING] +> Critical content demanding immediate user attention due to potential risks. + +As part of working towards using the [OpenTelemetry Collector Kubernetes Distro](https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s) by default, the chart now requires users to explicitly set an image repository. If you are already explicitly setting an image repository this breaking change does not affect you. 
+ +If you are using a OpenTelemetry Community distribution of the Collector we recommend you use `otel/opentelemetry-collector-k8s`, but carefully review the [components included in this distribution](https://github.com/open-telemetry/opentelemetry-collector-releases/blob/main/distributions/otelcol-k8s/manifest.yaml) to make sure it includes all the components you use in your configuration. In the future this distribution will become the default image used for the chart. + +You can use the OpenTelemetry Collector Kubernetes Distro by adding these lines to your values.yaml: + +```yaml +image: + repository: "otel/opentelemetry-collector-k8s" +``` + +If you want to stick with using the Contrib distribution, add these lines to your values.yaml: + +```yaml +image: + repository: "otel/opentelemetry-collector-contrib" +``` + +For more details see [#1135](https://github.com/open-telemetry/opentelemetry-helm-charts/issues/1135). + +## 0.84.0 to 0.85.0 + +The `loggingexporter` has been removed from the default configuration. Use the `debugexporter` instead. + +## 0.78.2 to 0.78.3 + +[Update Health Check Extension's endpoints to use Pod IP Instead of 0.0.0.0](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/1012) + +The [Collector's security guidelines were updated](https://github.com/open-telemetry/opentelemetry-collector/pull/6959) to include containerized environments when discussing safeguards against denial of service attacks. +To be in compliance with the Collector's security best practices the chart has been updated to use the Collector's pod IP in place of `0.0.0.0`. + +The chart will continue to allow complete configuration of the Collector via the `config` field in the values.yaml. If pod IP does not suite your needs you can use `config` to set something different. 
+ +See [Security Best Practices docummentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks) for more details. + +## 0.75.1 to 0.76.0 + +Enable the `useGOMEMLIMIT` feature flag by default. This means by default the chart now does not use the Memory Ballast Extension and any custom configuraiton applied to the Memory Ballast Extension is ignored. + +**If you're still interested in using the Memory Ballast Extension set this back to false.** + +## 0.69.3 to 0.70.0 + +The following deprecated fields have been removed. Please use the new values: + +- `extraConfigMapMounts` -> `extraVolumes` +- `extraHostPathMounts` -> `extraVolumes` +- `secretMounts` -> `extraVolumes` +- `containerLogs` -> `presets.logsCollection` + +## 0.69.0 to 0.69.1 & 0.69.2 + +The `loggingexporter` was replaced with the `debugexporter`. This ended up being an accidental breaking change for any user that depended on the default logging exporter config when explicitly listing the logging exporter in an exporter list. + +When using versions `0.69.1` or `0.69.2` you should explicitly list the debugging exporter instead of the logging exporter. You other option is to skip these version and use `0.69.3` or newer, which includes the logging exporter configuration. + +**The logging exporter will be removed in a future version.** We highly recommend switching to the debug exporter. + +## 0.67 to 0.68 + +The `preset.kubernetesEvents` preset now excludes `DELETED` watch types so that an log is not ingested when Kubernetes deletes an event. +The intention behind this change is to cleanup the data ingested by the preset as the `DELETED` updated for a Kubernetes Events is +uninteresting. If you want to keep ingesting `DELETED` updates for Kubernetes Events you will need to configure the `k8sobjectsreceiver` manually. 
+ +## 0.62 to 0.63 + +The `kubernetesAttributes` preset now respects order of processors in logs, metrics and traces pipelines. +This implicitly might break your pipelines if you relied on having the `k8sAttributes` processor rendered as the first processor but also explicitly listed it in the signal's pipeline somewhere else. + +## 0.55.2 to 0.56 + +The `tpl` function has been added to references of pod labels and ingress hosts. This adds the ability to add some reusability in +charts values through referencing global values. If you are currently using any `{{ }}` syntax in pod labels or ingress hosts it will now be rendered. To escape existing instances of {{ }}, use {{` `}}. + +```yaml +global: + region: us-east-1 + environment: stage + +# Tests `tpl` function reference used in pod labels and +# ingress.hosts[*] +podLabels: + environment: "{{ .Values.global.environment }}" + +ingress: + enabled: true + hosts: + - host: "otlp-collector-{{ .Values.global.region }}-{{ .Values.global.environment }}-example.dev" + paths: + - path: / + pathType: Prefix + port: 4318 +``` + +Note that only global Helm values can be referenced as the Helm Chart schema currently does not allow `additionalValues`. + +## 0.55.0 to 0.55.1 + +As of v0.55.1 Collector chart use `${env:ENV}` style syntax when getting environment variables and that $`{env:ENV}` syntax is not supported before collector 0.71. If you upgrade collector chart to v0.55.1, you need to make sure your collector version is after than 0.71 (default is v0.76.1). + +## 0.53.1 to 0.54.0 + +As of v0.54.0 Collector chart, the default resource limits are removed. If you want to keep old values you can use the following configuration: + +``` +resources: + limits: + # CPU units are in fractions of 1000; memory in powers of 2 + cpu: 250m + memory: 512Mi +``` + +See [the 644 issue](https://github.com/open-telemetry/opentelemetry-helm-charts/issues/644) for more information. 
+ +## 0.46.0 to 0.47.0 + +[Update Collector Endpoints to use Pod IP Instead of 0.0.0.0](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/603) + +The [Collector's security guidelines were updated](https://github.com/open-telemetry/opentelemetry-collector/pull/6959) to include containerized environments when discussing safeguards against denial of service attacks. +To be in compliance with the Collector's security best practices the chart has been updated to use the Collector's pod IP in place of `0.0.0.0`. + +The chart will continue to allow complete configuration of the Collector via the `config` field in the values.yaml. If pod IP does not suite your needs you can use `config` to set something different. + +See [Security Best Practices docummentation](https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/security-best-practices.md#safeguards-against-denial-of-service-attacks) for more details. + +The new default of binding to the pod IP, rather than `0.0.0.0`, will cause `kubectl port-forward` to fail. 
If port-forwarding is desired, the following `value.yaml` snippet will allow the Collector bind to `127.0.0.1` inside the pod, in addition to the pod's IP: + +```yaml +config: + receivers: + jaeger/local: + protocols: + grpc: + endpoint: 127.0.0.1:14250 + thrift_compact: + endpoint: 127.0.0.1:6831 + thrift_http: + endpoint: 127.0.0.1:14268 + otlp/local: + protocols: + grpc: + endpoint: 127.0.0.1:4317 + http: + endpoint: 127.0.0.1:4318 + zipkin/local: + endpoint: 127.0.0.1:9411 + service: + pipelines: + traces: + receivers: + - otlp + - otlp/local + - jaeger + - jaeger/local + - zipkin + - zipkin/local +``` + +## 0.40.7 to 0.41.0 + +[Require Kubernetes version 1.23 or later](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/541) + +If you enable use of a _HorizontalPodAutoscaler_ for the collector when running in the "deployment" mode by way of `.Values.autoscaling.enabled`, the manifest now uses the "autoscaling/v2" API group version, which [is available only as recently as Kubernetes version 1.23](https://kubernetes.io/blog/2021/12/07/kubernetes-1-23-release-announcement/#horizontalpodautoscaler-v2-graduates-to-ga). As [all previous versions of this API group are deprecated and removed as of Kubernetes version 1.26](https://kubernetes.io/docs/reference/using-api/deprecation-guide/#horizontalpodautoscaler-v126), we don't offer support for Kubernetes versions older than 1.23. + +## 0.34.0 to 0.34.0 + +[config supports templating](TBD) + +The chart now supports templating in `.Values.config`. If you are currently using any `{{ }}` syntax in `.Values.yaml` it will now be rendered. To escape existing instances of `{{ }}`, use ``` {{` `}} ```. For example, `{{ REDACTED_EMAIL }}` becomes ``` {{` {{ REDACTED_EMAIL }} `}} ```. + +## 0.28.0 to 0.29.0 + +[Reduce requested resources](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/273) + +Resource `limits` have been reduced. 
Upgrades/installs of chart 0.29.0 will now use fewer resources. In order to set the resources back to what they were, you will need to override the `resources` section in the `values.yaml`. + +*Example*: + +```yaml +resources: + limits: + cpu: 1 + memory: 2Gi +``` + +## 0.23.1 to 0.24.0 + +[Remove containerLogs in favor of presets.logsCollection]() + +The ability to enable logs collection from the collector has been moved from `containerLogs.enabled` to `presets.logsCollection.enabled`. If you are currently using `containerLogs.enabled`, you should instead use the preset: + +```yaml +presets: + logsCollection: + enabled: true +``` + +If you are using `containerLogs.enabled` and also enabling collection of the collector logs you can use `includeCollectorLogs` + +```yaml +presets: + logsCollection: + enabled: true + includeCollectorLogs: true +``` + +You no longer need to update `config.service.pipelines.logs` to include the filelog receiver yourself as the preset will automatically update the logs pipeline to include the filelog receiver. + +The filelog's preset configuration can modified by `config.receivers`, but preset configuration cannot be removed. If you need to remove any filelog receiver configuration generated by the preset you should not use the preset. Instead, configure the filelog receiver manually in `config.receivers` and set any other necessary fields in the values.yaml to modify k8s as needed. + +See the [daemonset-collector-logs example](https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-collector/examples/daemonset-collector-logs) to see an example of the preset in action. + +## 0.18.0 to 0.19.0 + +[Remove agentCollector and standaloneCollector settings](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/216) + +The `agentCollector` and `standaloneCollector` config sections have been removed. 
Upgrades/installs of chart 0.19.0 will fail if `agentCollector` or `standaloneCollector` are in the values.yaml. See the [Migrate to mode](#migrate-to-mode) steps for instructions on how to replace `agentCollector` and `standaloneCollector` with `mode`. + +## 0.13.0 to 0.14.0 + +[Remove two-deployment mode](https://github.com/open-telemetry/opentelemetry-helm-charts/pull/159) + +The ability to install both the agent and standalone collectors simultaneous with the chart has been removed. Installs/upgrades where both `.Values.agentCollector.enabled` and `.Values.standloneCollector.enables` are true will fail. `agentCollector` and `standloneCollector` have also be deprecated, but backward compatibility has been maintained. + +### To run both a deployment and daemonset + +Install a deployment version of the collector. This is done by setting `.Values.mode` to `deployment` + +```yaml +mode: deployment +``` + +Next, install an daemonset version of the collector that is configured to send traffic to the previously installed deployment. This is done by setting `.Values.mode` to `daemonset` and updating `.Values.config` so that data is exported to the deployment. + +```yaml +mode: daemonset + +config: + exporters: + otlp: + endpoint: example-opentelemetry-collector:4317 + tls: + insecure: true + service: + pipelines: + logs: + exporters: + - otlp + - logging + metrics: + exporters: + - otlp + - logging + traces: + exporters: + - otlp + - logging +``` + +See the [daemonset-and-deployment](examples/daemonset-and-deployment) example to see the rendered config. + +### Migrate to `mode`: + +The `agentCollector` and `standaloneCollector` sections in values.yaml have been deprecated. Instead there is a new field, `mode`, that determines if the collector is being installed as a daemonset or deployment. + +```yaml +# Valid values are "daemonset" and "deployment". +# If set, agentCollector and standaloneCollector are ignored. 
+mode: +``` + +The following fields have also been added to the root-level to replace the depracated `agentCollector` and `standaloneCollector` settings. + +```yaml +containerLogs: + enabled: false + +resources: + limits: + cpu: 1 + memory: 2Gi + +podAnnotations: {} + +podLabels: {} + +# Host networking requested for this pod. Use the host's network namespace. +hostNetwork: false + +# only used with deployment mode +replicaCount: 1 + +annotations: {} +``` + +When using `mode`, these settings should be used instead of their counterparts in `agentCollector` and `standaloneCollector`. + +Set `mode` to `daemonset` if `agentCollector` was being used. Move all `agentCollector` settings to the corresponding root-level setting. If `agentCollector.configOverride` was being used, merge the settings with `.Values.config`. + +Example agentCollector values.yaml: + +```yaml +agentCollector: + resources: + limits: + cpu: 3 + memory: 6Gi + configOverride: + receivers: + hostmetrics: + scrapers: + cpu: + disk: + filesystem: + service: + pipelines: + metrics: + receivers: [otlp, prometheus, hostmetrics] +``` + +Example mode values.yaml: + +```yaml +mode: daemonset + +resources: + limits: + cpu: 3 + memory: 6Gi + +config: + receivers: + hostmetrics: + scrapers: + cpu: + disk: + filesystem: + service: + pipelines: + metrics: + receivers: [otlp, prometheus, hostmetrics] +``` + +Set `mode` to `deployment` if `standaloneCollector` was being used. Move all `standaloneCollector` settings to the corresponding root-level setting. If `standaloneCollector.configOverride` was being used, merge the settings with `.Values.config`. 
+ +Example standaloneCollector values.yaml: + +```yaml +standaloneCollector: + enabled: true + replicaCount: 2 + configOverride: + receivers: + podman_stats: + endpoint: unix://run/podman/podman.sock + timeout: 10s + collection_interval: 10s + service: + pipelines: + metrics: + receivers: [otlp, prometheus, podman_stats] +``` + +Example mode values.yaml: + +```yaml +mode: deployment + +replicaCount: 2 + +config: + receivers: + receivers: + podman_stats: + endpoint: unix://run/podman/podman.sock + timeout: 10s + collection_interval: 10s + service: + pipelines: + metrics: + receivers: [otlp, prometheus, podman_stats] +``` + +Default configuration in `.Values.config` can now be removed with `null`. When changing a pipeline, you must explicitly list all the components that are in the pipeline, including any default components. + +*Example*: Disable metrics and logging pipelines and non-otlp receivers: + +```yaml +config: + receivers: + jaeger: null + prometheus: null + zipkin: null + service: + pipelines: + traces: + receivers: + - otlp + metrics: null + logs: null +``` diff --git a/helmcharts/services/opentelemetry-collector/charts/common/.helmignore b/helmcharts/services/opentelemetry-collector/charts/common/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helmcharts/services/opentelemetry-collector/charts/common/Chart.yaml b/helmcharts/services/opentelemetry-collector/charts/common/Chart.yaml new file mode 100644 index 00000000..cc1e9676 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/Chart.yaml @@ -0,0 +1,12 @@ +apiVersion: v2 +description: Library chart for Sunbird +keywords: +- common +- helper +- template +- function +maintainers: +- name: NimbusHub.in +name: common +type: library +version: 0.1.0 diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/_affinities.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/_affinities.tpl new file mode 100644 index 00000000..e85b1df4 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/_affinities.tpl @@ -0,0 +1,139 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
| quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 + {{- range $extraPodAffinityTerms }} + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }} + {{- if not (empty 
$component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: {{ .weight | default 1 -}} + {{- end -}} +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- range $extraPodAffinityTerms }} + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- end -}} +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ 
include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/_capabilities.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/_capabilities.tpl new file mode 100644 index 00000000..b1257397 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/_capabilities.tpl @@ -0,0 +1,229 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. 
+*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for daemonset. +*/}} +{{- define "common.capabilities.daemonset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Vertical Pod Autoscaler. 
+*/}} +{{- define "common.capabilities.vpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if PodSecurityPolicy is supported +*/}} +{{- define "common.capabilities.psp.supported" -}} +{{- if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if AdmissionConfiguration is supported +*/}} +{{- define "common.capabilities.admisionConfiguration.supported" -}} +{{- if semverCompare ">=1.23-0" (include "common.capabilities.kubeVersion" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for AdmissionConfiguration. +*/}} +{{- define "common.capabilities.admisionConfiguration.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiserver.config.k8s.io/v1alpha1" -}} +{{- else if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiserver.config.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiserver.config.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityConfiguration. +*/}} +{{- define "common.capabilities.podSecurityConfiguration.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "pod-security.admission.config.k8s.io/v1alpha1" -}} +{{- else if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "pod-security.admission.config.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "pod-security.admission.config.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. 
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error. +**To be removed when the catalog's minimum Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/_configs.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/_configs.tpl new file mode 100644 index 00000000..e69de29b diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/_errors.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/_errors.tpl new file mode 100644 index 00000000..07ded6f6 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/_errors.tpl @@ -0,0 +1,28 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Throw an error when upgrading using empty password values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be returned, if it is empty it won't throw error. + - context - Context - Required. 
Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/_images.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/_images.tpl new file mode 100644 index 00000000..1bcb779d --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/_images.tpl @@ -0,0 +1,117 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- if $registryName }} + {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- else -}} + {{- printf "%s%s%s" $repositoryName $separator $termination -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- if kindIs "map" . -}} + {{- $pullSecrets = append $pullSecrets .name -}} + {{- else -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end }} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- if kindIs "map" . -}} + {{- $pullSecrets = append $pullSecrets .name -}} + {{- else -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- if kindIs "map" . -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" .name "context" $context)) -}} + {{- else -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- if kindIs "map" . -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" .name "context" $context)) -}} + {{- else -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper image version (ignores image revision/prerelease info & falls back to chart appVersion) +{{ include "common.images.version" ( dict "imageRoot" .Values.path.to.the.image "chart" .Chart ) }} +*/}} +{{- define "common.images.version" -}} +{{- $imageTag := .imageRoot.tag | toString -}} +{{/* regexp from https://github.com/Masterminds/semver/blob/23f51de38a0866c5ef0bfc42b3f735c73107b700/version.go#L41-L44 */}} +{{- if regexMatch `^([0-9]+)(\.[0-9]+)?(\.[0-9]+)?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$` $imageTag -}} + {{- $version := semver $imageTag -}} + {{- printf "%d.%d.%d" $version.Major $version.Minor $version.Patch -}} +{{- else -}} + {{- print .chart.AppVersion -}} +{{- end -}} +{{- end -}} + diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/_ingress.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/_ingress.tpl new file mode 100644 index 00000000..efa5b85c --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/_ingress.tpl @@ -0,0 +1,73 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. 
+*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/_labels.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/_labels.tpl new file mode 100644 index 00000000..d90a6cdc --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/_labels.tpl @@ -0,0 +1,46 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Kubernetes standard labels +{{ include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) -}} +*/}} +{{- define "common.labels.standard" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{- $default := dict "app.kubernetes.io/name" (include "common.names.name" .context) "helm.sh/chart" (include "common.names.chart" .context) "app.kubernetes.io/instance" .context.Release.Name "app.kubernetes.io/managed-by" .context.Release.Service -}} +{{- with .context.Chart.AppVersion -}} +{{- $_ := set $default "app.kubernetes.io/version" . -}} +{{- end -}} +{{ template "common.tplvalues.merge" (dict "values" (list .customLabels $default) "context" .context) }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Chart.AppVersion }} +app.kubernetes.io/version: {{ . | quote }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Labels used on immutable fields such as deploy.spec.selector.matchLabels or svc.spec.selector +{{ include "common.labels.matchLabels" (dict "customLabels" .Values.podLabels "context" $) -}} + +We don't want to loop over custom labels appending them to the selector +since it's very likely that it will break deployments, services, etc. +However, it's important to overwrite the standard labels if the user +overwrote them on metadata.labels fields. +*/}} +{{- define "common.labels.matchLabels" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{ merge (pick (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) "app.kubernetes.io/name" "app.kubernetes.io/instance") (dict "app.kubernetes.io/name" (include "common.names.name" .context) "app.kubernetes.io/instance" .context.Release.Name ) | toYaml }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/_names.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/_names.tpl new file mode 100644 index 00000000..a222924f --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/_names.tpl @@ -0,0 +1,71 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. 
+*/}} +{{- define "common.names.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/_namespace.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/_namespace.tpl new file mode 100644 index 00000000..4016b1e4 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/_namespace.tpl @@ -0,0 +1,14 @@ +# .Values.namespace will get overridden by .Values.global.namespace.chart-name +{{- define "base.namespace" -}} + {{- $chartName := .Chart.Name }} + {{- $namespace := default .Release.Namespace .Values.namespace }} + {{- if .Values.global }} + {{- with .Values.global.namespace }} + {{- if hasKey . $chartName }} + {{- $namespace = index . $chartName }} + {{- end }} + {{- end }} + {{- end }} + {{- $namespace | trunc 63 | trimSuffix "-" }} +{{- end }} + diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/_secrets.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/_secrets.tpl new file mode 100644 index 00000000..a193c46b --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/_secrets.tpl @@ -0,0 +1,172 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. 
The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. 
+ +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + - failOnNew - Boolean - Optional - Default to true. If set to false, skip errors adding new keys to existing secrets. +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. 
Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $failOnNew := default true .failOnNew }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else if $failOnNew }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | 
quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. + +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else if .defaultValue -}} + {{- $value = .defaultValue | toString | b64enc -}} +{{- end -}} +{{- if $value -}} +{{- printf "%s" $value -}} +{{- end -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/_storage.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/_storage.tpl new file mode 100644 index 00000000..16405a0f --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/_storage.tpl @@ -0,0 +1,28 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/_tplvalues.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/_tplvalues.tpl new file mode 100644 index 00000000..a8ed7637 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,38 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template perhaps with scope if the scope is present. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }} +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }} +*/}} +{{- define "common.tplvalues.render" -}} +{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }} +{{- if contains "{{" (toJson .value) }} + {{- if .scope }} + {{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }} + {{- else }} + {{- tpl $value .context }} + {{- end }} +{{- else }} + {{- $value }} +{{- end }} +{{- end -}} + +{{/* +Merge a list of values that contains template after rendering them. +Merge precedence is consistent with http://masterminds.github.io/sprig/dicts.html#merge-mustmerge +Usage: +{{ include "common.tplvalues.merge" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }} +*/}} +{{- define "common.tplvalues.merge" -}} +{{- $dst := dict -}} +{{- range .values -}} +{{- $dst = include "common.tplvalues.render" (dict "value" . "context" $.context "scope" $.scope) | fromYaml | merge $dst -}} +{{- end -}} +{{ $dst | toYaml }} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/_utils.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/_utils.tpl new file mode 100644 index 00000000..bfbddf05 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/_utils.tpl @@ -0,0 +1,77 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . 
-}} +export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Checksum a template at "path" containing a *single* resource (ConfigMap,Secret) for use in pod annotations, excluding the metadata (see #18376). 
+Usage: +{{ include "common.utils.checksumTemplate" (dict "path" "/configmap.yaml" "context" $) }} +*/}} +{{- define "common.utils.checksumTemplate" -}} +{{- $obj := include (print .context.Template.BasePath .path) .context | fromYaml -}} +{{ omit $obj "apiVersion" "kind" "metadata" | toYaml | sha256sum }} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/_variables.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/_variables.tpl new file mode 100644 index 00000000..e5572991 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/_variables.tpl @@ -0,0 +1,41 @@ +{{/* Define findkey function */}} +{{- define "common.variables.findkey" -}} +{{- $value := .value -}} +{{- $keys := .keys -}} +{{- $found := true -}} + +{{- range $key := $keys -}} + {{- if kindIs "map" $value -}} + {{- if hasKey $value $key -}} + {{- $value = index $value $key -}} + {{- else -}} + {{- $found = false -}} + {{- break -}} + {{- end -}} + {{- else -}} + {{- $found = false -}} + {{- break -}} + {{- end -}} +{{- end -}} + +{{- if $found -}} + {{- $value -}} +{{- else -}} + {{- "" -}} +{{- end -}} +{{- end -}} + +{{/* Define getmethekey function with precedence and input as a dot-delimited string using findkey */}} +{{- define "common.variables.variableGlobal" -}} +{{- $root := first . -}} +{{- $keyString := index (rest .) 0 -}} +{{- $keys := splitList "." 
$keyString -}} + +{{- $value := include "common.variables.findkey" (dict "value" $root.Values "keys" $keys) -}} + +{{- if (not $value) -}} + {{- $value = include "common.variables.findkey" (dict "value" $root.Values.global "keys" $keys) -}} +{{- end -}} + +{{- $value -}} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/_warnings.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/_warnings.tpl new file mode 100644 index 00000000..66dffc1f --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/_warnings.tpl @@ -0,0 +1,19 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_cassandra.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 00000000..eda9aada --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,77 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. 
+ +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. 
+ +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_mariadb.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 00000000..17d83a2f --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_mongodb.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 00000000..bbb445b8 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,113 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername 
$valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_mysql.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 00000000..ca3953f8 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. 
+ +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_postgresql.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 00000000..8c9aa570 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,134 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. 
Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. 
Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_redis.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_redis.tpl new file mode 100644 index 00000000..fc0d208d --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,81 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . }} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" 
.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart includes the standardizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_validations.tpl b/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_validations.tpl new file mode 100644 index 00000000..31ceda87 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,51 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. 
+ +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." 
.subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/helmcharts/services/opentelemetry-collector/charts/common/values.yaml b/helmcharts/services/opentelemetry-collector/charts/common/values.yaml new file mode 100644 index 00000000..c83b33f2 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/charts/common/values.yaml @@ -0,0 +1,82 @@ +# Default values for common. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/helmcharts/services/opentelemetry-collector/ci/GOMEMLIMIT-values.yaml b/helmcharts/services/opentelemetry-collector/ci/GOMEMLIMIT-values.yaml new file mode 100644 index 00000000..cc0570fe --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/GOMEMLIMIT-values.yaml @@ -0,0 +1,9 @@ +mode: deployment + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +useGOMEMLIMIT: true diff --git a/helmcharts/services/opentelemetry-collector/ci/clusterrole-values.yaml b/helmcharts/services/opentelemetry-collector/ci/clusterrole-values.yaml new file mode 100644 index 00000000..01acd446 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/clusterrole-values.yaml @@ -0,0 +1,27 @@ +mode: daemonset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +clusterRole: + create: true + name: "testing-clusterrole" + rules: + - apiGroups: + - '' + resources: + - 'pods' + - 'nodes' + verbs: + - 'get' + - 'list' + - 'watch' + clusterRoleBinding: + name: "testing-clusterrolebinding" +resources: + limits: + cpu: 100m + memory: 200M diff --git a/helmcharts/services/opentelemetry-collector/ci/config-override-values.yaml b/helmcharts/services/opentelemetry-collector/ci/config-override-values.yaml new file mode 100644 index 00000000..577cf4d2 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/config-override-values.yaml @@ -0,0 +1,26 @@ +mode: daemonset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +config: + receivers: + jaeger: null + otlp: null + zipkin: null + hostmetrics: + scrapers: + cpu: + disk: + filesystem: + service: 
+ pipelines: + metrics: + receivers: + - prometheus + - hostmetrics + traces: null + logs: null diff --git a/helmcharts/services/opentelemetry-collector/ci/daemonset-values.yaml b/helmcharts/services/opentelemetry-collector/ci/daemonset-values.yaml new file mode 100644 index 00000000..0f1d6190 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/daemonset-values.yaml @@ -0,0 +1,12 @@ +mode: daemonset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +resources: + limits: + cpu: 100m + memory: 200M diff --git a/helmcharts/services/opentelemetry-collector/ci/deployment-values.yaml b/helmcharts/services/opentelemetry-collector/ci/deployment-values.yaml new file mode 100644 index 00000000..447a3f13 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/deployment-values.yaml @@ -0,0 +1,29 @@ +global: + test: templated-value + +mode: deployment + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +resources: + limits: + cpu: 100m + memory: 200M + +# Tests `tpl` function reference used in pod labels and +# ingress.hosts[*] +podLabels: + testLabel: "{{ .Values.global.test }}" + +ingress: + enabled: true + hosts: + - host: "{{ .Values.global.test }}" + paths: + - path: / + pathType: Prefix + port: 4318 diff --git a/helmcharts/services/opentelemetry-collector/ci/disabling-protocols-values.yaml b/helmcharts/services/opentelemetry-collector/ci/disabling-protocols-values.yaml new file mode 100644 index 00000000..b7a67bdf --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/disabling-protocols-values.yaml @@ -0,0 +1,21 @@ +mode: deployment + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +ports: + jaeger-compact: + enabled: false + jaeger-thrift: + enabled: false + jaeger-grpc: + enabled: false + zipkin: + enabled: false +resources: + limits: + cpu: 100m + memory: 200M diff --git 
a/helmcharts/services/opentelemetry-collector/ci/hpa-deployment-values.yaml b/helmcharts/services/opentelemetry-collector/ci/hpa-deployment-values.yaml new file mode 100644 index 00000000..3fe38538 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/hpa-deployment-values.yaml @@ -0,0 +1,15 @@ +mode: deployment + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 10 + behavior: {} + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 diff --git a/helmcharts/services/opentelemetry-collector/ci/hpa-statefulset-values.yaml b/helmcharts/services/opentelemetry-collector/ci/hpa-statefulset-values.yaml new file mode 100644 index 00000000..cbf3c605 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/hpa-statefulset-values.yaml @@ -0,0 +1,15 @@ +mode: statefulset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 10 + behavior: {} + targetCPUUtilizationPercentage: 80 + targetMemoryUtilizationPercentage: 80 diff --git a/helmcharts/services/opentelemetry-collector/ci/multiple-ingress-values.yaml b/helmcharts/services/opentelemetry-collector/ci/multiple-ingress-values.yaml new file mode 100644 index 00000000..4b9df752 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/multiple-ingress-values.yaml @@ -0,0 +1,49 @@ +mode: deployment + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +resources: + limits: + cpu: 100m + memory: 200M + +ingress: + enabled: true + + ingressClassName: nginx + annotations: + test.io/collector: default + hosts: + - host: defaultcollector.example.com + paths: + - path: / + pathType: Prefix + port: 4318 + + additionalIngresses: + - name: additional-basic + hosts: + - host: additional-basic.example.com + paths: + - path: / + pathType: 
Prefix + port: 4318 + + - name: additional-advanced + ingressClassName: nginx + annotations: + test.io/ingress: additional-advanced + hosts: + - host: additional-advanced.example.com + paths: + - path: / + pathType: Exact + port: 4318 + tls: + - secretName: somesecret + hosts: + - additional-advanced.example.com diff --git a/helmcharts/services/opentelemetry-collector/ci/networkpolicy-override-values.yaml b/helmcharts/services/opentelemetry-collector/ci/networkpolicy-override-values.yaml new file mode 100644 index 00000000..d08d368c --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/networkpolicy-override-values.yaml @@ -0,0 +1,37 @@ +mode: daemonset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +resources: + limits: + cpu: 100m + memory: 200M + +networkPolicy: + enabled: true + + allowIngressFrom: + - namespaceSelector: {} + - ipBlock: + cidr: 127.0.0.1/32 + + extraIngressRules: + - ports: + - port: metrics + protocol: TCP + from: + - ipBlock: + cidr: 127.0.0.1/32 + + egressRules: + - to: + - podSelector: + matchLabels: + app: jaeger + ports: + - port: 4317 + protocol: TCP diff --git a/helmcharts/services/opentelemetry-collector/ci/networkpolicy-values.yaml b/helmcharts/services/opentelemetry-collector/ci/networkpolicy-values.yaml new file mode 100644 index 00000000..7e80cb47 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/networkpolicy-values.yaml @@ -0,0 +1,15 @@ +mode: deployment + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +resources: + limits: + cpu: 100m + memory: 200M + +networkPolicy: + enabled: true diff --git a/helmcharts/services/opentelemetry-collector/ci/preset-clustermetrics-values.yaml b/helmcharts/services/opentelemetry-collector/ci/preset-clustermetrics-values.yaml new file mode 100644 index 00000000..8c21aeb3 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/preset-clustermetrics-values.yaml @@ 
-0,0 +1,16 @@ +mode: deployment + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +presets: + clusterMetrics: + enabled: true + +resources: + limits: + cpu: 100m + memory: 200M diff --git a/helmcharts/services/opentelemetry-collector/ci/preset-hostmetrics-values.yaml b/helmcharts/services/opentelemetry-collector/ci/preset-hostmetrics-values.yaml new file mode 100644 index 00000000..4889c014 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/preset-hostmetrics-values.yaml @@ -0,0 +1,16 @@ +mode: daemonset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +presets: + hostMetrics: + enabled: true + +resources: + limits: + cpu: 100m + memory: 200M diff --git a/helmcharts/services/opentelemetry-collector/ci/preset-k8sevents-values.yaml b/helmcharts/services/opentelemetry-collector/ci/preset-k8sevents-values.yaml new file mode 100644 index 00000000..65782919 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/preset-k8sevents-values.yaml @@ -0,0 +1,16 @@ +mode: deployment + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +presets: + kubernetesEvents: + enabled: true + +resources: + limits: + cpu: 100m + memory: 200M diff --git a/helmcharts/services/opentelemetry-collector/ci/preset-kubeletmetrics-values.yaml b/helmcharts/services/opentelemetry-collector/ci/preset-kubeletmetrics-values.yaml new file mode 100644 index 00000000..83dd9cab --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/preset-kubeletmetrics-values.yaml @@ -0,0 +1,16 @@ +mode: daemonset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +presets: + kubeletMetrics: + enabled: true + +resources: + limits: + cpu: 100m + memory: 200M diff --git a/helmcharts/services/opentelemetry-collector/ci/preset-kubernetesattributes-values.yaml 
b/helmcharts/services/opentelemetry-collector/ci/preset-kubernetesattributes-values.yaml new file mode 100644 index 00000000..ef32ac98 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/preset-kubernetesattributes-values.yaml @@ -0,0 +1,16 @@ +mode: daemonset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +presets: + kubernetesAttributes: + enabled: true + +resources: + limits: + cpu: 100m + memory: 200M diff --git a/helmcharts/services/opentelemetry-collector/ci/preset-logscollection-values.yaml b/helmcharts/services/opentelemetry-collector/ci/preset-logscollection-values.yaml new file mode 100644 index 00000000..1dd9a8e5 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/preset-logscollection-values.yaml @@ -0,0 +1,17 @@ +mode: daemonset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +presets: + logsCollection: + enabled: true + includeCollectorLogs: true + +resources: + limits: + cpu: 100m + memory: 200M diff --git a/helmcharts/services/opentelemetry-collector/ci/probes-values.yaml b/helmcharts/services/opentelemetry-collector/ci/probes-values.yaml new file mode 100644 index 00000000..60600dc1 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/probes-values.yaml @@ -0,0 +1,37 @@ +mode: daemonset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +livenessProbe: + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 2 + terminationGracePeriodSeconds: 40 + httpGet: + port: 8989 + path: /healthz + +readinessProbe: + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + successThreshold: 2 + failureThreshold: 2 + httpGet: + port: 8989 + path: /healthz + +startupProbe: + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 10 + terminationGracePeriodSeconds: 40 + httpGet: + port: 8989 + path: /healthz diff --git 
a/helmcharts/services/opentelemetry-collector/ci/statefulset-values.yaml b/helmcharts/services/opentelemetry-collector/ci/statefulset-values.yaml new file mode 100644 index 00000000..76de30c7 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/ci/statefulset-values.yaml @@ -0,0 +1,13 @@ +mode: statefulset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +replicaCount: 2 +resources: + limits: + cpu: 100m + memory: 200M diff --git a/helmcharts/services/opentelemetry-collector/examples/README.md b/helmcharts/services/opentelemetry-collector/examples/README.md new file mode 100644 index 00000000..8f2ac4c2 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/README.md @@ -0,0 +1,17 @@ +# Examples of chart configuration + +Here is a collection of common configurations for the OpenTelemetry collector. Each folder contains an example `values.yaml` and the resulting configurations that are generated by the opentelemetry-collector helm charts. + +- [Daemonset only](daemonset-only) +- [Deployment only](deployment-only) +- [Daemonset and deployment](daemonset-and-deployment) +- [Log collection, including collector logs](daemonset-collector-logs) +- [Add component (hostmetrics)](daemonset-hostmetrics) + +The manifests are rendered using the `helm template` command and the specific example folder's values.yaml. 
+ +Examples are generated by (from root of the repo): + +```sh +make generate-examples CHARTS=opentelemetry-collector +``` diff --git a/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/clusterrole.yaml b/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/clusterrole.yaml new file mode 100644 index 00000000..9bcd686b --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/clusterrole.yaml @@ -0,0 +1,41 @@ +--- +# Source: opentelemetry-collector/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: example-opentelemetry-collector + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +rules: + - apiGroups: [""] + resources: ["pods", "namespaces"] + verbs: ["get", "watch", "list"] + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events", "namespaces", "namespaces/status", "nodes", "nodes/spec", "pods", "pods/status", "replicationcontrollers", "replicationcontrollers/status", "resourcequotas", "services" ] + verbs: ["get", "list", "watch"] + - apiGroups: ["apps"] + resources: ["daemonsets", "deployments", "replicasets", "statefulsets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["daemonsets", "deployments", "replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["batch"] + resources: ["jobs", "cronjobs"] + verbs: ["get", "list", "watch"] + - apiGroups: ["autoscaling"] + resources: ["horizontalpodautoscalers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["events.k8s.io"] + resources: ["events"] + verbs: ["watch", "list"] diff --git 
a/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/clusterrolebinding.yaml b/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/clusterrolebinding.yaml new file mode 100644 index 00000000..e817448c --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/clusterrolebinding.yaml @@ -0,0 +1,21 @@ +--- +# Source: opentelemetry-collector/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: example-opentelemetry-collector + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: example-opentelemetry-collector +subjects: +- kind: ServiceAccount + name: example-opentelemetry-collector + namespace: default diff --git a/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/configmap.yaml b/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/configmap.yaml new file mode 100644 index 00000000..c40de890 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/configmap.yaml @@ -0,0 +1,68 @@ +--- +# Source: opentelemetry-collector/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +data: + relay: | + exporters: + debug: {} + processors: + k8sattributes: + extract: + metadata: + - k8s.namespace.name + - k8s.deployment.name + - k8s.statefulset.name + - k8s.daemonset.name + - 
k8s.cronjob.name + - k8s.job.name + - k8s.node.name + - k8s.pod.name + - k8s.pod.uid + - k8s.pod.start_time + passthrough: false + pod_association: + - sources: + - from: resource_attribute + name: k8s.pod.ip + - sources: + - from: resource_attribute + name: k8s.pod.uid + - sources: + - from: connection + receivers: + k8s_cluster: + collection_interval: 10s + k8sobjects: + objects: + - exclude_watch_type: + - DELETED + group: events.k8s.io + mode: watch + name: events + service: + pipelines: + logs: + exporters: + - debug + processors: + - k8sattributes + receivers: + - k8sobjects + metrics: + exporters: + - debug + processors: + - k8sattributes + receivers: + - k8s_cluster diff --git a/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/deployment.yaml b/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/deployment.yaml new file mode 100644 index 00000000..bb9ce81e --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/deployment.yaml @@ -0,0 +1,100 @@ +--- +# Source: opentelemetry-collector/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + strategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 05adf31e71768d6f4a0a72f54616072733e1605d80713238ee85456aafe75403 + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + + spec: + + serviceAccountName: 
example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - /otelcol-k8s + args: + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-k8s:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + - name: jaeger-grpc + containerPort: 14250 + protocol: TCP + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + - name: otlp + containerPort: 4317 + protocol: TCP + - name: otlp-http + containerPort: 4318 + protocol: TCP + - name: zipkin + containerPort: 9411 + protocol: TCP + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: GOMEMLIMIT + value: "3276MiB" + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + resources: + limits: + cpu: 2 + memory: 4Gi + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector + items: + - key: relay + path: relay.yaml + hostNetwork: false diff --git a/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/service.yaml b/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/service.yaml new file mode 100644 index 00000000..9c2d47c5 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/service.yaml @@ -0,0 +1,49 @@ +--- +# Source: opentelemetry-collector/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + + component: standalone-collector +spec: + type: 
ClusterIP + ports: + + - name: jaeger-compact + port: 6831 + targetPort: 6831 + protocol: UDP + - name: jaeger-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: 14268 + protocol: TCP + - name: otlp + port: 4317 + targetPort: 4317 + protocol: TCP + appProtocol: grpc + - name: otlp-http + port: 4318 + targetPort: 4318 + protocol: TCP + - name: zipkin + port: 9411 + targetPort: 9411 + protocol: TCP + selector: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + internalTrafficPolicy: Cluster diff --git a/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/serviceaccount.yaml new file mode 100644 index 00000000..5815b748 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/alternate-config/rendered/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm diff --git a/helmcharts/services/opentelemetry-collector/examples/alternate-config/values.yaml b/helmcharts/services/opentelemetry-collector/examples/alternate-config/values.yaml new file mode 100644 index 00000000..e0f6a5fd --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/alternate-config/values.yaml @@ -0,0 +1,34 @@ +mode: deployment + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +replicaCount: 1 + +resources: + limits: + cpu: 2 + memory: 4Gi + +presets: + clusterMetrics: + enabled: true + 
kubernetesAttributes: + enabled: true + kubernetesEvents: + enabled: true + +alternateConfig: + exporters: + debug: {} + service: + pipelines: + logs: + exporters: + - debug + metrics: + exporters: + - debug diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/daemonset-values.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/daemonset-values.yaml new file mode 100644 index 00000000..5b21b7bb --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/daemonset-values.yaml @@ -0,0 +1,34 @@ +mode: daemonset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +config: + exporters: + otlp: + endpoint: example-opentelemetry-collector:4317 + tls: + insecure: true + service: + pipelines: + logs: + exporters: + - otlp + - debug + metrics: + exporters: + - otlp + - debug + traces: + exporters: + - otlp + - debug + +resources: + limits: + cpu: 100m + memory: 200M + diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/deployment-values.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/deployment-values.yaml new file mode 100644 index 00000000..1c80d158 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/deployment-values.yaml @@ -0,0 +1,13 @@ +mode: deployment + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +resources: + limits: + cpu: 100m + memory: 200M + diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap-agent.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap-agent.yaml new file mode 100644 index 00000000..a5bbb304 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap-agent.yaml @@ 
-0,0 +1,93 @@ +--- +# Source: opentelemetry-collector/templates/configmap-agent.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector-agent + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +data: + relay: | + exporters: + debug: {} + otlp: + endpoint: example-opentelemetry-collector:4317 + tls: + insecure: true + extensions: + health_check: + endpoint: ${env:MY_POD_IP}:13133 + processors: + batch: {} + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + receivers: + jaeger: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:14250 + thrift_compact: + endpoint: ${env:MY_POD_IP}:6831 + thrift_http: + endpoint: ${env:MY_POD_IP}:14268 + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + prometheus: + config: + scrape_configs: + - job_name: opentelemetry-collector + scrape_interval: 10s + static_configs: + - targets: + - ${env:MY_POD_IP}:8888 + zipkin: + endpoint: ${env:MY_POD_IP}:9411 + service: + extensions: + - health_check + pipelines: + logs: + exporters: + - otlp + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + metrics: + exporters: + - otlp + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - prometheus + traces: + exporters: + - otlp + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - jaeger + - zipkin + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap.yaml new file mode 100644 index 00000000..78d98843 --- /dev/null +++ 
b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/configmap.yaml @@ -0,0 +1,86 @@ +--- +# Source: opentelemetry-collector/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +data: + relay: | + exporters: + debug: {} + extensions: + health_check: + endpoint: ${env:MY_POD_IP}:13133 + processors: + batch: {} + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + receivers: + jaeger: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:14250 + thrift_compact: + endpoint: ${env:MY_POD_IP}:6831 + thrift_http: + endpoint: ${env:MY_POD_IP}:14268 + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + prometheus: + config: + scrape_configs: + - job_name: opentelemetry-collector + scrape_interval: 10s + static_configs: + - targets: + - ${env:MY_POD_IP}:8888 + zipkin: + endpoint: ${env:MY_POD_IP}:9411 + service: + extensions: + - health_check + pipelines: + logs: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + metrics: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - prometheus + traces: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - jaeger + - zipkin + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/daemonset.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/daemonset.yaml new file mode 100644 index 00000000..82471b3f --- /dev/null +++ 
b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/daemonset.yaml @@ -0,0 +1,104 @@ +--- +# Source: opentelemetry-collector/templates/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: example-opentelemetry-collector-agent + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: agent-collector + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 45bec5a8c8f33cc8d7481ff6d7c0dd4235104c3257bb9c0761f62a157d70c97a + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: agent-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - /otelcol-k8s + args: + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-k8s:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + hostPort: 6831 + - name: jaeger-grpc + containerPort: 14250 + protocol: TCP + hostPort: 14250 + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + hostPort: 14268 + - name: otlp + containerPort: 4317 + protocol: TCP + hostPort: 4317 + - name: otlp-http + containerPort: 4318 + protocol: TCP + hostPort: 4318 + - name: zipkin + containerPort: 9411 + protocol: TCP + hostPort: 9411 + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: GOMEMLIMIT + value: "152MiB" + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + 
resources: + limits: + cpu: 100m + memory: 200M + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector-agent + items: + - key: relay + path: relay.yaml + hostNetwork: false diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/deployment.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/deployment.yaml new file mode 100644 index 00000000..ab79539a --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/deployment.yaml @@ -0,0 +1,100 @@ +--- +# Source: opentelemetry-collector/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + strategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 3ece6cc294f0ed92854413f33366d43d4b73621c702b50e6564565795cb9fbd0 + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - /otelcol-k8s + args: + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-k8s:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + - name: 
jaeger-grpc + containerPort: 14250 + protocol: TCP + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + - name: otlp + containerPort: 4317 + protocol: TCP + - name: otlp-http + containerPort: 4318 + protocol: TCP + - name: zipkin + containerPort: 9411 + protocol: TCP + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: GOMEMLIMIT + value: "152MiB" + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + resources: + limits: + cpu: 100m + memory: 200M + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector + items: + - key: relay + path: relay.yaml + hostNetwork: false diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/service.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/service.yaml new file mode 100644 index 00000000..9c2d47c5 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/service.yaml @@ -0,0 +1,49 @@ +--- +# Source: opentelemetry-collector/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + + component: standalone-collector +spec: + type: ClusterIP + ports: + + - name: jaeger-compact + port: 6831 + targetPort: 6831 + protocol: UDP + - name: jaeger-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: 14268 + protocol: TCP + - name: otlp + port: 4317 + targetPort: 4317 + protocol: TCP + appProtocol: grpc + - name: 
otlp-http + port: 4318 + targetPort: 4318 + protocol: TCP + - name: zipkin + port: 9411 + targetPort: 9411 + protocol: TCP + selector: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + internalTrafficPolicy: Cluster diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/serviceaccount.yaml new file mode 100644 index 00000000..5815b748 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-and-deployment/rendered/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-collector-logs/rendered/configmap-agent.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-collector-logs/rendered/configmap-agent.yaml new file mode 100644 index 00000000..e7c3a282 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-collector-logs/rendered/configmap-agent.yaml @@ -0,0 +1,100 @@ +--- +# Source: opentelemetry-collector/templates/configmap-agent.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector-agent + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +data: + relay: | + exporters: + debug: {} + extensions: 
+ health_check: + endpoint: ${env:MY_POD_IP}:13133 + processors: + batch: {} + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + receivers: + filelog: + exclude: [] + include: + - /var/log/pods/*/*/*.log + include_file_name: false + include_file_path: true + operators: + - id: container-parser + max_log_size: 102400 + type: container + retry_on_failure: + enabled: true + start_at: end + jaeger: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:14250 + thrift_compact: + endpoint: ${env:MY_POD_IP}:6831 + thrift_http: + endpoint: ${env:MY_POD_IP}:14268 + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + prometheus: + config: + scrape_configs: + - job_name: opentelemetry-collector + scrape_interval: 10s + static_configs: + - targets: + - ${env:MY_POD_IP}:8888 + zipkin: + endpoint: ${env:MY_POD_IP}:9411 + service: + extensions: + - health_check + pipelines: + logs: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - filelog + metrics: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - prometheus + traces: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - jaeger + - zipkin + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-collector-logs/rendered/daemonset.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-collector-logs/rendered/daemonset.yaml new file mode 100644 index 00000000..b976ad28 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-collector-logs/rendered/daemonset.yaml @@ -0,0 +1,110 @@ +--- +# Source: opentelemetry-collector/templates/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: example-opentelemetry-collector-agent + namespace: default + labels: + helm.sh/chart: 
opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: agent-collector + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 8bfee756de57aa564d26f9f90a47f3e28bfe2e238a54df8307b9dd6e5f224115 + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: agent-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - /otelcol-k8s + args: + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-k8s:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + hostPort: 6831 + - name: jaeger-grpc + containerPort: 14250 + protocol: TCP + hostPort: 14250 + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + hostPort: 14268 + - name: otlp + containerPort: 4317 + protocol: TCP + hostPort: 4317 + - name: otlp-http + containerPort: 4318 + protocol: TCP + hostPort: 4318 + - name: zipkin + containerPort: 9411 + protocol: TCP + hostPort: 9411 + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + - name: varlogpods + mountPath: /var/log/pods + readOnly: true + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector-agent + items: + - key: relay + 
path: relay.yaml + - name: varlogpods + hostPath: + path: /var/log/pods + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + hostNetwork: false diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-collector-logs/rendered/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-collector-logs/rendered/serviceaccount.yaml new file mode 100644 index 00000000..5815b748 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-collector-logs/rendered/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-collector-logs/values.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-collector-logs/values.yaml new file mode 100644 index 00000000..085361ed --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-collector-logs/values.yaml @@ -0,0 +1,12 @@ +mode: daemonset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +presets: + logsCollection: + enabled: true + includeCollectorLogs: true diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/configmap-agent.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/configmap-agent.yaml new file mode 100644 index 00000000..5197bf73 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/configmap-agent.yaml @@ -0,0 +1,133 @@ +--- +# Source: 
opentelemetry-collector/templates/configmap-agent.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector-agent + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +data: + relay: | + exporters: + debug: {} + extensions: + health_check: + endpoint: ${env:MY_POD_IP}:13133 + processors: + batch: {} + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + receivers: + hostmetrics: + collection_interval: 10s + root_path: /hostfs + scrapers: + cpu: null + disk: null + filesystem: + exclude_fs_types: + fs_types: + - autofs + - binfmt_misc + - bpf + - cgroup2 + - configfs + - debugfs + - devpts + - devtmpfs + - fusectl + - hugetlbfs + - iso9660 + - mqueue + - nsfs + - overlay + - proc + - procfs + - pstore + - rpc_pipefs + - securityfs + - selinuxfs + - squashfs + - sysfs + - tracefs + match_type: strict + exclude_mount_points: + match_type: regexp + mount_points: + - /dev/* + - /proc/* + - /sys/* + - /run/k3s/containerd/* + - /var/lib/docker/* + - /var/lib/kubelet/* + - /snap/* + load: null + memory: null + network: null + jaeger: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:14250 + thrift_compact: + endpoint: ${env:MY_POD_IP}:6831 + thrift_http: + endpoint: ${env:MY_POD_IP}:14268 + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + prometheus: + config: + scrape_configs: + - job_name: opentelemetry-collector + scrape_interval: 10s + static_configs: + - targets: + - ${env:MY_POD_IP}:8888 + zipkin: + endpoint: ${env:MY_POD_IP}:9411 + service: + extensions: + - health_check + pipelines: + logs: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + metrics: + exporters: + - debug + processors: + - memory_limiter + - batch 
+ receivers: + - otlp + - prometheus + - hostmetrics + traces: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - jaeger + - zipkin + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/daemonset.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/daemonset.yaml new file mode 100644 index 00000000..04b6bcb2 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/daemonset.yaml @@ -0,0 +1,105 @@ +--- +# Source: opentelemetry-collector/templates/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: example-opentelemetry-collector-agent + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: agent-collector + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: ec9d9c8763d31b396c761305de250e8604d4856eabaad9e5b107152439e535da + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: agent-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - /otelcol-k8s + args: + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-k8s:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + hostPort: 6831 + - name: jaeger-grpc + containerPort: 14250 + protocol: TCP + hostPort: 14250 + - name: jaeger-thrift + containerPort: 
14268 + protocol: TCP + hostPort: 14268 + - name: otlp + containerPort: 4317 + protocol: TCP + hostPort: 4317 + - name: otlp-http + containerPort: 4318 + protocol: TCP + hostPort: 4318 + - name: zipkin + containerPort: 9411 + protocol: TCP + hostPort: 9411 + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + - name: hostfs + mountPath: /hostfs + readOnly: true + mountPropagation: HostToContainer + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector-agent + items: + - key: relay + path: relay.yaml + - name: hostfs + hostPath: + path: / + hostNetwork: false diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/serviceaccount.yaml new file mode 100644 index 00000000..5815b748 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-hostmetrics/rendered/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-hostmetrics/values.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-hostmetrics/values.yaml new file mode 100644 index 00000000..10dd2576 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-hostmetrics/values.yaml 
@@ -0,0 +1,12 @@ +mode: daemonset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +presets: + hostMetrics: + enabled: true + diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/configmap-agent.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/configmap-agent.yaml new file mode 100644 index 00000000..b4fe3fad --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/configmap-agent.yaml @@ -0,0 +1,86 @@ +--- +# Source: opentelemetry-collector/templates/configmap-agent.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector-agent + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +data: + relay: | + exporters: + debug: {} + extensions: + health_check: + endpoint: ${env:MY_POD_IP}:13133 + processors: + batch: {} + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + receivers: + jaeger: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:14250 + thrift_compact: + endpoint: ${env:MY_POD_IP}:6831 + thrift_http: + endpoint: ${env:MY_POD_IP}:14268 + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + prometheus: + config: + scrape_configs: + - job_name: opentelemetry-collector + scrape_interval: 10s + static_configs: + - targets: + - ${env:MY_POD_IP}:8888 + zipkin: + endpoint: ${env:MY_POD_IP}:9411 + service: + extensions: + - health_check + pipelines: + logs: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + metrics: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - prometheus + traces: + 
exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - jaeger + - zipkin + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/daemonset.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/daemonset.yaml new file mode 100644 index 00000000..10837296 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/daemonset.yaml @@ -0,0 +1,119 @@ +--- +# Source: opentelemetry-collector/templates/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: example-opentelemetry-collector-agent + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: agent-collector + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: d656a64cb8fdb962bc6f3d46e9ff2fbeebeb02b66e0424049d70795227d376bd + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: agent-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - /otelcol-k8s + args: + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-k8s:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + hostPort: 6831 + - name: jaeger-grpc + containerPort: 14250 + protocol: TCP + hostPort: 14250 + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + hostPort: 14268 + - name: otlp 
+ containerPort: 4317 + protocol: TCP + hostPort: 4317 + - name: otlp-http + containerPort: 4318 + protocol: TCP + hostPort: 4318 + - name: zipkin + containerPort: 9411 + protocol: TCP + hostPort: 9411 + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + lifecycle: + preStop: + exec: + command: + - /test/sleep + - "5" + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + - mountPath: /test + name: test + initContainers: + - args: + - /bin/sleep + - /test/sleep + command: + - cp + image: 'busybox:latest' + name: test + volumeMounts: + - mountPath: /test + name: test + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector-agent + items: + - key: relay + path: relay.yaml + - emptyDir: {} + name: test + hostNetwork: false diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/serviceaccount.yaml new file mode 100644 index 00000000..5815b748 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-lifecycle-hooks/rendered/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-lifecycle-hooks/values.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-lifecycle-hooks/values.yaml new file mode 100644 
index 00000000..58acddac --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-lifecycle-hooks/values.yaml @@ -0,0 +1,37 @@ +mode: daemonset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +global: + image: busybox:latest +initContainers: + - name: test + command: + - cp + args: + - /bin/sleep + - /test/sleep + image: "{{ .Values.global.image }}" + volumeMounts: + - name: test + mountPath: /test + +extraVolumes: + - name: test + emptyDir: {} + +extraVolumeMounts: + - name: test + mountPath: /test + +lifecycleHooks: + preStop: + exec: + command: + - /test/sleep + - "5" + diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-only/rendered/configmap-agent.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-only/rendered/configmap-agent.yaml new file mode 100644 index 00000000..b4fe3fad --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-only/rendered/configmap-agent.yaml @@ -0,0 +1,86 @@ +--- +# Source: opentelemetry-collector/templates/configmap-agent.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector-agent + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +data: + relay: | + exporters: + debug: {} + extensions: + health_check: + endpoint: ${env:MY_POD_IP}:13133 + processors: + batch: {} + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + receivers: + jaeger: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:14250 + thrift_compact: + endpoint: ${env:MY_POD_IP}:6831 + thrift_http: + endpoint: ${env:MY_POD_IP}:14268 + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + prometheus: + config: + 
scrape_configs: + - job_name: opentelemetry-collector + scrape_interval: 10s + static_configs: + - targets: + - ${env:MY_POD_IP}:8888 + zipkin: + endpoint: ${env:MY_POD_IP}:9411 + service: + extensions: + - health_check + pipelines: + logs: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + metrics: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - prometheus + traces: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - jaeger + - zipkin + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-only/rendered/daemonset.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-only/rendered/daemonset.yaml new file mode 100644 index 00000000..fdf2ccb5 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-only/rendered/daemonset.yaml @@ -0,0 +1,98 @@ +--- +# Source: opentelemetry-collector/templates/daemonset.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: example-opentelemetry-collector-agent + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: agent-collector + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: d656a64cb8fdb962bc6f3d46e9ff2fbeebeb02b66e0424049d70795227d376bd + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: agent-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - 
/otelcol-k8s + args: + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-k8s:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + hostPort: 6831 + - name: jaeger-grpc + containerPort: 14250 + protocol: TCP + hostPort: 14250 + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + hostPort: 14268 + - name: otlp + containerPort: 4317 + protocol: TCP + hostPort: 4317 + - name: otlp-http + containerPort: 4318 + protocol: TCP + hostPort: 4318 + - name: zipkin + containerPort: 9411 + protocol: TCP + hostPort: 9411 + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector-agent + items: + - key: relay + path: relay.yaml + hostNetwork: false diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-only/rendered/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/examples/daemonset-only/rendered/serviceaccount.yaml new file mode 100644 index 00000000..5815b748 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-only/rendered/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm diff --git a/helmcharts/services/opentelemetry-collector/examples/daemonset-only/values.yaml 
b/helmcharts/services/opentelemetry-collector/examples/daemonset-only/values.yaml new file mode 100644 index 00000000..71002cc6 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/daemonset-only/values.yaml @@ -0,0 +1,7 @@ +mode: daemonset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" diff --git a/helmcharts/services/opentelemetry-collector/examples/deployment-only/rendered/configmap.yaml b/helmcharts/services/opentelemetry-collector/examples/deployment-only/rendered/configmap.yaml new file mode 100644 index 00000000..78d98843 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/deployment-only/rendered/configmap.yaml @@ -0,0 +1,86 @@ +--- +# Source: opentelemetry-collector/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +data: + relay: | + exporters: + debug: {} + extensions: + health_check: + endpoint: ${env:MY_POD_IP}:13133 + processors: + batch: {} + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + receivers: + jaeger: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:14250 + thrift_compact: + endpoint: ${env:MY_POD_IP}:6831 + thrift_http: + endpoint: ${env:MY_POD_IP}:14268 + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + prometheus: + config: + scrape_configs: + - job_name: opentelemetry-collector + scrape_interval: 10s + static_configs: + - targets: + - ${env:MY_POD_IP}:8888 + zipkin: + endpoint: ${env:MY_POD_IP}:9411 + service: + extensions: + - health_check + pipelines: + logs: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + 
metrics: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - prometheus + traces: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - jaeger + - zipkin + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 diff --git a/helmcharts/services/opentelemetry-collector/examples/deployment-only/rendered/deployment.yaml b/helmcharts/services/opentelemetry-collector/examples/deployment-only/rendered/deployment.yaml new file mode 100644 index 00000000..f1106088 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/deployment-only/rendered/deployment.yaml @@ -0,0 +1,100 @@ +--- +# Source: opentelemetry-collector/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + strategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 3ece6cc294f0ed92854413f33366d43d4b73621c702b50e6564565795cb9fbd0 + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - /otelcol-k8s + args: + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-k8s:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + - name: jaeger-grpc + containerPort: 14250 + 
protocol: TCP + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + - name: otlp + containerPort: 4317 + protocol: TCP + - name: otlp-http + containerPort: 4318 + protocol: TCP + - name: zipkin + containerPort: 9411 + protocol: TCP + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: GOMEMLIMIT + value: "3276MiB" + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + resources: + limits: + cpu: 2 + memory: 4Gi + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector + items: + - key: relay + path: relay.yaml + hostNetwork: false diff --git a/helmcharts/services/opentelemetry-collector/examples/deployment-only/rendered/service.yaml b/helmcharts/services/opentelemetry-collector/examples/deployment-only/rendered/service.yaml new file mode 100644 index 00000000..9c2d47c5 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/deployment-only/rendered/service.yaml @@ -0,0 +1,49 @@ +--- +# Source: opentelemetry-collector/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + + component: standalone-collector +spec: + type: ClusterIP + ports: + + - name: jaeger-compact + port: 6831 + targetPort: 6831 + protocol: UDP + - name: jaeger-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: 14268 + protocol: TCP + - name: otlp + port: 4317 + targetPort: 4317 + protocol: TCP + appProtocol: grpc + - name: otlp-http + port: 4318 + targetPort: 4318 + protocol: TCP + - name: zipkin 
+ port: 9411 + targetPort: 9411 + protocol: TCP + selector: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + internalTrafficPolicy: Cluster diff --git a/helmcharts/services/opentelemetry-collector/examples/deployment-only/rendered/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/examples/deployment-only/rendered/serviceaccount.yaml new file mode 100644 index 00000000..5815b748 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/deployment-only/rendered/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm diff --git a/helmcharts/services/opentelemetry-collector/examples/deployment-only/values.yaml b/helmcharts/services/opentelemetry-collector/examples/deployment-only/values.yaml new file mode 100644 index 00000000..91820513 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/deployment-only/values.yaml @@ -0,0 +1,14 @@ +mode: deployment + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +replicaCount: 3 + +resources: + limits: + cpu: 2 + memory: 4Gi diff --git a/helmcharts/services/opentelemetry-collector/examples/deployment-otlp-traces/rendered/configmap.yaml b/helmcharts/services/opentelemetry-collector/examples/deployment-otlp-traces/rendered/configmap.yaml new file mode 100644 index 00000000..163429dc --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/deployment-otlp-traces/rendered/configmap.yaml @@ -0,0 +1,49 @@ +--- +# Source: opentelemetry-collector/templates/configmap.yaml 
+apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +data: + relay: | + exporters: + debug: {} + extensions: + health_check: + endpoint: ${env:MY_POD_IP}:13133 + processors: + batch: {} + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + receivers: + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + service: + extensions: + - health_check + pipelines: + traces: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 diff --git a/helmcharts/services/opentelemetry-collector/examples/deployment-otlp-traces/rendered/deployment.yaml b/helmcharts/services/opentelemetry-collector/examples/deployment-otlp-traces/rendered/deployment.yaml new file mode 100644 index 00000000..a4312535 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/deployment-otlp-traces/rendered/deployment.yaml @@ -0,0 +1,82 @@ +--- +# Source: opentelemetry-collector/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + strategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 
0c0024feb0c9791a11a32fc6f883acd7401da0725e489388747021f23b2652af + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - /otelcol-k8s + args: + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-k8s:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: otlp + containerPort: 4317 + protocol: TCP + - name: otlp-http + containerPort: 4318 + protocol: TCP + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector + items: + - key: relay + path: relay.yaml + hostNetwork: false diff --git a/helmcharts/services/opentelemetry-collector/examples/deployment-otlp-traces/rendered/service.yaml b/helmcharts/services/opentelemetry-collector/examples/deployment-otlp-traces/rendered/service.yaml new file mode 100644 index 00000000..c3bedb97 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/deployment-otlp-traces/rendered/service.yaml @@ -0,0 +1,33 @@ +--- +# Source: opentelemetry-collector/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + + component: standalone-collector +spec: + type: ClusterIP + ports: + + - name: otlp + port: 4317 + targetPort: 4317 + protocol: TCP 
+ appProtocol: grpc + - name: otlp-http + port: 4318 + targetPort: 4318 + protocol: TCP + selector: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + internalTrafficPolicy: Cluster diff --git a/helmcharts/services/opentelemetry-collector/examples/deployment-otlp-traces/rendered/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/examples/deployment-otlp-traces/rendered/serviceaccount.yaml new file mode 100644 index 00000000..5815b748 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/deployment-otlp-traces/rendered/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm diff --git a/helmcharts/services/opentelemetry-collector/examples/deployment-otlp-traces/values.yaml b/helmcharts/services/opentelemetry-collector/examples/deployment-otlp-traces/values.yaml new file mode 100644 index 00000000..3297147a --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/deployment-otlp-traces/values.yaml @@ -0,0 +1,30 @@ +mode: deployment + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +ports: + jaeger-compact: + enabled: false + jaeger-thrift: + enabled: false + jaeger-grpc: + enabled: false + zipkin: + enabled: false + +config: + receivers: + jaeger: null + prometheus: null + zipkin: null + service: + pipelines: + traces: + receivers: + - otlp + metrics: null + logs: null diff --git a/helmcharts/services/opentelemetry-collector/examples/deployment-use-existing-configMap/deployment-values.yaml 
b/helmcharts/services/opentelemetry-collector/examples/deployment-use-existing-configMap/deployment-values.yaml new file mode 100644 index 00000000..098843d5 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/deployment-use-existing-configMap/deployment-values.yaml @@ -0,0 +1,28 @@ +mode: deployment + +resources: + limits: + cpu: 100m + memory: 200M + +configMap: + create: false + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + extraArgs: ["--config=/conf/config.yaml"] + +extraVolumes: + - name: custom-otelcol-configmap + configMap: + name: custom-otel-collector-config + items: + - key: config + path: config.yaml + defaultMode: 420 +extraVolumeMounts: + - name: custom-otelcol-configmap + mountPath: /conf/config.yaml diff --git a/helmcharts/services/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/deployment.yaml b/helmcharts/services/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/deployment.yaml new file mode 100644 index 00000000..eaec8218 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/deployment.yaml @@ -0,0 +1,101 @@ +--- +# Source: opentelemetry-collector/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + strategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + + labels: + 
app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - /otelcol-k8s + args: + - --config=/conf/config.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-k8s:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + - name: jaeger-grpc + containerPort: 14250 + protocol: TCP + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + - name: otlp + containerPort: 4317 + protocol: TCP + - name: otlp-http + containerPort: 4318 + protocol: TCP + - name: zipkin + containerPort: 9411 + protocol: TCP + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: GOMEMLIMIT + value: "152MiB" + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + resources: + limits: + cpu: 100m + memory: 200M + volumeMounts: + - mountPath: /conf/config.yaml + name: custom-otelcol-configmap + volumes: + - configMap: + defaultMode: 420 + items: + - key: config + path: config.yaml + name: custom-otel-collector-config + name: custom-otelcol-configmap + hostNetwork: false diff --git a/helmcharts/services/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/service.yaml b/helmcharts/services/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/service.yaml new file mode 100644 index 00000000..9c2d47c5 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/service.yaml @@ -0,0 +1,49 @@ +--- +# Source: opentelemetry-collector/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 
+ app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + + component: standalone-collector +spec: + type: ClusterIP + ports: + + - name: jaeger-compact + port: 6831 + targetPort: 6831 + protocol: UDP + - name: jaeger-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: 14268 + protocol: TCP + - name: otlp + port: 4317 + targetPort: 4317 + protocol: TCP + appProtocol: grpc + - name: otlp-http + port: 4318 + targetPort: 4318 + protocol: TCP + - name: zipkin + port: 9411 + targetPort: 9411 + protocol: TCP + selector: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + internalTrafficPolicy: Cluster diff --git a/helmcharts/services/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/serviceaccount.yaml new file mode 100644 index 00000000..5815b748 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/deployment-use-existing-configMap/rendered/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm diff --git a/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/clusterrole.yaml b/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/clusterrole.yaml new file mode 100644 index 00000000..8b4430d7 --- /dev/null +++ 
b/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/clusterrole.yaml @@ -0,0 +1,23 @@ +--- +# Source: opentelemetry-collector/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: example-opentelemetry-collector + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +rules: + - apiGroups: [""] + resources: ["pods", "namespaces"] + verbs: ["get", "watch", "list"] + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] diff --git a/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/clusterrolebinding.yaml b/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/clusterrolebinding.yaml new file mode 100644 index 00000000..e817448c --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/clusterrolebinding.yaml @@ -0,0 +1,21 @@ +--- +# Source: opentelemetry-collector/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: example-opentelemetry-collector + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: example-opentelemetry-collector +subjects: +- kind: ServiceAccount + name: example-opentelemetry-collector + namespace: default diff --git a/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/configmap.yaml 
b/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/configmap.yaml new file mode 100644 index 00000000..ed96e2e1 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/configmap.yaml @@ -0,0 +1,112 @@ +--- +# Source: opentelemetry-collector/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +data: + relay: | + exporters: + debug: {} + extensions: + health_check: + endpoint: ${env:MY_POD_IP}:13133 + processors: + batch: {} + k8sattributes: + extract: + metadata: + - k8s.namespace.name + - k8s.deployment.name + - k8s.statefulset.name + - k8s.daemonset.name + - k8s.cronjob.name + - k8s.job.name + - k8s.node.name + - k8s.pod.name + - k8s.pod.uid + - k8s.pod.start_time + passthrough: false + pod_association: + - sources: + - from: resource_attribute + name: k8s.pod.ip + - sources: + - from: resource_attribute + name: k8s.pod.uid + - sources: + - from: connection + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + receivers: + jaeger: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:14250 + thrift_compact: + endpoint: ${env:MY_POD_IP}:6831 + thrift_http: + endpoint: ${env:MY_POD_IP}:14268 + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + prometheus: + config: + scrape_configs: + - job_name: opentelemetry-collector + scrape_interval: 10s + static_configs: + - targets: + - ${env:MY_POD_IP}:8888 + zipkin: + endpoint: ${env:MY_POD_IP}:9411 + service: + extensions: + - health_check + pipelines: + logs: + exporters: + - debug + processors: + - k8sattributes + - memory_limiter + - batch + receivers: + - 
otlp + metrics: + exporters: + - debug + processors: + - k8sattributes + - memory_limiter + - batch + receivers: + - otlp + - prometheus + traces: + exporters: + - debug + processors: + - resource + - k8sattributes + - batch + receivers: + - otlp + - jaeger + - zipkin + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 diff --git a/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/deployment.yaml b/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/deployment.yaml new file mode 100644 index 00000000..97acdd77 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/deployment.yaml @@ -0,0 +1,94 @@ +--- +# Source: opentelemetry-collector/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + strategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 20873f73c237ce2266b3a59435f2e3db5cbb31bebc435f110c56a0a207318144 + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - /otelcol-k8s + args: + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-k8s:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + 
- name: jaeger-grpc + containerPort: 14250 + protocol: TCP + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + - name: otlp + containerPort: 4317 + protocol: TCP + - name: otlp-http + containerPort: 4318 + protocol: TCP + - name: zipkin + containerPort: 9411 + protocol: TCP + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector + items: + - key: relay + path: relay.yaml + hostNetwork: false diff --git a/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/service.yaml b/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/service.yaml new file mode 100644 index 00000000..9c2d47c5 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/service.yaml @@ -0,0 +1,49 @@ +--- +# Source: opentelemetry-collector/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + + component: standalone-collector +spec: + type: ClusterIP + ports: + + - name: jaeger-compact + port: 6831 + targetPort: 6831 + protocol: UDP + - name: jaeger-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: 14268 + protocol: TCP + - name: otlp + port: 4317 + targetPort: 4317 + protocol: TCP + appProtocol: grpc + - name: otlp-http + port: 4318 + targetPort: 4318 + protocol: TCP + - name: zipkin + port: 9411 + targetPort: 
9411 + protocol: TCP + selector: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + internalTrafficPolicy: Cluster diff --git a/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/serviceaccount.yaml new file mode 100644 index 00000000..5815b748 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/rendered/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm diff --git a/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/values.yaml b/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/values.yaml new file mode 100644 index 00000000..666a1eeb --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/kubernetesAttributes/values.yaml @@ -0,0 +1,21 @@ +mode: deployment + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +presets: + kubernetesAttributes: + enabled: true + +config: + service: + pipelines: + traces: + processors: + - resource + - k8sattributes + - batch + \ No newline at end of file diff --git a/helmcharts/services/opentelemetry-collector/examples/statefulset-only/rendered/configmap-statefulset.yaml b/helmcharts/services/opentelemetry-collector/examples/statefulset-only/rendered/configmap-statefulset.yaml new file mode 100644 index 00000000..72f816d4 --- /dev/null +++ 
b/helmcharts/services/opentelemetry-collector/examples/statefulset-only/rendered/configmap-statefulset.yaml @@ -0,0 +1,86 @@ +--- +# Source: opentelemetry-collector/templates/configmap-statefulset.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector-statefulset + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +data: + relay: | + exporters: + debug: {} + extensions: + health_check: + endpoint: ${env:MY_POD_IP}:13133 + processors: + batch: {} + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + receivers: + jaeger: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:14250 + thrift_compact: + endpoint: ${env:MY_POD_IP}:6831 + thrift_http: + endpoint: ${env:MY_POD_IP}:14268 + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + prometheus: + config: + scrape_configs: + - job_name: opentelemetry-collector + scrape_interval: 10s + static_configs: + - targets: + - ${env:MY_POD_IP}:8888 + zipkin: + endpoint: ${env:MY_POD_IP}:9411 + service: + extensions: + - health_check + pipelines: + logs: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + metrics: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - prometheus + traces: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - jaeger + - zipkin + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 diff --git a/helmcharts/services/opentelemetry-collector/examples/statefulset-only/rendered/service.yaml b/helmcharts/services/opentelemetry-collector/examples/statefulset-only/rendered/service.yaml new file mode 100644 index 00000000..4d022762 --- /dev/null +++ 
b/helmcharts/services/opentelemetry-collector/examples/statefulset-only/rendered/service.yaml @@ -0,0 +1,49 @@ +--- +# Source: opentelemetry-collector/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + + component: statefulset-collector +spec: + type: ClusterIP + ports: + + - name: jaeger-compact + port: 6831 + targetPort: 6831 + protocol: UDP + - name: jaeger-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: 14268 + protocol: TCP + - name: otlp + port: 4317 + targetPort: 4317 + protocol: TCP + appProtocol: grpc + - name: otlp-http + port: 4318 + targetPort: 4318 + protocol: TCP + - name: zipkin + port: 9411 + targetPort: 9411 + protocol: TCP + selector: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: statefulset-collector + internalTrafficPolicy: Cluster diff --git a/helmcharts/services/opentelemetry-collector/examples/statefulset-only/rendered/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/examples/statefulset-only/rendered/serviceaccount.yaml new file mode 100644 index 00000000..5815b748 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/statefulset-only/rendered/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm diff --git 
a/helmcharts/services/opentelemetry-collector/examples/statefulset-only/rendered/statefulset.yaml b/helmcharts/services/opentelemetry-collector/examples/statefulset-only/rendered/statefulset.yaml new file mode 100644 index 00000000..6eb5f5b3 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/statefulset-only/rendered/statefulset.yaml @@ -0,0 +1,101 @@ +--- +# Source: opentelemetry-collector/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + serviceName: example-opentelemetry-collector + podManagementPolicy: Parallel + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: statefulset-collector + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 452112766c1091c269e5cd1ce9137625b08f6332bb63d0a7139e0295705b8456 + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: statefulset-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - /otelcol-k8s + args: + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-k8s:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + - name: jaeger-grpc + containerPort: 14250 + protocol: TCP + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + - name: otlp + containerPort: 4317 + protocol: TCP + - name: otlp-http + containerPort: 4318 + protocol: TCP + - name: zipkin + containerPort: 9411 + protocol: 
TCP + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: GOMEMLIMIT + value: "152MiB" + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + resources: + limits: + cpu: 100m + memory: 200M + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector-statefulset + items: + - key: relay + path: relay.yaml + hostNetwork: false diff --git a/helmcharts/services/opentelemetry-collector/examples/statefulset-only/values.yaml b/helmcharts/services/opentelemetry-collector/examples/statefulset-only/values.yaml new file mode 100644 index 00000000..c1e9dba5 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/statefulset-only/values.yaml @@ -0,0 +1,14 @@ +mode: statefulset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +replicaCount: 2 + +resources: + limits: + cpu: 100m + memory: 200M diff --git a/helmcharts/services/opentelemetry-collector/examples/statefulset-with-pvc/rendered/configmap-statefulset.yaml b/helmcharts/services/opentelemetry-collector/examples/statefulset-with-pvc/rendered/configmap-statefulset.yaml new file mode 100644 index 00000000..72f816d4 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/statefulset-with-pvc/rendered/configmap-statefulset.yaml @@ -0,0 +1,86 @@ +--- +# Source: opentelemetry-collector/templates/configmap-statefulset.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector-statefulset + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +data: + relay: | + exporters: + debug: {} + extensions: + 
health_check: + endpoint: ${env:MY_POD_IP}:13133 + processors: + batch: {} + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + receivers: + jaeger: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:14250 + thrift_compact: + endpoint: ${env:MY_POD_IP}:6831 + thrift_http: + endpoint: ${env:MY_POD_IP}:14268 + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + prometheus: + config: + scrape_configs: + - job_name: opentelemetry-collector + scrape_interval: 10s + static_configs: + - targets: + - ${env:MY_POD_IP}:8888 + zipkin: + endpoint: ${env:MY_POD_IP}:9411 + service: + extensions: + - health_check + pipelines: + logs: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + metrics: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - prometheus + traces: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - jaeger + - zipkin + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 diff --git a/helmcharts/services/opentelemetry-collector/examples/statefulset-with-pvc/rendered/service.yaml b/helmcharts/services/opentelemetry-collector/examples/statefulset-with-pvc/rendered/service.yaml new file mode 100644 index 00000000..4d022762 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/statefulset-with-pvc/rendered/service.yaml @@ -0,0 +1,49 @@ +--- +# Source: opentelemetry-collector/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + + component: statefulset-collector +spec: + type: ClusterIP + ports: + + - name: jaeger-compact + port: 6831 + targetPort: 
6831 + protocol: UDP + - name: jaeger-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: 14268 + protocol: TCP + - name: otlp + port: 4317 + targetPort: 4317 + protocol: TCP + appProtocol: grpc + - name: otlp-http + port: 4318 + targetPort: 4318 + protocol: TCP + - name: zipkin + port: 9411 + targetPort: 9411 + protocol: TCP + selector: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: statefulset-collector + internalTrafficPolicy: Cluster diff --git a/helmcharts/services/opentelemetry-collector/examples/statefulset-with-pvc/rendered/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/examples/statefulset-with-pvc/rendered/serviceaccount.yaml new file mode 100644 index 00000000..5815b748 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/statefulset-with-pvc/rendered/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm diff --git a/helmcharts/services/opentelemetry-collector/examples/statefulset-with-pvc/rendered/statefulset.yaml b/helmcharts/services/opentelemetry-collector/examples/statefulset-with-pvc/rendered/statefulset.yaml new file mode 100644 index 00000000..f76d7577 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/statefulset-with-pvc/rendered/statefulset.yaml @@ -0,0 +1,126 @@ +--- +# Source: opentelemetry-collector/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + 
app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + serviceName: example-opentelemetry-collector + podManagementPolicy: Parallel + replicas: 2 + persistentVolumeClaimRetentionPolicy: + whenDeleted: Delete + whenScaled: Delete + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: statefulset-collector + updateStrategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 452112766c1091c269e5cd1ce9137625b08f6332bb63d0a7139e0295705b8456 + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: statefulset-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - /otelcol-k8s + args: + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-k8s:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + - name: jaeger-grpc + containerPort: 14250 + protocol: TCP + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + - name: otlp + containerPort: 4317 + protocol: TCP + - name: otlp-http + containerPort: 4318 + protocol: TCP + - name: zipkin + containerPort: 9411 + protocol: TCP + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: GOMEMLIMIT + value: "152MiB" + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + resources: + limits: + cpu: 100m + memory: 200M + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + - mountPath: /var/lib/storage/queue + name: queue + initContainers: + - command: + - sh + - -c + - 'chown -R 10001: /var/lib/storage/queue' + image: 
busybox:latest + name: init-fs + volumeMounts: + - mountPath: /var/lib/storage/queue + name: queue + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector-statefulset + items: + - key: relay + path: relay.yaml + hostNetwork: false + volumeClaimTemplates: + - metadata: + name: queue + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: standard diff --git a/helmcharts/services/opentelemetry-collector/examples/statefulset-with-pvc/values.yaml b/helmcharts/services/opentelemetry-collector/examples/statefulset-with-pvc/values.yaml new file mode 100644 index 00000000..269e20a8 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/statefulset-with-pvc/values.yaml @@ -0,0 +1,46 @@ +mode: statefulset + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +replicaCount: 2 + +resources: + limits: + cpu: 100m + memory: 200M + +statefulset: + persistentVolumeClaimRetentionPolicy: + enabled: true + whenDeleted: Delete + whenScaled: Delete + + volumeClaimTemplates: + - metadata: + name: queue + spec: + storageClassName: standard + accessModes: + - ReadWriteOnce + resources: + requests: + storage: "1Gi" + +extraVolumeMounts: + - name: queue + mountPath: /var/lib/storage/queue + +initContainers: + - name: init-fs + image: busybox:latest + command: + - sh + - "-c" + - "chown -R 10001: /var/lib/storage/queue" + volumeMounts: + - name: queue + mountPath: /var/lib/storage/queue diff --git a/helmcharts/services/opentelemetry-collector/examples/using-GOMEMLIMIT/rendered/configmap.yaml b/helmcharts/services/opentelemetry-collector/examples/using-GOMEMLIMIT/rendered/configmap.yaml new file mode 100644 index 00000000..78d98843 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/using-GOMEMLIMIT/rendered/configmap.yaml @@ -0,0 +1,86 @@ +--- +# Source: opentelemetry-collector/templates/configmap.yaml +apiVersion: v1 
+kind: ConfigMap +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +data: + relay: | + exporters: + debug: {} + extensions: + health_check: + endpoint: ${env:MY_POD_IP}:13133 + processors: + batch: {} + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + receivers: + jaeger: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:14250 + thrift_compact: + endpoint: ${env:MY_POD_IP}:6831 + thrift_http: + endpoint: ${env:MY_POD_IP}:14268 + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + prometheus: + config: + scrape_configs: + - job_name: opentelemetry-collector + scrape_interval: 10s + static_configs: + - targets: + - ${env:MY_POD_IP}:8888 + zipkin: + endpoint: ${env:MY_POD_IP}:9411 + service: + extensions: + - health_check + pipelines: + logs: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + metrics: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - prometheus + traces: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - jaeger + - zipkin + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 diff --git a/helmcharts/services/opentelemetry-collector/examples/using-GOMEMLIMIT/rendered/deployment.yaml b/helmcharts/services/opentelemetry-collector/examples/using-GOMEMLIMIT/rendered/deployment.yaml new file mode 100644 index 00000000..ab79539a --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/using-GOMEMLIMIT/rendered/deployment.yaml @@ -0,0 +1,100 @@ +--- +# Source: opentelemetry-collector/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: 
example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + strategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 3ece6cc294f0ed92854413f33366d43d4b73621c702b50e6564565795cb9fbd0 + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + command: + - /otelcol-k8s + args: + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-k8s:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + - name: jaeger-grpc + containerPort: 14250 + protocol: TCP + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + - name: otlp + containerPort: 4317 + protocol: TCP + - name: otlp-http + containerPort: 4318 + protocol: TCP + - name: zipkin + containerPort: 9411 + protocol: TCP + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: GOMEMLIMIT + value: "152MiB" + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + resources: + limits: + cpu: 100m + memory: 200M + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector + items: + - key: relay + path: relay.yaml + hostNetwork: false diff --git 
a/helmcharts/services/opentelemetry-collector/examples/using-GOMEMLIMIT/rendered/service.yaml b/helmcharts/services/opentelemetry-collector/examples/using-GOMEMLIMIT/rendered/service.yaml new file mode 100644 index 00000000..9c2d47c5 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/using-GOMEMLIMIT/rendered/service.yaml @@ -0,0 +1,49 @@ +--- +# Source: opentelemetry-collector/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + + component: standalone-collector +spec: + type: ClusterIP + ports: + + - name: jaeger-compact + port: 6831 + targetPort: 6831 + protocol: UDP + - name: jaeger-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: 14268 + protocol: TCP + - name: otlp + port: 4317 + targetPort: 4317 + protocol: TCP + appProtocol: grpc + - name: otlp-http + port: 4318 + targetPort: 4318 + protocol: TCP + - name: zipkin + port: 9411 + targetPort: 9411 + protocol: TCP + selector: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + internalTrafficPolicy: Cluster diff --git a/helmcharts/services/opentelemetry-collector/examples/using-GOMEMLIMIT/rendered/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/examples/using-GOMEMLIMIT/rendered/serviceaccount.yaml new file mode 100644 index 00000000..5815b748 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/using-GOMEMLIMIT/rendered/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + namespace: 
default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm diff --git a/helmcharts/services/opentelemetry-collector/examples/using-GOMEMLIMIT/values.yaml b/helmcharts/services/opentelemetry-collector/examples/using-GOMEMLIMIT/values.yaml new file mode 100644 index 00000000..7a29aad5 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/using-GOMEMLIMIT/values.yaml @@ -0,0 +1,13 @@ +mode: deployment + +image: + repository: "otel/opentelemetry-collector-k8s" + +command: + name: "otelcol-k8s" + +resources: + limits: + cpu: 100m + memory: 200M +useGOMEMLIMIT: true diff --git a/helmcharts/services/opentelemetry-collector/examples/using-custom-config/rendered/deployment.yaml b/helmcharts/services/opentelemetry-collector/examples/using-custom-config/rendered/deployment.yaml new file mode 100644 index 00000000..e1985958 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/using-custom-config/rendered/deployment.yaml @@ -0,0 +1,92 @@ +--- +# Source: opentelemetry-collector/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + strategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: 
example + component: standalone-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + containers: + - name: opentelemetry-collector + args: + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector-k8s:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + - name: jaeger-grpc + containerPort: 14250 + protocol: TCP + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + - name: otlp + containerPort: 4317 + protocol: TCP + - name: otlp-http + containerPort: 4318 + protocol: TCP + - name: zipkin + containerPort: 9411 + protocol: TCP + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + livenessProbe: + httpGet: + path: / + port: 13133 + readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: user-config + items: + - key: relay + path: relay.yaml + hostNetwork: false diff --git a/helmcharts/services/opentelemetry-collector/examples/using-custom-config/rendered/service.yaml b/helmcharts/services/opentelemetry-collector/examples/using-custom-config/rendered/service.yaml new file mode 100644 index 00000000..9c2d47c5 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/using-custom-config/rendered/service.yaml @@ -0,0 +1,49 @@ +--- +# Source: opentelemetry-collector/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + + component: standalone-collector +spec: + type: ClusterIP + ports: + + - name: jaeger-compact + port: 
6831 + targetPort: 6831 + protocol: UDP + - name: jaeger-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: 14268 + protocol: TCP + - name: otlp + port: 4317 + targetPort: 4317 + protocol: TCP + appProtocol: grpc + - name: otlp-http + port: 4318 + targetPort: 4318 + protocol: TCP + - name: zipkin + port: 9411 + targetPort: 9411 + protocol: TCP + selector: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + internalTrafficPolicy: Cluster diff --git a/helmcharts/services/opentelemetry-collector/examples/using-custom-config/rendered/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/examples/using-custom-config/rendered/serviceaccount.yaml new file mode 100644 index 00000000..5815b748 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/using-custom-config/rendered/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm diff --git a/helmcharts/services/opentelemetry-collector/examples/using-custom-config/values.yaml b/helmcharts/services/opentelemetry-collector/examples/using-custom-config/values.yaml new file mode 100644 index 00000000..06ed084b --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/using-custom-config/values.yaml @@ -0,0 +1,8 @@ +mode: deployment + +image: + repository: "otel/opentelemetry-collector-k8s" + +configMap: + create: false + existingName: user-config diff --git a/helmcharts/services/opentelemetry-collector/examples/using-shared-processes/rendered/configmap.yaml 
b/helmcharts/services/opentelemetry-collector/examples/using-shared-processes/rendered/configmap.yaml new file mode 100644 index 00000000..78d98843 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/using-shared-processes/rendered/configmap.yaml @@ -0,0 +1,86 @@ +--- +# Source: opentelemetry-collector/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +data: + relay: | + exporters: + debug: {} + extensions: + health_check: + endpoint: ${env:MY_POD_IP}:13133 + processors: + batch: {} + memory_limiter: + check_interval: 5s + limit_percentage: 80 + spike_limit_percentage: 25 + receivers: + jaeger: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:14250 + thrift_compact: + endpoint: ${env:MY_POD_IP}:6831 + thrift_http: + endpoint: ${env:MY_POD_IP}:14268 + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + prometheus: + config: + scrape_configs: + - job_name: opentelemetry-collector + scrape_interval: 10s + static_configs: + - targets: + - ${env:MY_POD_IP}:8888 + zipkin: + endpoint: ${env:MY_POD_IP}:9411 + service: + extensions: + - health_check + pipelines: + logs: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + metrics: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - prometheus + traces: + exporters: + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + - jaeger + - zipkin + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 diff --git a/helmcharts/services/opentelemetry-collector/examples/using-shared-processes/rendered/deployment.yaml 
b/helmcharts/services/opentelemetry-collector/examples/using-shared-processes/rendered/deployment.yaml new file mode 100644 index 00000000..62b1a655 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/using-shared-processes/rendered/deployment.yaml @@ -0,0 +1,93 @@ +--- +# Source: opentelemetry-collector/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + strategy: + type: RollingUpdate + template: + metadata: + annotations: + checksum/config: 3ece6cc294f0ed92854413f33366d43d4b73621c702b50e6564565795cb9fbd0 + + labels: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + + spec: + + serviceAccountName: example-opentelemetry-collector + securityContext: + {} + shareProcessNamespace: true + containers: + - name: opentelemetry-collector + args: + - --config=/conf/relay.yaml + securityContext: + {} + image: "otel/opentelemetry-collector:0.113.0" + imagePullPolicy: IfNotPresent + ports: + + - name: jaeger-compact + containerPort: 6831 + protocol: UDP + - name: jaeger-grpc + containerPort: 14250 + protocol: TCP + - name: jaeger-thrift + containerPort: 14268 + protocol: TCP + - name: otlp + containerPort: 4317 + protocol: TCP + - name: otlp-http + containerPort: 4318 + protocol: TCP + - name: zipkin + containerPort: 9411 + protocol: TCP + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + livenessProbe: + httpGet: + path: / + port: 13133 + 
readinessProbe: + httpGet: + path: / + port: 13133 + volumeMounts: + - mountPath: /conf + name: opentelemetry-collector-configmap + volumes: + - name: opentelemetry-collector-configmap + configMap: + name: example-opentelemetry-collector + items: + - key: relay + path: relay.yaml + hostNetwork: false diff --git a/helmcharts/services/opentelemetry-collector/examples/using-shared-processes/rendered/service.yaml b/helmcharts/services/opentelemetry-collector/examples/using-shared-processes/rendered/service.yaml new file mode 100644 index 00000000..9c2d47c5 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/using-shared-processes/rendered/service.yaml @@ -0,0 +1,49 @@ +--- +# Source: opentelemetry-collector/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm + + component: standalone-collector +spec: + type: ClusterIP + ports: + + - name: jaeger-compact + port: 6831 + targetPort: 6831 + protocol: UDP + - name: jaeger-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + - name: jaeger-thrift + port: 14268 + targetPort: 14268 + protocol: TCP + - name: otlp + port: 4317 + targetPort: 4317 + protocol: TCP + appProtocol: grpc + - name: otlp-http + port: 4318 + targetPort: 4318 + protocol: TCP + - name: zipkin + port: 9411 + targetPort: 9411 + protocol: TCP + selector: + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + component: standalone-collector + internalTrafficPolicy: Cluster diff --git a/helmcharts/services/opentelemetry-collector/examples/using-shared-processes/rendered/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/examples/using-shared-processes/rendered/serviceaccount.yaml new file mode 
100644 index 00000000..5815b748 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/using-shared-processes/rendered/serviceaccount.yaml @@ -0,0 +1,13 @@ +--- +# Source: opentelemetry-collector/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: example-opentelemetry-collector + namespace: default + labels: + helm.sh/chart: opentelemetry-collector-0.109.0 + app.kubernetes.io/name: opentelemetry-collector + app.kubernetes.io/instance: example + app.kubernetes.io/version: "0.113.0" + app.kubernetes.io/managed-by: Helm diff --git a/helmcharts/services/opentelemetry-collector/examples/using-shared-processes/values.yaml b/helmcharts/services/opentelemetry-collector/examples/using-shared-processes/values.yaml new file mode 100644 index 00000000..6c12f1b1 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/examples/using-shared-processes/values.yaml @@ -0,0 +1,6 @@ +mode: deployment + +image: + repository: otel/opentelemetry-collector + +shareProcessNamespace: true diff --git a/helmcharts/services/opentelemetry-collector/templates/NOTES.txt b/helmcharts/services/opentelemetry-collector/templates/NOTES.txt new file mode 100644 index 00000000..83f8a11c --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/NOTES.txt @@ -0,0 +1,49 @@ +{{- if not .Values.image.repository }} +{{ fail "[ERROR] 'image.repository' must be set. See https://github.com/open-telemetry/opentelemetry-helm-charts/blob/main/charts/opentelemetry-collector/UPGRADING.md for instructions." }} +{{ end }} + +{{- if and (not (eq .Values.mode "daemonset")) (not (eq .Values.mode "deployment")) (not (eq .Values.mode "statefulset")) }} +{{ fail "[ERROR] 'mode' must be set. See https://github.com/open-telemetry/opentelemetry-helm-charts/blob/main/charts/opentelemetry-collector/UPGRADING.md for instructions." 
}} +{{ end }} + +{{- if and (eq .Values.dnsPolicy "None") (not .Values.dnsConfig) }} +{{- fail "[ERROR] dnsConfig should be provided when dnsPolicy is None" }} +{{ end }} + +{{- if .Values.presets.clusterMetrics.enabled }} +{{- if eq .Values.mode "daemonset"}} +{{- fail "Cluster Metrics preset is not suitable for daemonset mode. Please use statefulset or deployment mode with replicaCount: 1"}} +{{ end }} +{{- if gt (int .Values.replicaCount) 1 }} +{{- fail "Cluster Metrics preset is not suitable for replicaCount greater than one. Please change replica count to one." }} +{{ end }} +{{ end }} + +{{/* validate extensions must include health_check */}} +{{- if not (has "health_check" .Values.config.service.extensions) }} +{{ fail "[ERROR] The opentelemetry-collector chart requires that the health_check extension to be included in the extension list." }} +{{- end}} + +{{- if not .Values.configMap.create }} +[WARNING] "configMap" will not be created and "config" will not take effect. +{{ end }} + +{{- if and .Values.configMap.create .Values.configMap.existingName }} +{{ fail "[ERROR] Cannot set configMap.existingName when configMap.create is true" }} +{{ end }} + +{{- if and .Values.configMap.create .Values.configMap.existingPath }} +{{ fail "[ERROR] Cannot set configMap.existingPath when configMap.create is true" }} +{{ end }} + +{{- if not .Values.resources }} +[WARNING] No resource limits or requests were set. Consider setting resource requests and limits for your collector(s) via the `resources` field. +{{ end }} + +{{- if and (eq .Values.mode "daemonset") (eq .Values.service.internalTrafficPolicy "Cluster") }} +[WARNING] Setting internalTrafficPolicy to 'Cluster' on Daemonset is not recommended. Consider using 'Local' instead. +{{ end }} + +{{- if and (.Values.useGOMEMLIMIT) (not ((((.Values.resources).limits).memory))) }} +[WARNING] "useGOMEMLIMIT" is enabled but memory limits have not been supplied so the GOMEMLIMIT env var could not be added.
Solve this problem by setting resources.limits.memory or disabling useGOMEMLIMIT +{{ end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/_config.tpl b/helmcharts/services/opentelemetry-collector/templates/_config.tpl new file mode 100644 index 00000000..4b670f94 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/_config.tpl @@ -0,0 +1,299 @@ +{{- define "opentelemetry-collector.baseConfig" -}} +{{- if .Values.alternateConfig }} +{{- .Values.alternateConfig | toYaml }} +{{- else}} +{{- .Values.config | toYaml }} +{{- end }} +{{- end }} + +{{/* +Build config file for daemonset OpenTelemetry Collector +*/}} +{{- define "opentelemetry-collector.daemonsetConfig" -}} +{{- $values := deepCopy .Values }} +{{- $data := dict "Values" $values | mustMergeOverwrite (deepCopy .) }} +{{- $config := include "opentelemetry-collector.baseConfig" $data | fromYaml }} +{{- if .Values.presets.logsCollection.enabled }} +{{- $config = (include "opentelemetry-collector.applyLogsCollectionConfig" (dict "Values" $data "config" $config) | fromYaml) }} +{{- end }} +{{- if .Values.presets.hostMetrics.enabled }} +{{- $config = (include "opentelemetry-collector.applyHostMetricsConfig" (dict "Values" $data "config" $config) | fromYaml) }} +{{- end }} +{{- if .Values.presets.kubeletMetrics.enabled }} +{{- $config = (include "opentelemetry-collector.applyKubeletMetricsConfig" (dict "Values" $data "config" $config) | fromYaml) }} +{{- end }} +{{- if .Values.presets.kubernetesAttributes.enabled }} +{{- $config = (include "opentelemetry-collector.applyKubernetesAttributesConfig" (dict "Values" $data "config" $config) | fromYaml) }} +{{- end }} +{{- if .Values.presets.clusterMetrics.enabled }} +{{- $config = (include "opentelemetry-collector.applyClusterMetricsConfig" (dict "Values" $data "config" $config) | fromYaml) }} +{{- end }} +{{- tpl (toYaml $config) . 
}} +{{- end }} + +{{/* +Build config file for deployment OpenTelemetry Collector +*/}} +{{- define "opentelemetry-collector.deploymentConfig" -}} +{{- $values := deepCopy .Values }} +{{- $data := dict "Values" $values | mustMergeOverwrite (deepCopy .) }} +{{- $config := include "opentelemetry-collector.baseConfig" $data | fromYaml }} +{{- if .Values.presets.logsCollection.enabled }} +{{- $config = (include "opentelemetry-collector.applyLogsCollectionConfig" (dict "Values" $data "config" $config) | fromYaml) }} +{{- end }} +{{- if .Values.presets.hostMetrics.enabled }} +{{- $config = (include "opentelemetry-collector.applyHostMetricsConfig" (dict "Values" $data "config" $config) | fromYaml) }} +{{- end }} +{{- if .Values.presets.kubeletMetrics.enabled }} +{{- $config = (include "opentelemetry-collector.applyKubeletMetricsConfig" (dict "Values" $data "config" $config) | fromYaml) }} +{{- end }} +{{- if .Values.presets.kubernetesAttributes.enabled }} +{{- $config = (include "opentelemetry-collector.applyKubernetesAttributesConfig" (dict "Values" $data "config" $config) | fromYaml) }} +{{- end }} +{{- if .Values.presets.kubernetesEvents.enabled }} +{{- $config = (include "opentelemetry-collector.applyKubernetesEventsConfig" (dict "Values" $data "config" $config) | fromYaml) }} +{{- end }} +{{- if .Values.presets.clusterMetrics.enabled }} +{{- $config = (include "opentelemetry-collector.applyClusterMetricsConfig" (dict "Values" $data "config" $config) | fromYaml) }} +{{- end }} +{{- tpl (toYaml $config) . 
}} +{{- end }} + +{{- define "opentelemetry-collector.applyHostMetricsConfig" -}} +{{- $config := mustMergeOverwrite (dict "service" (dict "pipelines" (dict "metrics" (dict "receivers" list)))) (include "opentelemetry-collector.hostMetricsConfig" .Values | fromYaml) .config }} +{{- $_ := set $config.service.pipelines.metrics "receivers" (append $config.service.pipelines.metrics.receivers "hostmetrics" | uniq) }} +{{- $config | toYaml }} +{{- end }} + +{{- define "opentelemetry-collector.hostMetricsConfig" -}} +receivers: + hostmetrics: + root_path: /hostfs + collection_interval: 10s + scrapers: + cpu: + load: + memory: + disk: + filesystem: + exclude_mount_points: + mount_points: + - /dev/* + - /proc/* + - /sys/* + - /run/k3s/containerd/* + - /var/lib/docker/* + - /var/lib/kubelet/* + - /snap/* + match_type: regexp + exclude_fs_types: + fs_types: + - autofs + - binfmt_misc + - bpf + - cgroup2 + - configfs + - debugfs + - devpts + - devtmpfs + - fusectl + - hugetlbfs + - iso9660 + - mqueue + - nsfs + - overlay + - proc + - procfs + - pstore + - rpc_pipefs + - securityfs + - selinuxfs + - squashfs + - sysfs + - tracefs + match_type: strict + network: +{{- end }} + +{{- define "opentelemetry-collector.applyClusterMetricsConfig" -}} +{{- $config := mustMergeOverwrite (dict "service" (dict "pipelines" (dict "metrics" (dict "receivers" list)))) (include "opentelemetry-collector.clusterMetricsConfig" .Values | fromYaml) .config }} +{{- $_ := set $config.service.pipelines.metrics "receivers" (append $config.service.pipelines.metrics.receivers "k8s_cluster" | uniq) }} +{{- $config | toYaml }} +{{- end }} + +{{- define "opentelemetry-collector.clusterMetricsConfig" -}} +receivers: + k8s_cluster: + collection_interval: 10s +{{- end }} + +{{- define "opentelemetry-collector.applyKubeletMetricsConfig" -}} +{{- $config := mustMergeOverwrite (dict "service" (dict "pipelines" (dict "metrics" (dict "receivers" list)))) (include "opentelemetry-collector.kubeletMetricsConfig" .Values 
| fromYaml) .config }} +{{- $_ := set $config.service.pipelines.metrics "receivers" (append $config.service.pipelines.metrics.receivers "kubeletstats" | uniq) }} +{{- $config | toYaml }} +{{- end }} + +{{- define "opentelemetry-collector.kubeletMetricsConfig" -}} +receivers: + kubeletstats: + collection_interval: 20s + auth_type: "serviceAccount" + endpoint: "${env:K8S_NODE_NAME}:10250" +{{- end }} + +{{- define "opentelemetry-collector.applyLogsCollectionConfig" -}} +{{- $config := mustMergeOverwrite (dict "service" (dict "pipelines" (dict "logs" (dict "receivers" list)))) (include "opentelemetry-collector.logsCollectionConfig" .Values | fromYaml) .config }} +{{- $_ := set $config.service.pipelines.logs "receivers" (append $config.service.pipelines.logs.receivers "filelog" | uniq) }} +{{- if .Values.Values.presets.logsCollection.storeCheckpoints}} +{{- $configExtensions := mustMergeOverwrite (dict "service" (dict "extensions" list)) $config }} +{{- $_ := set $config.service "extensions" (append $configExtensions.service.extensions "file_storage" | uniq) }} +{{- end }} +{{- $config | toYaml }} +{{- end }} + +{{- define "opentelemetry-collector.logsCollectionConfig" -}} +{{- if .Values.presets.logsCollection.storeCheckpoints }} +extensions: + file_storage: + directory: /var/lib/otelcol +{{- end }} +receivers: + filelog: + include: [ /var/log/pods/*/*/*.log ] + {{- if .Values.presets.logsCollection.includeCollectorLogs }} + exclude: [] + {{- else }} + # Exclude collector container's logs. The file format is /var/log/pods/__//.log + exclude: [ /var/log/pods/{{ include "opentelemetry-collector.namespace" . }}_{{ include "opentelemetry-collector.fullname" . }}*_*/{{ include "opentelemetry-collector.lowercase_chartname" . 
}}/*.log ] + {{- end }} + start_at: end + retry_on_failure: + enabled: true + {{- if .Values.presets.logsCollection.storeCheckpoints}} + storage: file_storage + {{- end }} + include_file_path: true + include_file_name: false + operators: + # parse container logs + - type: container + id: container-parser + max_log_size: {{ $.Values.presets.logsCollection.maxRecombineLogSize }} +{{- end }} + +{{- define "opentelemetry-collector.applyKubernetesAttributesConfig" -}} +{{- $config := mustMergeOverwrite (include "opentelemetry-collector.kubernetesAttributesConfig" .Values | fromYaml) .config }} +{{- if $config.service.pipelines.logs }} + {{- $config = mustMergeOverwrite (dict "service" (dict "pipelines" (dict "logs" (dict "processors" list)))) $config }} + {{- if not (has "k8sattributes" $config.service.pipelines.logs.processors) }} + {{- $_ := set $config.service.pipelines.logs "processors" (prepend $config.service.pipelines.logs.processors "k8sattributes" | uniq) }} + {{- end }} +{{- end }} +{{- if and $config.service.pipelines.metrics }} + {{- $config = mustMergeOverwrite (dict "service" (dict "pipelines" (dict "metrics" (dict "processors" list)))) $config }} + {{- if not (has "k8sattributes" $config.service.pipelines.metrics.processors) }} + {{- $_ := set $config.service.pipelines.metrics "processors" (prepend $config.service.pipelines.metrics.processors "k8sattributes" | uniq) }} + {{- end }} +{{- end }} +{{- if and $config.service.pipelines.traces }} + {{- $config = mustMergeOverwrite (dict "service" (dict "pipelines" (dict "traces" (dict "processors" list)))) $config }} + {{- if not (has "k8sattributes" $config.service.pipelines.traces.processors) }} + {{- $_ := set $config.service.pipelines.traces "processors" (prepend $config.service.pipelines.traces.processors "k8sattributes" | uniq) }} + {{- end }} +{{- end }} +{{- $config | toYaml }} +{{- end }} + +{{- define "opentelemetry-collector.kubernetesAttributesConfig" -}} +processors: + k8sattributes: + {{- if eq 
.Values.mode "daemonset" }} + filter: + node_from_env_var: K8S_NODE_NAME + {{- end }} + passthrough: false + pod_association: + - sources: + - from: resource_attribute + name: k8s.pod.ip + - sources: + - from: resource_attribute + name: k8s.pod.uid + - sources: + - from: connection + extract: + metadata: + - "k8s.namespace.name" + - "k8s.deployment.name" + - "k8s.statefulset.name" + - "k8s.daemonset.name" + - "k8s.cronjob.name" + - "k8s.job.name" + - "k8s.node.name" + - "k8s.pod.name" + - "k8s.pod.uid" + - "k8s.pod.start_time" + {{- if .Values.presets.kubernetesAttributes.extractAllPodLabels }} + labels: + - tag_name: $$1 + key_regex: (.*) + from: pod + {{- end }} + {{- if .Values.presets.kubernetesAttributes.extractAllPodAnnotations }} + annotations: + - tag_name: $$1 + key_regex: (.*) + from: pod + {{- end }} +{{- end }} + +{{/* Build the list of port for service */}} +{{- define "opentelemetry-collector.servicePortsConfig" -}} +{{- $ports := deepCopy .Values.ports }} +{{- range $key, $port := $ports }} +{{- if $port.enabled }} +- name: {{ $key }} + port: {{ $port.servicePort }} + targetPort: {{ $port.containerPort }} + protocol: {{ $port.protocol }} + {{- if $port.appProtocol }} + appProtocol: {{ $port.appProtocol }} + {{- end }} +{{- if $port.nodePort }} + nodePort: {{ $port.nodePort }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{/* Build the list of port for pod */}} +{{- define "opentelemetry-collector.podPortsConfig" -}} +{{- $ports := deepCopy .Values.ports }} +{{- range $key, $port := $ports }} +{{- if $port.enabled }} +- name: {{ $key }} + containerPort: {{ $port.containerPort }} + protocol: {{ $port.protocol }} + {{- if and $.isAgent $port.hostPort }} + hostPort: {{ $port.hostPort }} + {{- end }} +{{- end }} +{{- end }} +{{- end }} + +{{- define "opentelemetry-collector.applyKubernetesEventsConfig" -}} +{{- $config := mustMergeOverwrite (dict "service" (dict "pipelines" (dict "logs" (dict "receivers" list)))) (include 
"opentelemetry-collector.kubernetesEventsConfig" .Values | fromYaml) .config }} +{{- $_ := set $config.service.pipelines.logs "receivers" (append $config.service.pipelines.logs.receivers "k8sobjects" | uniq) }} +{{- $config | toYaml }} +{{- end }} + +{{- define "opentelemetry-collector.kubernetesEventsConfig" -}} +receivers: + k8sobjects: + objects: + - name: events + mode: "watch" + group: "events.k8s.io" + exclude_watch_type: + - "DELETED" +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/_helpers.tpl b/helmcharts/services/opentelemetry-collector/templates/_helpers.tpl new file mode 100644 index 00000000..130bf2a7 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/_helpers.tpl @@ -0,0 +1,244 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "opentelemetry-collector.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{- define "opentelemetry-collector.lowercase_chartname" -}} +{{- default .Chart.Name | lower }} +{{- end }} + +{{/* +Get component name +*/}} +{{- define "opentelemetry-collector.component" -}} +{{- if eq .Values.mode "deployment" -}} +component: standalone-collector +{{- end -}} +{{- if eq .Values.mode "daemonset" -}} +component: agent-collector +{{- end -}} +{{- if eq .Values.mode "statefulset" -}} +component: statefulset-collector +{{- end -}} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "opentelemetry-collector.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "opentelemetry-collector.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "opentelemetry-collector.labels" -}} +helm.sh/chart: {{ include "opentelemetry-collector.chart" . }} +{{ include "opentelemetry-collector.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{ include "opentelemetry-collector.additionalLabels" . }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "opentelemetry-collector.selectorLabels" -}} +app.kubernetes.io/name: {{ include "opentelemetry-collector.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "opentelemetry-collector.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "opentelemetry-collector.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + + +{{/* +Create the name of the clusterRole to use +*/}} +{{- define "opentelemetry-collector.clusterRoleName" -}} +{{- default (include "opentelemetry-collector.fullname" .) 
.Values.clusterRole.name }} +{{- end }} + +{{/* +Create the name of the clusterRoleBinding to use +*/}} +{{- define "opentelemetry-collector.clusterRoleBindingName" -}} +{{- default (include "opentelemetry-collector.fullname" .) .Values.clusterRole.clusterRoleBinding.name }} +{{- end }} + +{{- define "opentelemetry-collector.podAnnotations" -}} +{{- if .Values.podAnnotations }} +{{- tpl (.Values.podAnnotations | toYaml) . }} +{{- end }} +{{- end }} + +{{- define "opentelemetry-collector.podLabels" -}} +{{- if .Values.podLabels }} +{{- tpl (.Values.podLabels | toYaml) . }} +{{- end }} +{{- end }} + +{{- define "opentelemetry-collector.additionalLabels" -}} +{{- if .Values.additionalLabels }} +{{- tpl (.Values.additionalLabels | toYaml) . }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for podDisruptionBudget. +*/}} +{{- define "podDisruptionBudget.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "policy/v1") (semverCompare ">= 1.21-0" .Capabilities.KubeVersion.Version) -}} + {{- print "policy/v1" -}} + {{- else -}} + {{- print "policy/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* +Compute Service creation on mode +*/}} +{{- define "opentelemetry-collector.serviceEnabled" }} + {{- $serviceEnabled := true }} + {{- if not (eq (toString .Values.service.enabled) "") }} + {{- $serviceEnabled = .Values.service.enabled -}} + {{- end }} + {{- if and (eq .Values.mode "daemonset") (not .Values.service.enabled) }} + {{- $serviceEnabled = false -}} + {{- end }} + + {{- print $serviceEnabled }} +{{- end -}} + + +{{/* +Compute InternalTrafficPolicy on Service creation +*/}} +{{- define "opentelemetry-collector.serviceInternalTrafficPolicy" }} + {{- if and (eq .Values.mode "daemonset") (eq .Values.service.enabled true) }} + {{- print (.Values.service.internalTrafficPolicy | default "Local") -}} + {{- else }} + {{- print (.Values.service.internalTrafficPolicy | default "Cluster") -}} + {{- end }} +{{- end -}} + +{{/* +Allow the release namespace to 
be overridden +*/}} +{{- define "opentelemetry-collector.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{/* + This helper converts the input value of memory to Bytes. + Input needs to be a valid value as supported by k8s memory resource field. + */}} +{{- define "opentelemetry-collector.convertMemToBytes" }} + {{- $mem := lower . -}} + {{- if hasSuffix "e" $mem -}} + {{- $mem = mulf (trimSuffix "e" $mem | float64) 1e18 -}} + {{- else if hasSuffix "ei" $mem -}} + {{- $mem = mulf (trimSuffix "e" $mem | float64) 0x1p60 -}} + {{- else if hasSuffix "p" $mem -}} + {{- $mem = mulf (trimSuffix "p" $mem | float64) 1e15 -}} + {{- else if hasSuffix "pi" $mem -}} + {{- $mem = mulf (trimSuffix "pi" $mem | float64) 0x1p50 -}} + {{- else if hasSuffix "t" $mem -}} + {{- $mem = mulf (trimSuffix "t" $mem | float64) 1e12 -}} + {{- else if hasSuffix "ti" $mem -}} + {{- $mem = mulf (trimSuffix "ti" $mem | float64) 0x1p40 -}} + {{- else if hasSuffix "g" $mem -}} + {{- $mem = mulf (trimSuffix "g" $mem | float64) 1e9 -}} + {{- else if hasSuffix "gi" $mem -}} + {{- $mem = mulf (trimSuffix "gi" $mem | float64) 0x1p30 -}} + {{- else if hasSuffix "m" $mem -}} + {{- $mem = mulf (trimSuffix "m" $mem | float64) 1e6 -}} + {{- else if hasSuffix "mi" $mem -}} + {{- $mem = mulf (trimSuffix "mi" $mem | float64) 0x1p20 -}} + {{- else if hasSuffix "k" $mem -}} + {{- $mem = mulf (trimSuffix "k" $mem | float64) 1e3 -}} + {{- else if hasSuffix "ki" $mem -}} + {{- $mem = mulf (trimSuffix "ki" $mem | float64) 0x1p10 -}} + {{- end }} +{{- $mem }} +{{- end }} + +{{- define "opentelemetry-collector.gomemlimit" }} +{{- $memlimitBytes := include "opentelemetry-collector.convertMemToBytes" . | mulf 0.8 -}} +{{- printf "%dMiB" (divf $memlimitBytes 0x1p20 | floor | int64) -}} +{{- end }} + +{{/* +Get HPA kind from mode. +The capitalization is important for StatefulSet. 
+*/}} +{{- define "opentelemetry-collector.hpaKind" -}} +{{- if eq .Values.mode "deployment" -}} +{{- print "Deployment" -}} +{{- end -}} +{{- if eq .Values.mode "statefulset" -}} +{{- print "StatefulSet" -}} +{{- end -}} +{{- end }} + +{{/* +Get ConfigMap name if existingName is defined, otherwise use default name for generated config. +*/}} +{{- define "opentelemetry-collector.configName" -}} + {{- if .Values.configMap.existingName -}} + {{- .Values.configMap.existingName }} + {{- else }} + {{- printf "%s%s" (include "opentelemetry-collector.fullname" .) (.configmapSuffix) }} + {{- end -}} +{{- end }} + +{{/* +Create ConfigMap checksum annotation if configMap.existingPath is defined, otherwise use default templates +*/}} +{{- define "opentelemetry-collector.configTemplateChecksumAnnotation" -}} + {{- if .Values.configMap.existingPath -}} + checksum/config: {{ include (print $.Template.BasePath "/" .Values.configMap.existingPath) . | sha256sum }} + {{- else -}} + {{- if eq .Values.mode "daemonset" -}} + checksum/config: {{ include (print $.Template.BasePath "/configmap-agent.yaml") . | sha256sum }} + {{- else if eq .Values.mode "deployment" -}} + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- else if eq .Values.mode "statefulset" -}} + checksum/config: {{ include (print $.Template.BasePath "/configmap-statefulset.yaml") . | sha256sum }} + {{- end -}} + {{- end }} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/_pod.tpl b/helmcharts/services/opentelemetry-collector/templates/_pod.tpl new file mode 100644 index 00000000..07332ccc --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/_pod.tpl @@ -0,0 +1,221 @@ +{{- define "opentelemetry-collector.pod" -}} +{{- with .Values.imagePullSecrets }} +imagePullSecrets: + {{- toYaml . | nindent 2 }} +{{- end }} +serviceAccountName: {{ include "opentelemetry-collector.serviceAccountName" . 
}} +securityContext: + {{- toYaml .Values.podSecurityContext | nindent 2 }} +{{- with .Values.hostAliases }} +hostAliases: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- if $.Values.shareProcessNamespace }} +shareProcessNamespace: true +{{- end }} +containers: + - name: {{ include "opentelemetry-collector.lowercase_chartname" . }} + {{- if .Values.command.name }} + command: + - /{{ .Values.command.name }} + {{- end }} + args: + {{- if or .Values.configMap.create .Values.configMap.existingName }} + - --config=/conf/relay.yaml + {{- end }} + {{- range .Values.command.extraArgs }} + - {{ . }} + {{- end }} + securityContext: + {{- if and (not (.Values.securityContext)) (.Values.presets.logsCollection.storeCheckpoints) }} + runAsUser: 0 + runAsGroup: 0 + {{- else -}} + {{- toYaml .Values.securityContext | nindent 6 }} + {{- end }} + {{- if .Values.image.digest }} + image: "{{ ternary "" (print (.Values.global).imageRegistry "/") (empty (.Values.global).imageRegistry) }}{{ .Values.image.repository }}@{{ .Values.image.digest }}" + {{- else }} + image: "{{ ternary "" (print (.Values.global).imageRegistry "/") (empty (.Values.global).imageRegistry) }}{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + + {{- $ports := include "opentelemetry-collector.podPortsConfig" . 
}} + {{- if $ports }} + ports: + {{- $ports | nindent 6}} + {{- end }} + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + {{- if or .Values.presets.kubeletMetrics.enabled (and .Values.presets.kubernetesAttributes.enabled (eq .Values.mode "daemonset")) }} + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + {{- end }} + {{- if and (.Values.useGOMEMLIMIT) ((((.Values.resources).limits).memory)) }} + - name: GOMEMLIMIT + value: {{ include "opentelemetry-collector.gomemlimit" .Values.resources.limits.memory | quote }} + {{- end }} + {{- with .Values.extraEnvs }} + {{- . | toYaml | nindent 6 }} + {{- end }} + {{- with .Values.extraEnvsFrom }} + envFrom: + {{- . | toYaml | nindent 6 }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: + {{- toYaml .Values.lifecycleHooks | nindent 6 }} + {{- end }} + livenessProbe: + {{- if .Values.livenessProbe.initialDelaySeconds | empty | not }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + {{- end }} + {{- if .Values.livenessProbe.periodSeconds | empty | not }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + {{- end }} + {{- if .Values.livenessProbe.timeoutSeconds | empty | not }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.livenessProbe.failureThreshold | empty | not }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.livenessProbe.terminationGracePeriodSeconds | empty | not }} + terminationGracePeriodSeconds: {{ .Values.livenessProbe.terminationGracePeriodSeconds }} + {{- end }} + httpGet: + path: {{ .Values.livenessProbe.httpGet.path }} + port: {{ .Values.livenessProbe.httpGet.port }} + readinessProbe: + {{- if .Values.readinessProbe.initialDelaySeconds | empty | not }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + {{- end }} + {{- if .Values.readinessProbe.periodSeconds | empty | not }} + 
periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + {{- end }} + {{- if .Values.readinessProbe.timeoutSeconds | empty | not }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.readinessProbe.successThreshold | empty | not }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + {{- end }} + {{- if .Values.readinessProbe.failureThreshold | empty | not }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + httpGet: + path: {{ .Values.readinessProbe.httpGet.path }} + port: {{ .Values.readinessProbe.httpGet.port }} + {{- if .Values.startupProbe }} + startupProbe: + {{- if .Values.startupProbe.initialDelaySeconds | empty | not }} + initialDelaySeconds: {{ .Values.startupProbe.initialDelaySeconds }} + {{- end }} + {{- if .Values.startupProbe.periodSeconds | empty | not }} + periodSeconds: {{ .Values.startupProbe.periodSeconds }} + {{- end }} + {{- if .Values.startupProbe.timeoutSeconds | empty | not }} + timeoutSeconds: {{ .Values.startupProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.startupProbe.failureThreshold | empty | not }} + failureThreshold: {{ .Values.startupProbe.failureThreshold }} + {{- end }} + {{- if .Values.startupProbe.terminationGracePeriodSeconds | empty | not }} + terminationGracePeriodSeconds: {{ .Values.startupProbe.terminationGracePeriodSeconds }} + {{- end }} + httpGet: + path: {{ .Values.startupProbe.httpGet.path }} + port: {{ .Values.startupProbe.httpGet.port }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + {{- if or .Values.configMap.create .Values.configMap.existingName }} + - mountPath: /conf + name: {{ include "opentelemetry-collector.lowercase_chartname" . 
}}-configmap + {{- end }} + {{- if .Values.presets.logsCollection.enabled }} + - name: varlogpods + mountPath: /var/log/pods + readOnly: true + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + {{- if .Values.presets.logsCollection.storeCheckpoints}} + - name: varlibotelcol + mountPath: /var/lib/otelcol + {{- end }} + {{- end }} + {{- if .Values.presets.hostMetrics.enabled }} + - name: hostfs + mountPath: /hostfs + readOnly: true + mountPropagation: HostToContainer + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- tpl (toYaml .Values.extraVolumeMounts) . | nindent 6 }} + {{- end }} +{{- if .Values.extraContainers }} + {{- tpl (toYaml .Values.extraContainers) . | nindent 2 }} +{{- end }} +{{- if .Values.initContainers }} +initContainers: + {{- tpl (toYaml .Values.initContainers) . | nindent 2 }} +{{- end }} +{{- if .Values.priorityClassName }} +priorityClassName: {{ .Values.priorityClassName | quote }} +{{- end }} +volumes: + {{- if or .Values.configMap.create .Values.configMap.existingName }} + - name: {{ include "opentelemetry-collector.lowercase_chartname" . }}-configmap + configMap: + name: {{ include "opentelemetry-collector.configName" . }} + items: + - key: relay + path: relay.yaml + {{- end }} + {{- if .Values.presets.logsCollection.enabled }} + - name: varlogpods + hostPath: + path: /var/log/pods + {{- if .Values.presets.logsCollection.storeCheckpoints}} + - name: varlibotelcol + hostPath: + path: /var/lib/otelcol + type: DirectoryOrCreate + {{- end }} + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + {{- end }} + {{- if .Values.presets.hostMetrics.enabled }} + - name: hostfs + hostPath: + path: / + {{- end }} + {{- if .Values.extraVolumes }} + {{- tpl (toYaml .Values.extraVolumes) . | nindent 2 }} + {{- end }} +{{- with .Values.nodeSelector }} +nodeSelector: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.affinity }} +affinity: + {{- toYaml . 
| nindent 2 }} +{{- end }} +{{- with .Values.tolerations }} +tolerations: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.topologySpreadConstraints }} +topologySpreadConstraints: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/clusterrole.yaml b/helmcharts/services/opentelemetry-collector/templates/clusterrole.yaml new file mode 100644 index 00000000..7d19652f --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/clusterrole.yaml @@ -0,0 +1,56 @@ +{{- if or (.Values.clusterRole.create) (.Values.presets.kubernetesAttributes.enabled) (.Values.presets.clusterMetrics.enabled) (.Values.presets.kubeletMetrics.enabled) (.Values.presets.kubernetesEvents.enabled) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "common.names.fullname" . }} + labels: + {{- include "opentelemetry-collector.labels" . | nindent 4 }} + {{- if .Values.clusterRole.annotations }} + annotations: + {{- range $key, $value := .Values.clusterRole.annotations }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} +rules: + {{- if .Values.clusterRole.rules -}} + {{ toYaml .Values.clusterRole.rules | nindent 2 -}} + {{- end }} + {{- if .Values.presets.kubernetesAttributes.enabled}} + - apiGroups: [""] + resources: ["pods", "namespaces"] + verbs: ["get", "watch", "list"] + - apiGroups: ["apps"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["replicasets"] + verbs: ["get", "list", "watch"] + {{- end }} + {{- if .Values.presets.clusterMetrics.enabled}} + - apiGroups: [""] + resources: ["events", "namespaces", "namespaces/status", "nodes", "nodes/spec", "pods", "pods/status", "replicationcontrollers", "replicationcontrollers/status", "resourcequotas", "services" ] + verbs: ["get", "list", "watch"] + - apiGroups: ["apps"] + resources: ["daemonsets", "deployments", 
"replicasets", "statefulsets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["extensions"] + resources: ["daemonsets", "deployments", "replicasets"] + verbs: ["get", "list", "watch"] + - apiGroups: ["batch"] + resources: ["jobs", "cronjobs"] + verbs: ["get", "list", "watch"] + - apiGroups: ["autoscaling"] + resources: ["horizontalpodautoscalers"] + verbs: ["get", "list", "watch"] + {{- end}} + {{- if .Values.presets.kubeletMetrics.enabled}} + - apiGroups: [""] + resources: ["nodes/stats"] + verbs: ["get", "watch", "list"] + {{- end }} + {{- if .Values.presets.kubernetesEvents.enabled }} + - apiGroups: ["events.k8s.io"] + resources: ["events"] + verbs: ["watch", "list"] + {{- end }} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/clusterrolebinding.yaml b/helmcharts/services/opentelemetry-collector/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..cda42ad2 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/clusterrolebinding.yaml @@ -0,0 +1,22 @@ +{{- if or (.Values.clusterRole.create) (.Values.presets.kubernetesAttributes.enabled) (.Values.presets.clusterMetrics.enabled) (.Values.presets.kubeletMetrics.enabled) (.Values.presets.kubernetesEvents.enabled) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "common.names.fullname" . }} + labels: + {{- include "opentelemetry-collector.labels" . | nindent 4 }} + {{- if .Values.clusterRole.clusterRoleBinding.annotations }} + annotations: + {{- range $key, $value := .Values.clusterRole.clusterRoleBinding.annotations }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "common.names.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ include "opentelemetry-collector.serviceAccountName" . }} + namespace: {{ include "base.namespace" . 
}} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/configmap-agent.yaml b/helmcharts/services/opentelemetry-collector/templates/configmap-agent.yaml new file mode 100644 index 00000000..5e07daae --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/configmap-agent.yaml @@ -0,0 +1,12 @@ +{{- if and (eq .Values.mode "daemonset") (.Values.configMap.create) (not .Values.configMap.existingName) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "opentelemetry-collector.fullname" . }}-agent + namespace: {{ template "opentelemetry-collector.namespace" . }} + labels: + {{- include "opentelemetry-collector.labels" . | nindent 4 }} +data: + relay: | + {{- include "opentelemetry-collector.daemonsetConfig" . | nindent 4 -}} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/configmap-statefulset.yaml b/helmcharts/services/opentelemetry-collector/templates/configmap-statefulset.yaml new file mode 100644 index 00000000..157f6a07 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/configmap-statefulset.yaml @@ -0,0 +1,12 @@ +{{- if and (eq .Values.mode "statefulset") (.Values.configMap.create) (not .Values.configMap.existingName) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "opentelemetry-collector.fullname" . }}-statefulset + namespace: {{ template "opentelemetry-collector.namespace" . }} + labels: + {{- include "opentelemetry-collector.labels" . | nindent 4 }} +data: + relay: | + {{- include "opentelemetry-collector.deploymentConfig" . 
| nindent 4 -}} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/configmap.yaml b/helmcharts/services/opentelemetry-collector/templates/configmap.yaml new file mode 100644 index 00000000..15cffad9 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/configmap.yaml @@ -0,0 +1,12 @@ +{{- if and (eq .Values.mode "deployment") (.Values.configMap.create) (not .Values.configMap.existingName) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "base.namespace" . }} + labels: + {{- include "opentelemetry-collector.labels" . | nindent 4 }} +data: + relay: | + {{- include "opentelemetry-collector.deploymentConfig" . | nindent 4 -}} +{{- end }} \ No newline at end of file diff --git a/helmcharts/services/opentelemetry-collector/templates/daemonset.yaml b/helmcharts/services/opentelemetry-collector/templates/daemonset.yaml new file mode 100644 index 00000000..de563e56 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/daemonset.yaml @@ -0,0 +1,49 @@ +{{- if eq .Values.mode "daemonset" -}} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ include "opentelemetry-collector.fullname" . }}-agent + namespace: {{ template "opentelemetry-collector.namespace" . }} + labels: + {{- include "opentelemetry-collector.labels" . | nindent 4 }} + {{- if .Values.annotations }} + annotations: + {{- range $key, $value := .Values.annotations }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "opentelemetry-collector.selectorLabels" . | nindent 6 }} + {{- include "opentelemetry-collector.component" . | nindent 6 }} + updateStrategy: + {{- if eq .Values.rollout.strategy "RollingUpdate" }} + {{- with .Values.rollout.rollingUpdate }} + rollingUpdate: + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- end }} + type: {{ .Values.rollout.strategy }} + template: + metadata: + annotations: + {{- include "opentelemetry-collector.configTemplateChecksumAnnotation" . | nindent 8 }} + {{- include "opentelemetry-collector.podAnnotations" . | nindent 8 }} + labels: + {{- include "opentelemetry-collector.selectorLabels" . | nindent 8 }} + {{- include "opentelemetry-collector.component" . | nindent 8 }} + {{- include "opentelemetry-collector.podLabels" . | nindent 8 }} + spec: + {{- $podValues := deepCopy .Values }} + {{- $podData := dict "Values" $podValues "configmapSuffix" "-agent" "isAgent" true }} + {{- include "opentelemetry-collector.pod" ($podData | mustMergeOverwrite (deepCopy .)) | nindent 6 }} + hostNetwork: {{ .Values.hostNetwork }} + {{- with .Values.dnsPolicy }} + dnsPolicy: {{ . }} + {{- end }} + {{- with .Values.dnsConfig }} + dnsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/deployment.yaml b/helmcharts/services/opentelemetry-collector/templates/deployment.yaml new file mode 100644 index 00000000..86c37c69 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/deployment.yaml @@ -0,0 +1,53 @@ +{{- if eq .Values.mode "deployment" -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "base.namespace" . }} + labels: + {{- include "opentelemetry-collector.labels" . | nindent 4 }} + {{- if .Values.annotations }} + annotations: + {{- range $key, $value := .Values.annotations }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} +spec: +{{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} +{{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "opentelemetry-collector.selectorLabels" . 
| nindent 6 }} + {{- include "opentelemetry-collector.component" . | nindent 6 }} + strategy: + {{- if eq .Values.rollout.strategy "RollingUpdate" }} + {{- with .Values.rollout.rollingUpdate }} + rollingUpdate: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- end }} + type: {{ .Values.rollout.strategy }} + template: + metadata: + annotations: + {{- include "opentelemetry-collector.configTemplateChecksumAnnotation" . | nindent 8 }} + {{- include "opentelemetry-collector.podAnnotations" . | nindent 8 }} + labels: + {{- include "opentelemetry-collector.selectorLabels" . | nindent 8 }} + {{- include "opentelemetry-collector.component" . | nindent 8 }} + {{- include "opentelemetry-collector.podLabels" . | nindent 8 }} + spec: + {{- with .Values.dnsPolicy }} + dnsPolicy: {{ . }} + {{- end }} + {{- with .Values.dnsConfig }} + dnsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- $podValues := deepCopy .Values }} + {{- $podData := dict "Values" $podValues "configmapSuffix" "" "isAgent" false }} + {{- include "opentelemetry-collector.pod" ($podData | mustMergeOverwrite (deepCopy .)) | nindent 6 }} + hostNetwork: {{ .Values.hostNetwork }} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/hpa.yaml b/helmcharts/services/opentelemetry-collector/templates/hpa.yaml new file mode 100644 index 00000000..c5d06333 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/hpa.yaml @@ -0,0 +1,37 @@ +{{- if and .Values.autoscaling.enabled (or (eq .Values.mode "deployment") (eq .Values.mode "statefulset")) }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "opentelemetry-collector.fullname" . }} + namespace: {{ template "opentelemetry-collector.namespace" . }} + labels: + {{- include "opentelemetry-collector.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: {{ include "opentelemetry-collector.hpaKind" . }} + name: {{ include "opentelemetry-collector.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + {{- if .Values.autoscaling.behavior }} + behavior: + {{- toYaml .Values.autoscaling.behavior | nindent 4 }} + {{- end }} + metrics: + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/ingress.yaml b/helmcharts/services/opentelemetry-collector/templates/ingress.yaml new file mode 100644 index 00000000..0a5a4ef6 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/ingress.yaml @@ -0,0 +1,55 @@ +{{- if .Values.ingress.enabled -}} +{{- $ingresses := prepend .Values.ingress.additionalIngresses .Values.ingress -}} +{{- range $ingresses }} +apiVersion: "networking.k8s.io/v1" +kind: Ingress +metadata: + {{- if .name }} + name: {{ printf "%s-%s" (include "opentelemetry-collector.fullname" $) .name }} + {{- else }} + name: {{ include "opentelemetry-collector.fullname" $ }} + {{- end }} + namespace: {{ template "opentelemetry-collector.namespace" $ }} + labels: + {{- include "opentelemetry-collector.labels" $ | nindent 4 }} + {{- include "opentelemetry-collector.component" $ | nindent 4 }} + {{- if .annotations }} + annotations: + {{- range $key, $value := .annotations }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if .ingressClassName }} + ingressClassName: {{ .ingressClassName }} + {{- end -}} + {{- if .tls }} + tls: + {{- range .tls }} + - hosts: + {{- range .hosts }} + - {{ tpl . 
$ | quote }} + {{- end }} + {{- with .secretName }} + secretName: {{ . }} + {{- end }} + {{- end }} + {{- end }} + rules: + {{- range .hosts }} + - host: {{ tpl .host $ | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + pathType: {{ .pathType }} + backend: + service: + name: {{ include "opentelemetry-collector.fullname" $ }} + port: + number: {{ .port }} + {{- end }} + {{- end }} +--- +{{- end }} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/networkpolicy.yaml b/helmcharts/services/opentelemetry-collector/templates/networkpolicy.yaml new file mode 100644 index 00000000..58d58c9e --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/networkpolicy.yaml @@ -0,0 +1,39 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ include "opentelemetry-collector.fullname" . }} + namespace: {{ template "opentelemetry-collector.namespace" . }} + labels: + {{- include "opentelemetry-collector.labels" . | nindent 4 }} + {{- if .Values.networkPolicy.annotations }} + annotations: + {{- range $key, $value := .Values.networkPolicy.annotations }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "opentelemetry-collector.selectorLabels" . | nindent 6 }} + {{- include "opentelemetry-collector.component" . 
| nindent 6 }} + ingress: + - ports: + {{- range $port := .Values.ports }} + {{- if $port.enabled }} + - port: {{ $port.containerPort }} + protocol: {{ $port.protocol }} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.allowIngressFrom }} + from: + {{- toYaml .Values.networkPolicy.allowIngressFrom | nindent 8 }} + {{- end }} + {{- if .Values.networkPolicy.extraIngressRules }} + {{- toYaml .Values.networkPolicy.extraIngressRules | nindent 4 }} + {{- end }} + {{- if .Values.networkPolicy.egressRules }} + egress: + {{- toYaml .Values.networkPolicy.egressRules | nindent 4 }} + {{- end }} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/pdb.yaml b/helmcharts/services/opentelemetry-collector/templates/pdb.yaml new file mode 100644 index 00000000..c386a47b --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/pdb.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.podDisruptionBudget.enabled (eq .Values.mode "deployment") }} +apiVersion: {{ include "podDisruptionBudget.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "opentelemetry-collector.fullname" . }} + namespace: {{ template "opentelemetry-collector.namespace" . }} + labels: + {{- include "opentelemetry-collector.labels" . | nindent 4 }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + {{- include "opentelemetry-collector.selectorLabels" . 
| nindent 6 }} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/podmonitor.yaml b/helmcharts/services/opentelemetry-collector/templates/podmonitor.yaml new file mode 100644 index 00000000..9bc5465a --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/podmonitor.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.podMonitor.enabled .Values.podMonitor.metricsEndpoints }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ include "opentelemetry-collector.fullname" . }}-agent + namespace: {{ template "opentelemetry-collector.namespace" . }} + labels: + {{- include "opentelemetry-collector.labels" . | nindent 4 }} + {{- range $key, $value := .Values.podMonitor.extraLabels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "opentelemetry-collector.selectorLabels" . | nindent 6 }} + {{- include "opentelemetry-collector.component" . | nindent 6 }} + podMetricsEndpoints: + {{- toYaml .Values.podMonitor.metricsEndpoints | nindent 2 }} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/prometheusrule.yaml b/helmcharts/services/opentelemetry-collector/templates/prometheusrule.yaml new file mode 100644 index 00000000..d6446aba --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/prometheusrule.yaml @@ -0,0 +1,112 @@ +{{- if and .Values.prometheusRule.enabled .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "opentelemetry-collector.fullname" . }} + namespace: {{ template "opentelemetry-collector.namespace" . }} + labels: + {{- include "opentelemetry-collector.labels" . 
| nindent 4 }} + {{- range $key, $value := .Values.prometheusRule.extraLabels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + groups: +{{- if .Values.prometheusRule.groups }} + {{- toYaml .Values.prometheusRule.groups | nindent 4 }} +{{- end }} +{{- if .Values.prometheusRule.defaultRules.enabled }} + - name: collectorRules + rules: + - alert: ReceiverDroppedSpans + expr: rate(otelcol_receiver_refused_spans[5m]) > 0 + for: 2m + labels: + severity: critical + annotations: + description: '{{`The {{ $labels.receiver }} receiver is dropping spans at a rate of {{ humanize $value }} per second `}}' + runbook_url: 'https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/monitoring.md#receive-failures' + - alert: ReceiverDroppedMetrics + expr: rate(otelcol_receiver_refused_metric_points[5m]) > 0 + for: 2m + labels: + severity: critical + annotations: + description: '{{`The {{ $labels.receiver }} receiver is dropping metrics at a rate of {{ humanize $value }} per second `}}' + runbook_url: 'https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/monitoring.md#receive-failures' + - alert: ReceiverDroppedLogs + expr: rate(otelcol_receiver_refused_log_records[5m]) > 0 + for: 5m + labels: + severity: critical + annotations: + description: '{{` The {{ $labels.receiver }} is dropping logs at a rate of {{ humanize $value }} per second `}}' + runbook_url: 'https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/monitoring.md#receive-failures' + - alert: ProcessorDroppedSpans + expr: rate(otelcol_processor_dropped_spans[5m]) > 0 + for: 2m + labels: + severity: critical + annotations: + description: '{{`The {{ $labels.processor }} processor is dropping spans at a rate of {{ humanize $value }} per second `}}' + runbook_url: 'https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/monitoring.md#data-loss' + - alert: ProcessorDroppedMetrics + expr: 
rate(otelcol_processor_dropped_metric_points[5m]) > 0 + for: 2m + labels: + severity: critical + annotations: + description: '{{`The {{ $labels.processor }} processor is dropping metrics at a rate of {{ humanize $value }} per second `}}' + runbook_url: 'https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/monitoring.md#data-loss' + - alert: ProcessorDroppedLogs + expr: rate(otelcol_processor_dropped_log_records[5m]) > 0 + for: 5m + labels: + severity: critical + annotations: + description: '{{` The {{ $labels.processor }} is dropping logs at a rate of {{ humanize $value }} per second `}}' + runbook_url: 'https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/monitoring.md#data-loss' + - alert: ExporterDroppedSpans + expr: rate(otelcol_exporter_send_failed_spans[5m]) > 0 + for: 2m + labels: + severity: critical + annotations: + description: '{{`The {{ $labels.exporter }} exporter is dropping spans at a rate of {{ humanize $value }} per second `}}' + runbook_url: 'https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/monitoring.md#data-egress' + - alert: ExporterDroppedMetrics + expr: rate(otelcol_exporter_send_failed_metric_points[5m]) > 0 + for: 2m + labels: + severity: critical + annotations: + description: '{{`The {{ $labels.exporter }} exporter is dropping metrics at a rate of {{ humanize $value }} per second `}}' + runbook_url: 'https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/monitoring.md#data-egress' + - alert: ExporterDroppedLogs + expr: rate(otelcol_exporter_send_failed_log_records[5m]) > 0 + for: 5m + labels: + severity: critical + annotations: + description: '{{` The {{ $labels.exporter }} is dropping logs at a rate of {{ humanize $value }} per second `}}' + runbook_url: 'https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/monitoring.md#data-egress' + - alert: ExporterQueueSize + expr: otelcol_exporter_queue_size > 
5000 + for: 1m + labels: + severity: warning + annotations: + description: '{{`The {{ $labels.exporter }} queue has reached a size of {{ $value }} `}}' + runbook_url: 'https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/monitoring.md#queue-length' +{{- $signals := list "spans" "metric_points" "log_records" }} +{{- range $signal := $signals }} + - alert: SendQueueFailed{{ $signal }} + expr: rate(otelcol_exporter_enqueue_failed_{{ $signal }}[5m]) > 0 + for: 1m + labels: + severity: warning + annotations: + description: '{{`The {{ $labels.exporter }} sending queue failed to accept {{ $value }} `}} {{ $signal }}' + runbook_url: 'https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/monitoring.md#queue-length' +{{- end }} +{{- end }} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/service.yaml b/helmcharts/services/opentelemetry-collector/templates/service.yaml new file mode 100644 index 00000000..0e2f73f6 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/service.yaml @@ -0,0 +1,42 @@ +{{- if or (eq (include "opentelemetry-collector.serviceEnabled" .) "true") (.Values.ingress.enabled) -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "base.namespace" . }} + labels: + {{- include "opentelemetry-collector.labels" . | nindent 4 }} + {{- include "opentelemetry-collector.component" . 
| nindent 4 }} + {{- if .Values.service.annotations }} + annotations: + {{- range $key, $value := .Values.service.annotations }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if and .Values.service.loadBalancerIP (eq .Values.service.type "LoadBalancer") }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if and .Values.service.loadBalancerSourceRanges (eq .Values.service.type "LoadBalancer") }} + loadBalancerSourceRanges: + {{- range .Values.service.loadBalancerSourceRanges }} + - {{ . }} + {{- end }} + {{- end }} + {{- $ports := include "opentelemetry-collector.servicePortsConfig" . }} + {{- if $ports }} + ports: + {{- $ports | nindent 4}} + {{- end }} + selector: + {{- include "opentelemetry-collector.selectorLabels" . | nindent 4 }} + {{- include "opentelemetry-collector.component" . | nindent 4 }} + internalTrafficPolicy: {{ include "opentelemetry-collector.serviceInternalTrafficPolicy" . }} + {{- if and (eq .Values.service.type "LoadBalancer") (.Values.service.externalTrafficPolicy) }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} + {{- end }} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/serviceaccount.yaml b/helmcharts/services/opentelemetry-collector/templates/serviceaccount.yaml new file mode 100644 index 00000000..329cf915 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if or (.Values.serviceAccount.create) (.Values.presets.kubeletMetrics.enabled) -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "base.namespace" . }} + labels: + {{- include "opentelemetry-collector.labels" . 
| nindent 4 }} + {{- if .Values.serviceAccount.annotations }} + annotations: + {{- range $key, $value := .Values.serviceAccount.annotations }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} +{{ end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/servicemonitor.yaml b/helmcharts/services/opentelemetry-collector/templates/servicemonitor.yaml new file mode 100644 index 00000000..8cd29c4d --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/servicemonitor.yaml @@ -0,0 +1,27 @@ +{{- if and (eq (include "opentelemetry-collector.serviceEnabled" .) "true") .Values.serviceMonitor.enabled .Values.serviceMonitor.metricsEndpoints }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ include "base.namespace" . }} + labels: + {{- include "opentelemetry-collector.labels" . | nindent 4 }} + {{- range $key, $value := .Values.serviceMonitor.extraLabels }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + {{- include "opentelemetry-collector.selectorLabels" . | nindent 6 }} + {{- include "opentelemetry-collector.component" . | nindent 6 }} + endpoints: + {{- toYaml .Values.serviceMonitor.metricsEndpoints | nindent 4 }} + {{- with .Values.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml . 
| nindent 8 }} + {{- end }} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/templates/statefulset.yaml b/helmcharts/services/opentelemetry-collector/templates/statefulset.yaml new file mode 100644 index 00000000..7a2cb964 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/templates/statefulset.yaml @@ -0,0 +1,57 @@ +{{- if eq .Values.mode "statefulset" -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "opentelemetry-collector.fullname" . }} + namespace: {{ template "opentelemetry-collector.namespace" . }} + labels: + {{- include "opentelemetry-collector.labels" . | nindent 4 }} + {{- if .Values.annotations }} + annotations: + {{- range $key, $value := .Values.annotations }} + {{- printf "%s: %s" $key (tpl $value $ | quote) | nindent 4 }} + {{- end }} + {{- end }} +spec: + serviceName: {{ include "opentelemetry-collector.fullname" . }} + podManagementPolicy: {{ .Values.statefulset.podManagementPolicy }} +{{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} +{{- end }} +{{- if .Values.statefulset.persistentVolumeClaimRetentionPolicy.enabled }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ .Values.statefulset.persistentVolumeClaimRetentionPolicy.whenDeleted }} + whenScaled: {{ .Values.statefulset.persistentVolumeClaimRetentionPolicy.whenScaled }} +{{- end }} + selector: + matchLabels: + {{- include "opentelemetry-collector.selectorLabels" . | nindent 6 }} + {{- include "opentelemetry-collector.component" . | nindent 6 }} + updateStrategy: + type: {{ .Values.rollout.strategy }} + template: + metadata: + annotations: + {{- include "opentelemetry-collector.configTemplateChecksumAnnotation" . | nindent 8 }} + {{- include "opentelemetry-collector.podAnnotations" . | nindent 8 }} + labels: + {{- include "opentelemetry-collector.selectorLabels" . | nindent 8 }} + {{- include "opentelemetry-collector.component" . | nindent 8 }} + {{- include "opentelemetry-collector.podLabels" . 
| nindent 8 }} + spec: + {{- with .Values.dnsPolicy }} + dnsPolicy: {{ . }} + {{- end }} + {{- with .Values.dnsConfig }} + dnsConfig: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- $podValues := deepCopy .Values }} + {{- $podData := dict "Values" $podValues "configmapSuffix" "-statefulset" "isAgent" false }} + {{- include "opentelemetry-collector.pod" ($podData | mustMergeOverwrite (deepCopy .)) | nindent 6 }} + hostNetwork: {{ .Values.hostNetwork }} + {{- with .Values.statefulset.volumeClaimTemplates }} + volumeClaimTemplates: + {{- toYaml . | nindent 2 }} + {{- end }} +{{- end }} diff --git a/helmcharts/services/opentelemetry-collector/values.schema.json b/helmcharts/services/opentelemetry-collector/values.schema.json new file mode 100644 index 00000000..d4442a52 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/values.schema.json @@ -0,0 +1,989 @@ +{ + "$schema": "http://json-schema.org/schema#", + "$defs": { + "intOrString": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "integer" + } + ] + } + }, + "type": "object", + "title": "Values", + "additionalProperties": true, + "properties": { + "enabled": { + "description": "Usually used when using Opentelemetry-collector as a subchart.", + "type": "boolean" + }, + "global": { + "type": "object" + }, + "nameOverride": { + "description": "Override name of the chart used in Kubernetes object names.", + "type": "string" + }, + "fullnameOverride": { + "description": "Override fully qualified app name.", + "type": "string" + }, + "mode": { + "type": "string", + "enum": ["daemonset", "deployment", "statefulset", ""] + }, + "namespaceOverride": { + "type": "string", + "description": "Name of the namespace to deploy the resources into." 
+ }, + "presets": { + "type": "object", + "additionalProperties": true, + "properties": { + "logsCollection": { + "type": "object", + "additionalProperties": true, + "properties": { + "enabled": { + "description": "Specifies whether the collector should collect logs.", + "type": "boolean" + }, + "includeCollectorLogs": { + "description": "Specifies whether the collector should collect its own logs.", + "type": "boolean" + }, + "storeCheckpoints": { + "description": "Specifies whether logs checkpoints should be stored in /var/lib/otelcol/ host directory.", + "type": "boolean" + }, + "maxRecombineLogSize": { + "description": "Specifies the max recombine log size.", + "type": "integer" + } + } + }, + "hostMetrics": { + "type": "object", + "additionalProperties": true, + "properties": { + "enabled": { + "description": "Specifies whether the collector should collect host metrics.", + "type": "boolean" + } + } + }, + "kubeletMetrics": { + "type": "object", + "additionalProperties": true, + "properties": { + "enabled": { + "description": "Specifies whether the collector should collect kubelet metrics.", + "type": "boolean" + } + } + }, + "kubernetesAttributes": { + "type": "object", + "additionalProperties": true, + "properties": { + "enabled": { + "description": "Specifies whether the collector should add Kubernetes metdata to resource attributes.", + "type": "boolean" + }, + "extractAllPodLabels": { + "description": "Specifies whether the k8sattributes processor should extract all pod labels.", + "type": "boolean" + }, + "extractAllPodAnnotations": { + "description": "Specifies whether the k8sattributes processor should extract all pod annotations.", + "type": "boolean" + } + } + }, + "kubernetesEvents": { + "type": "object", + "additionalProperties": true, + "properties": { + "enabled": { + "description": "Specifies whether the collector should collect Kubernetes objects.", + "type": "boolean" + } + } + }, + "clusterMetrics": { + "type": "object", + 
"additionalProperties": true, + "properties": { + "enabled": { + "description": "Specifies whether the collector should collect cluster metrics.", + "type": "boolean" + } + } + } + } + }, + "configMap": { + "type": "object", + "properties": { + "create": { + "description": "Specifies whether a configMap should be created (true by default).", + "type": "boolean" + }, + "existingName": { + "description": "Specifies an existing configMap to be mounted to the pod", + "type": "string" + }, + "existingPath": { + "description": "Relative path to the template file used to generate a custom configMap. Needed for pod restart on custom config change.", + "type": "string" + } + } + }, + "alternateConfig": { + "description": "When not empty, will be used instead of config. Presets will be merged in the same way they are for config.", + "type": "object" + }, + "config": { + "description": "Configuration that applies to both standalone and agent collector. Overwritable by standalone and agent specific configs.", + "type": "object" + }, + "image": { + "description": "Image use in both standalone and agent configs", + "type": "object", + "additionalProperties": true, + "properties": { + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "digest": { + "type": "string" + }, + "pullPolicy": { + "type": "string", + "enum": ["IfNotPresent", "Always", "Never"] + } + } + }, + "imagePullSecrets": { + "type": "array", + "items": { + "type": "object" + } + }, + "command": { + "description": "OpenTelemetry Collector executable", + "type": "object", + "additionalProperties": true, + "properties": { + "name": { + "type": "string" + }, + "extraArgs": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "serviceAccount": { + "type": "object", + "additionalProperties": true, + "properties": { + "create": { + "type": "boolean" + }, + "annotations": { + "type": "object" + }, + "name": { + "type": "string" + } + }, + 
"required": ["create"] + }, + "clusterRole": { + "type": "object", + "additionalProperties": true, + "properties": { + "create": { + "type": "boolean" + }, + "annotations": { + "type": "object" + }, + "name": { + "type": "string" + }, + "rules": { + "type": "array", + "items": { + "type": "object" + } + }, + "clusterRoleBinding": { + "type": "object", + "additionalProperties": true, + "properties": { + "annotations": { + "type": "object" + }, + "name": { + "type": "string" + } + } + } + }, + "required": ["create"] + }, + "podSecurityContext": { + "type": "object" + }, + "securityContext": { + "type": "object" + }, + "nodeSelector": { + "type": "object" + }, + "tolerations": { + "type": "array", + "items": { + "type": "object" + } + }, + "affinity": { + "type": "object" + }, + "topologySpreadConstraints": { + "type": "array", + "items": { + "type": "object" + } + }, + "priorityClassName": { + "type": "string" + }, + "extraContainers": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true, + "properties": { + "name": { + "type": "string" + } + }, + "required": ["name"] + } + }, + "initContainers": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true, + "properties": { + "name": { + "type": "string" + } + }, + "required": ["name"] + } + }, + "extraEnvs": { + "type": "array", + "items": { + "type": "object" + } + }, + "extraEnvsFrom": { + "type": "array", + "items": { + "type": "object" + } + }, + "extraVolumes": { + "type": "array", + "items": { + "type": "object" + } + }, + "extraVolumeMounts": { + "type": "array", + "items": { + "type": "object" + } + }, + "ports": { + "type": "object", + "patternProperties": { + ".*": { + "type": "object", + "additionalProperties": true, + "properties": { + "enabled": { + "type": "boolean" + }, + "containerPort": { + "type": "integer" + }, + "servicePort": { + "type": "integer" + }, + "hostPort": { + "type": "integer" + }, + "nodePort": { + "type": "integer" + }, + 
"protocol": { + "type": "string" + }, + "appProtocol": { + "type": "string" + } + }, + "required": ["enabled"] + } + } + }, + "resources": { + "type": "object", + "additionalProperties": true, + "properties": { + "limits": { + "type": "object", + "additionalProperties": true, + "properties": { + "cpu": { + "type": ["string", "integer"] + }, + "ephemeral-storage": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + }, + "requests": { + "type": "object", + "additionalProperties": true, + "properties": { + "cpu": { + "type": ["string", "integer"] + }, + "ephemeral-storage": { + "type": "string" + }, + "memory": { + "type": "string" + } + } + } + } + }, + "lifecycleHooks": { + "type": "object", + "additionalProperties": true, + "properties": { + "postStart": { + "type": "object", + "additionalProperties": true, + "properties": { + "exec": { + "type": "object", + "additionalProperties": true, + "properties": { + "command": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "httpGet": { + "type": "object", + "additionalProperties": true, + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "type": "integer" + }, + "scheme": { + "type": "string" + } + } + } + }, + "oneOf": [ + { + "required": ["exec"] + }, + { + "required": ["httpGet"] + } + ] + }, + "preStop": { + "type": "object", + "additionalProperties": true, + "properties": { + "exec": { + "type": "object", + "additionalProperties": true, + "properties": { + "command": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "httpGet": { + "type": "object", + "additionalProperties": true, + "properties": { + "host": { + "type": "string" + }, + "httpHeaders": { + "type": "array" + }, + "path": { + "type": "string" + }, + "port": { + "type": "integer" + }, + "scheme": { + "type": "string" + } + } + } + }, + "oneOf": [ + { + "required": ["exec"] + }, + { + "required": 
["httpGet"] + } + ] + } + } + }, + "livenessProbe": { + "type": "object", + "additionalProperties": true, + "properties": { + "initialDelaySeconds": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + }, + "timeoutSeconds": { + "type": "integer" + }, + "failureThreshold": { + "type": "integer" + }, + "terminationGracePeriodSeconds": { + "type": "integer" + }, + "httpGet": { + "type": "object", + "additionalProperties": true, + "properties": { + "port": { + "type": "integer" + }, + "path": { + "type": "string" + } + } + } + } + }, + "readinessProbe": { + "type": "object", + "additionalProperties": true, + "properties": { + "initialDelaySeconds": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + }, + "timeoutSeconds": { + "type": "integer" + }, + "successThreshold": { + "type": "integer" + }, + "failureThreshold": { + "type": "integer" + }, + "httpGet": { + "type": "object", + "additionalProperties": true, + "properties": { + "port": { + "type": "integer" + }, + "path": { + "type": "string" + } + } + } + } + }, + "startupProbe": { + "type": "object", + "additionalProperties": true, + "properties": { + "initialDelaySeconds": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + }, + "timeoutSeconds": { + "type": "integer" + }, + "failureThreshold": { + "type": "integer" + }, + "terminationGracePeriodSeconds": { + "type": "integer" + }, + "httpGet": { + "type": "object", + "additionalProperties": true, + "properties": { + "port": { + "type": "integer" + }, + "path": { + "type": "string" + } + } + } + } + }, + "podAnnotations": { + "type": "object" + }, + "podLabels": { + "type": "object" + }, + "additionalLabels": { + "type": "object" + }, + "hostNetwork": { + "type": "boolean" + }, + "hostAliases": { + "type": "array", + "items": { + "type": "object" + } + }, + "dnsPolicy": { + "type": "string", + "enum": ["ClusterFirst", "ClusterFirstWithHostNet", "Default", "None", ""] + }, + "dnsConfig": { + "type": "object" + 
}, + "replicaCount": { + "type": "integer" + }, + "revisionHistoryLimit": { + "type": "integer" + }, + "annotations": { + "type": "object" + }, + "service": { + "type": "object", + "additionalProperties": true, + "properties": { + "enabled": { + "type": "boolean" + }, + "type": { + "type": "string", + "enum": ["ClusterIP", "NodePort", "LoadBalancer", "ExternalName"] + }, + "clusterIP": { + "type": "string" + }, + "loadBalancerIP": { + "type": "string" + }, + "loadBalancerSourceRanges": { + "type": "array", + "items": { + "type": "string" + } + }, + "annotations": { + "type": "object" + }, + "internalTrafficPolicy": { + "type": "string", + "enum": ["Cluster", "Local", ""] + }, + "externalTrafficPolicy": { + "type": "string", + "enum": ["Cluster", "Local", ""] + } + } + }, + "ingress": { + "type": "object", + "additionalProperties": true, + "properties": { + "enabled": { + "type": "boolean" + }, + "ingressClassName": { + "type": "string" + }, + "annotations": { + "type": "object" + }, + "hosts": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true, + "properties": { + "host": { + "type": "string" + }, + "paths": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true, + "properties": { + "path": { + "type": "string" + }, + "pathType": { + "type": "string", + "enum": ["Exact", "Prefix", "ImplementationSpecific"] + }, + "port": { + "type": "integer" + } + }, + "required": ["path", "pathType", "port"] + } + } + }, + "required": ["paths"] + } + }, + "tls": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true, + "properties": { + "secretName": { + "type": "string" + }, + "hosts": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + }, + "additionalIngresses": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true, + "properties": { + "name": { + "type": "string" + }, + "ingressClassName": { + "type": "string" + }, + "annotations": { 
+ "type": "object" + }, + "hosts": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true, + "properties": { + "host": { + "type": "string" + }, + "paths": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true, + "properties": { + "path": { + "type": "string" + }, + "pathType": { + "type": "string", + "enum": [ + "Exact", + "Prefix", + "ImplementationSpecific" + ] + }, + "port": { + "type": "integer" + } + }, + "required": ["path", "pathType", "port"] + } + } + }, + "required": ["paths"] + } + }, + "tls": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": true, + "properties": { + "secretName": { + "type": "string" + }, + "hosts": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + }, + "required": ["name"] + } + } + }, + "required": ["enabled"] + }, + "podMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "metricsEndpoints": { + "type": "array", + "items": { + "type": "object" + } + }, + "extraLabels": { + "type": "object" + } + }, + "required": ["enabled"] + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "metricsEndpoints": { + "type": "array", + "items": { + "type": "object" + } + }, + "extraLabels": { + "type": "object" + }, + "relabelings": { + "type": "array", + "description": "The relabelings Schema", + "default": [], + "items": {} + }, + "metricRelabelings": { + "type": "array", + "default": [], + "title": "The metricRelabelings Schema", + "items": {}, + "examples": [ + [] + ] + } + }, + "required": ["enabled"] + }, + "podDisruptionBudget": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + }, + "required": ["enabled"] + }, + "autoscaling": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "minReplicas": { + "type": "integer" + }, + "maxReplicas": { + "type": "integer" + }, + "behavior": 
{ + "type": "object" + }, + "targetCPUUtilizationPercentage": { + "type": "integer" + } + }, + "required": ["enabled"] + }, + "rollout": { + "type": "object", + "properties": { + "rollingUpdate": { + "type": "object", + "properties": { + "maxSurge": { + "$ref": "#/$defs/intOrString" + }, + "maxUnavailable": { + "$ref": "#/$defs/intOrString" + } + } + }, + "strategy": { + "type": "string", + "enum": ["OnDelete", "Recreate", "RollingUpdate"], + "default": "RollingUpdate" + } + }, + "required": ["strategy"] + }, + "prometheusRule": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "groups": { + "type": "array", + "items": { + "type": "object" + } + }, + "defaultRules": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + }, + "required": ["enabled"] + }, + "extraLabels": { + "type": "object" + } + }, + "required": ["enabled"] + }, + "statefulset": { + "type": "object", + "properties": { + "volumeClaimTemplates": { + "type": "array", + "items": { + "type": "object" + } + }, + "podManagementPolicy": { + "type": "string" + } + } + }, + "networkPolicy": { + "type": "object", + "additionalProperties": true, + "properties": { + "enabled": { + "type": "boolean" + }, + "annotations": { + "type": "object" + }, + "allowIngressFrom": { + "type": "array", + "description": "List of sources which should be able to access the collector. See the standard NetworkPolicy 'spec.ingress.from' definition for more information: https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/. If left empty, ingress traffic will be permitted on to all enabled ports from all sources.", + "items": { + "type": "object" + } + }, + "extraIngressRules": { + "type": "array", + "description": "Additional ingress rules to apply to the policy. 
See the standard NetworkPolicy 'spec.ingress' definition for more information: https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/", + "items": { + "type": "object" + } + }, + "egressRules": { + "description": "Optional egress configuration, see the standard NetworkPolicy 'spec.egress' definition for more information: https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/", + "type": "array", + "items": { + "type": "object" + } + } + } + }, + "useGOMEMLIMIT": { + "type": "boolean" + }, + "shareProcessNamespace": { + "type": "boolean" + } + }, + "required": ["mode"] +} diff --git a/helmcharts/services/opentelemetry-collector/values.yaml b/helmcharts/services/opentelemetry-collector/values.yaml new file mode 100644 index 00000000..5491bfc7 --- /dev/null +++ b/helmcharts/services/opentelemetry-collector/values.yaml @@ -0,0 +1,658 @@ +# Default values for opentelemetry-collector. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +nameOverride: "" +fullnameOverride: "" + +# Valid values are "daemonset", "deployment", and "statefulset". +mode: "deployment" + +# Specify which namespace should be used to deploy the resources into +namespaceOverride: "" + +# Handles basic configuration of components that +# also require k8s modifications to work correctly. +# .Values.config can be used to modify/add to a preset +# component configuration, but CANNOT be used to remove +# preset configuration. If you require removal of any +# sections of a preset configuration, you cannot use +# the preset. Instead, configure the component manually in +# .Values.config and use the other fields supplied in the +# values.yaml to configure k8s as necessary. +presets: + # Configures the collector to collect logs. + # Adds the filelog receiver to the logs pipeline + # and adds the necessary volumes and volume mounts. + # Best used with mode = daemonset. 
+ # See https://opentelemetry.io/docs/kubernetes/collector/components/#filelog-receiver for details on the receiver. + logsCollection: + enabled: true + includeCollectorLogs: true + # Enabling this writes checkpoints in /var/lib/otelcol/ host directory. + # Note this changes collector's user to root, so that it can write to host directory. + storeCheckpoints: true + # The maximum bytes size of the recombined field. + # Once the size exceeds the limit, all received entries of the source will be combined and flushed. + maxRecombineLogSize: 102400 + # Configures the collector to collect host metrics. + # Adds the hostmetrics receiver to the metrics pipeline + # and adds the necessary volumes and volume mounts. + # Best used with mode = daemonset. + # See https://opentelemetry.io/docs/kubernetes/collector/components/#host-metrics-receiver for details on the receiver. + hostMetrics: + enabled: true + # Configures the Kubernetes Processor to add Kubernetes metadata. + # Adds the k8sattributes processor to all the pipelines + # and adds the necessary rules to ClusterRole. + # Best used with mode = daemonset. + # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor for details on the receiver. + kubernetesAttributes: + enabled: true + # When enabled the processor will extract all labels for an associated pod and add them as resource attributes. + # The label's exact name will be the key. + extractAllPodLabels: true + # When enabled the processor will extract all annotations for an associated pod and add them as resource attributes. + # The annotation's exact name will be the key. + extractAllPodAnnotations: true + # Configures the collector to collect node, pod, and container metrics from the API server on a kubelet. + # Adds the kubeletstats receiver to the metrics pipeline + # and adds the necessary rules to ClusterRole. + # Best used with mode = daemonset. 
+ # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubeletstats-receiver for details on the receiver. + kubeletMetrics: + enabled: true + # Configures the collector to collect kubernetes events. + # Adds the k8sobject receiver to the logs pipeline + # and collects kubernetes events by default. + # Best used with mode = deployment or statefulset. + # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-objects-receiver for details on the receiver. + kubernetesEvents: + enabled: true + # Configures the Kubernetes Cluster Receiver to collect cluster-level metrics. + # Adds the k8s_cluster receiver to the metrics pipeline + # and adds the necessary rules to ClusterRole. + # Best used with mode = deployment or statefulset. + # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-cluster-receiver for details on the receiver. + clusterMetrics: + enabled: true + +configMap: + # Specifies whether a configMap should be created (true by default) + create: true + # Specifies an existing ConfigMap to be mounted to the pod + # The ConfigMap MUST include the collector configuration via a key named 'relay' or the collector will not start. + existingName: "" + # Specifies the relative path to custom ConfigMap template file. This option SHOULD be used when bundling a custom + # ConfigMap template, as it enables pod restart via a template checksum annotation. + # existingPath: "" + +# Base collector configuration. +# Supports templating. To escape existing instances of {{ }}, use {{` `}}. +# For example, {{ REDACTED_EMAIL }} becomes {{` {{ REDACTED_EMAIL }} `}}. 
+config: + # extensions: + # basicauth/otlp: + # client_auth: + # username: admin + # password: admin + + exporters: + debug: + verbosity: detailed + sampling_initial: 5 + sampling_thereafter: 200 + otlphttp: + endpoint: http://loki-test-gateway.loki-telemetry.svc.cluster.local/otlp + tls: + insecure: true + loki: + endpoint: "http://loki-gateway.loki.svc.cluster.local/loki/api/v1/push" + tls: + insecure: true + prometheus: + endpoint: "0.0.0.0:9090" + namespace: "otel" # A prefix for the metrics, e.g., obsrv_ + enable_open_metrics: true # Enables the OpenMetrics format + + + extensions: + # The health_check extension is mandatory for this chart. + # Without the health_check extension the collector will fail the readiness and liveliness probes. + # The health_check extension can be modified, but should never be removed. + health_check: + endpoint: ${env:MY_POD_IP}:13133 + processors: + batch: + timeout: 500ms + send_batch_size: 100 + send_batch_max_size: 500 # Increase the queue size to hold more logs + # Default memory limiter configuration for the collector based on k8s resource limits. + memory_limiter: + # check_interval is the time between measurements of memory usage. 
+ check_interval: 5s + # By default limit_mib is set to 80% of ".Values.resources.limits.memory" + limit_percentage: 85 + # By default spike_limit_mib is set to 25% of ".Values.resources.limits.memory" + spike_limit_percentage: 35 + + receivers: + otlp: + protocols: + grpc: + endpoint: ${env:MY_POD_IP}:4317 + http: + endpoint: ${env:MY_POD_IP}:4318 + filelog: + include: + - /var/log/pods/*/*/*.log + exclude: + # Exclude logs from all containers named otel-collector + - /var/log/pods/*/otel-collector/*.log + start_at: "beginning" # Read existing log files from the beginning; set to "end" to only collect entries written after startup + include_file_path: true + include_file_name: false + + service: + telemetry: + metrics: + address: ${env:MY_POD_IP}:8888 + extensions: + - health_check + pipelines: + logs: + exporters: + #- otlphttp + - loki + processors: + - memory_limiter + - batch + receivers: + - otlp + - filelog + metrics: + exporters: + - prometheus + - debug + processors: + - memory_limiter + - batch + receivers: + - otlp + +# Helm currently has an issue (https://github.com/helm/helm/pull/12879) when using null to remove +# default configuration from a subchart. The result is that you cannot remove default configuration +# from `config`, such as a specific receiver or a specific pipeline, when the chart is used as a +# subchart. +# +# Until the helm bug is fixed, this field is provided as an alternative when using this chart as a subchart. +# It is not recommended to use this field when installing the chart directly. +# +# When not empty, `alternateConfig` will be used to set the collector's configuration. It has NO default +# values and IS NOT MERGED with config. Any configuration provided via `config` will be ignored when +# `alternateConfig` is set. You MUST provide your own collector configuration. +# +# Reminder that the healthcheck extension (or something else that provides the same functionality) is required. 
+# +# Components configured by presets will be injected in the same way they are for `config`. +alternateConfig: {} + +image: + # If you want to use the core image `otel/opentelemetry-collector`, you also need to change `command.name` value to `otelcol`. + repository: "otel/opentelemetry-collector-contrib" + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "latest" + # When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value). + digest: "" +imagePullSecrets: [] + +# OpenTelemetry Collector executable +command: + name: "" + extraArgs: [] + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +clusterRole: + # Specifies whether a clusterRole should be created + # Some presets also trigger the creation of a cluster role and cluster role binding. + # If using one of those presets, this field is no-op. + create: false + # Annotations to add to the clusterRole + # Can be used in combination with presets that create a cluster role. + annotations: {} + # The name of the clusterRole to use. + # If not set a name is generated using the fullname template + # Can be used in combination with presets that create a cluster role. + name: "" + # A set of rules as documented here : https://kubernetes.io/docs/reference/access-authn-authz/rbac/ + # Can be used in combination with presets that create a cluster role to add additional rules. + rules: [] + # - apiGroups: + # - '' + # resources: + # - 'pods' + # - 'nodes' + # verbs: + # - 'get' + # - 'list' + # - 'watch' + + clusterRoleBinding: + # Annotations to add to the clusterRoleBinding + # Can be used in combination with presets that create a cluster role binding. 
+ annotations: {} + # The name of the clusterRoleBinding to use. + # If not set a name is generated using the fullname template + # Can be used in combination with presets that create a cluster role binding. + name: "" + +podSecurityContext: {} +securityContext: {} + +nodeSelector: {} +tolerations: [] +affinity: {} +topologySpreadConstraints: [] + +# Allows for pod scheduler prioritisation +priorityClassName: "" + +extraEnvs: [] +extraEnvsFrom: [] +# This also supports template content, which will eventually be converted to yaml. +extraVolumes: [] + +# This also supports template content, which will eventually be converted to yaml. +extraVolumeMounts: [] + +# Configuration for ports +# nodePort is also allowed +ports: + otlp: + enabled: true + containerPort: 4317 + servicePort: 4317 + hostPort: 4317 + protocol: TCP + # nodePort: 30317 + appProtocol: grpc + otlp-http: + enabled: true + containerPort: 4318 + servicePort: 4318 + hostPort: 4318 + protocol: TCP + jaeger-compact: + enabled: true + containerPort: 6831 + servicePort: 6831 + hostPort: 6831 + protocol: UDP + jaeger-thrift: + enabled: true + containerPort: 14268 + servicePort: 14268 + hostPort: 14268 + protocol: TCP + jaeger-grpc: + enabled: true + containerPort: 14250 + servicePort: 14250 + hostPort: 14250 + protocol: TCP + zipkin: + enabled: true + containerPort: 9411 + servicePort: 9411 + hostPort: 9411 + protocol: TCP + metrics: + # The metrics port must be enabled (as it is here) + # in order to use the ServiceMonitor (serviceMonitor.enabled) or PodMonitor (podMonitor.enabled). + enabled: true + containerPort: 9090 + servicePort: 9090 + protocol: TCP + +# When enabled, the chart will set the GOMEMLIMIT env var to 80% of the configured resources.limits.memory. +# If no resources.limits.memory are defined then enabling does nothing. +# It is HIGHLY recommended to enable this setting and set a value for resources.limits.memory. +useGOMEMLIMIT: true + +# Resource limits & requests. 
+# It is HIGHLY recommended to set resource limits.
+# resources: {}
+resources:
+  limits:
+    cpu: 250m
+    memory: 512Mi
+  requests:
+    cpu: 250m
+    memory: 512Mi
+
+podAnnotations: {}
+
+podLabels:
+  release: monitoring
+
+# Common labels to add to all otel-collector resources. Evaluated as a template.
+additionalLabels:
+  release: monitoring
+# app.kubernetes.io/part-of: my-app
+
+# Host networking requested for this pod. Use the host's network namespace.
+hostNetwork: false
+
+# Adding entries to Pod /etc/hosts with HostAliases
+# https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/
+hostAliases: []
+  # - ip: "1.2.3.4"
+  #   hostnames:
+  #     - "my.host.com"
+
+# Pod DNS policy: ClusterFirst, ClusterFirstWithHostNet, Default, or None
+dnsPolicy: ""
+
+# Custom DNS config. Required when DNS policy is None.
+dnsConfig: {}
+
+# only used with deployment mode
+replicaCount: 1
+
+# only used with deployment mode
+revisionHistoryLimit: 10
+
+annotations: {}
+
+# List of extra sidecars to add.
+# This also supports template content, which will eventually be converted to yaml.
+extraContainers: []
+# extraContainers:
+#   - name: test
+#     command:
+#       - cp
+#     args:
+#       - /bin/sleep
+#       - /test/sleep
+#     image: busybox:latest
+#     volumeMounts:
+#       - name: test
+#         mountPath: /test
+
+# List of init container specs, e.g. for copying a binary to be executed as a lifecycle hook.
+# This also supports template content, which will eventually be converted to yaml.
+# Another usage of init containers is e.g. initializing filesystem permissions to the OTLP Collector user `10001` in case you are using persistence and the volume is producing a permission denied error for the OTLP Collector container.
+initContainers: [] +# initContainers: +# - name: test +# image: busybox:latest +# command: +# - cp +# args: +# - /bin/sleep +# - /test/sleep +# volumeMounts: +# - name: test +# mountPath: /test +# - name: init-fs +# image: busybox:latest +# command: +# - sh +# - '-c' +# - 'chown -R 10001: /var/lib/storage/otc' # use the path given as per `extensions.file_storage.directory` & `extraVolumeMounts[x].mountPath` +# volumeMounts: +# - name: opentelemetry-collector-data # use the name of the volume used for persistence +# mountPath: /var/lib/storage/otc # use the path given as per `extensions.file_storage.directory` & `extraVolumeMounts[x].mountPath` + +# Pod lifecycle policies. +lifecycleHooks: {} +# lifecycleHooks: +# preStop: +# exec: +# command: +# - /test/sleep +# - "5" + +# liveness probe configuration +# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ +## +livenessProbe: + # Number of seconds after the container has started before startup, liveness or readiness probes are initiated. + # initialDelaySeconds: 1 + # How often in seconds to perform the probe. + # periodSeconds: 10 + # Number of seconds after which the probe times out. + # timeoutSeconds: 1 + # Minimum consecutive failures for the probe to be considered failed after having succeeded. + # failureThreshold: 1 + # Duration in seconds the pod needs to terminate gracefully upon probe failure. + # terminationGracePeriodSeconds: 10 + httpGet: + port: 13133 + path: / + +# readiness probe configuration +# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ +## +readinessProbe: + # Number of seconds after the container has started before startup, liveness or readiness probes are initiated. + # initialDelaySeconds: 1 + # How often (in seconds) to perform the probe. + # periodSeconds: 10 + # Number of seconds after which the probe times out. 
+ # timeoutSeconds: 1 + # Minimum consecutive successes for the probe to be considered successful after having failed. + # successThreshold: 1 + # Minimum consecutive failures for the probe to be considered failed after having succeeded. + # failureThreshold: 1 + httpGet: + port: 13133 + path: / + +# startup probe configuration +# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ +## +startupProbe: {} + # Number of seconds after the container has started before startup probes are initiated. + # initialDelaySeconds: 1 + # How often in seconds to perform the probe. + # periodSeconds: 10 + # Number of seconds after which the probe times out. + # timeoutSeconds: 1 + # Minimum consecutive failures for the probe to be considered failed after having succeeded. + # failureThreshold: 1 + # Duration in seconds the pod needs to terminate gracefully upon probe failure. + # terminationGracePeriodSeconds: 10 + # httpGet: + # port: 13133 + # path: / + +service: + # Enable the creation of a Service. + # By default, it's enabled on mode != daemonset. + # However, to enable it on mode = daemonset, its creation must be explicitly enabled + enabled: true + + type: ClusterIP + # type: LoadBalancer + # loadBalancerIP: 1.2.3.4 + # loadBalancerSourceRanges: [] + + # By default, Service of type 'LoadBalancer' will be created setting 'externalTrafficPolicy: Cluster' + # unless other value is explicitly set. + # Possible values are Cluster or Local (https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip) + # externalTrafficPolicy: Cluster + + annotations: {} + + # By default, Service will be created setting 'internalTrafficPolicy: Local' on mode = daemonset + # unless other value is explicitly set. 
+ # Setting 'internalTrafficPolicy: Cluster' on a daemonset is not recommended + # internalTrafficPolicy: Cluster + +ingress: + enabled: false + # annotations: {} + # ingressClassName: nginx + # hosts: + # - host: collector.example.com + # paths: + # - path: / + # pathType: Prefix + # port: 4318 + # tls: + # - secretName: collector-tls + # hosts: + # - collector.example.com + + # Additional ingresses - only created if ingress.enabled is true + # Useful for when differently annotated ingress services are required + # Each additional ingress needs key "name" set to something unique + additionalIngresses: [] + # - name: cloudwatch + # ingressClassName: nginx + # annotations: {} + # hosts: + # - host: collector.example.com + # paths: + # - path: / + # pathType: Prefix + # port: 4318 + # tls: + # - secretName: collector-tls + # hosts: + # - collector.example.com + +podMonitor: + # The pod monitor by default scrapes the metrics port. + # The metrics port needs to be enabled as well. + enabled: false + metricsEndpoints: + - port: metrics + interval: 15s + + # additional labels for the PodMonitor + extraLabels: + release: monitoring + +serviceMonitor: + # The service monitor by default scrapes the metrics port. + # The metrics port needs to be enabled as well. 
+ enabled: true + metricsEndpoints: + - port: metrics + interval: 15s + + # additional labels for the ServiceMonitor + extraLabels: + release: monitoring + # Used to set relabeling and metricRelabeling configs on the ServiceMonitor + # https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + relabelings: [] + metricRelabelings: [] + +# PodDisruptionBudget is used only if deployment enabled +podDisruptionBudget: + enabled: false +# minAvailable: 2 +# maxUnavailable: 1 + +# autoscaling is used only if mode is "deployment" or "statefulset" +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 10 + behavior: {} + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +rollout: + rollingUpdate: {} + # When 'mode: daemonset', maxSurge cannot be used when hostPort is set for any of the ports + # maxSurge: 25% + # maxUnavailable: 0 + strategy: RollingUpdate + +prometheusRule: + enabled: false + groups: [] + # Create default rules for monitoring the collector + defaultRules: + enabled: false + + # additional labels for the PrometheusRule + extraLabels: {} + +statefulset: + # volumeClaimTemplates for a statefulset + volumeClaimTemplates: [] + podManagementPolicy: "Parallel" + # Controls if and how PVCs created by the StatefulSet are deleted. Available in Kubernetes 1.23+. + persistentVolumeClaimRetentionPolicy: + enabled: false + whenDeleted: Retain + whenScaled: Retain + +networkPolicy: + enabled: false + + # Annotations to add to the NetworkPolicy + annotations: {} + + # Configure the 'from' clause of the NetworkPolicy. + # By default this will restrict traffic to ports enabled for the Collector. 
If + # you wish to further restrict traffic to other hosts or specific namespaces, + # see the standard NetworkPolicy 'spec.ingress.from' definition for more info: + # https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/ + allowIngressFrom: [] + # # Allow traffic from any pod in any namespace, but not external hosts + # - namespaceSelector: {} + # # Allow external access from a specific cidr block + # - ipBlock: + # cidr: 192.168.1.64/32 + # # Allow access from pods in specific namespaces + # - namespaceSelector: + # matchExpressions: + # - key: kubernetes.io/metadata.name + # operator: In + # values: + # - "cats" + # - "dogs" + + # Add additional ingress rules to specific ports + # Useful to allow external hosts/services to access specific ports + # An example is allowing an external prometheus server to scrape metrics + # + # See the standard NetworkPolicy 'spec.ingress' definition for more info: + # https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/ + extraIngressRules: [] + # - ports: + # - port: metrics + # protocol: TCP + # from: + # - ipBlock: + # cidr: 192.168.1.64/32 + + # Restrict egress traffic from the OpenTelemetry collector pod + # See the standard NetworkPolicy 'spec.egress' definition for more info: + # https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/ + egressRules: [] + # - to: + # - namespaceSelector: {} + # - ipBlock: + # cidr: 192.168.10.10/24 + # ports: + # - port: 1234 + # protocol: TCP + +# Allow containers to share processes across pod namespace +shareProcessNamespace: false