From b380f876644c473e5d2636d696205da99d3931f1 Mon Sep 17 00:00:00 2001
From: marvasgit
Date: Sat, 1 Jun 2024 04:15:44 +0300
Subject: [PATCH] Release/2.0.0 (#2)

v2.0.0
---
 README.md                                     | 185 ++++++++++--
 appsettings.json                              |  80 +++++-
 .../kubestatewatch/configs/appsettings.json   |  82 +++++-
 .../kubestatewatch/templates/_affinities.tpl  | 102 -------
 .../templates/_capabilities.tpl               | 128 ---------
 charts/kubestatewatch/templates/_errors.tpl   |  23 --
 charts/kubestatewatch/templates/_helpers.tpl  |  36 ---
 charts/kubestatewatch/templates/_images.tpl   |  75 -----
 charts/kubestatewatch/templates/_ingress.tpl  |  68 -----
 charts/kubestatewatch/templates/_labels.tpl   |  18 --
 charts/kubestatewatch/templates/_names.tpl    |  63 ----
 charts/kubestatewatch/templates/_secrets.tpl  | 140 ---------
 charts/kubestatewatch/templates/_storage.tpl  |  23 --
 .../kubestatewatch/templates/_tplvalues.tpl   |  13 -
 charts/kubestatewatch/templates/_utils.tpl    |  62 ----
 charts/kubestatewatch/templates/_warnings.tpl |  14 -
 charts/kubestatewatch/templates/ingress.yaml  |  26 ++
 charts/kubestatewatch/values.yaml             | 139 +++++--
 config/config.go                              |  51 ++--
 go.mod                                        |   1 +
 go.sum                                        |   2 +
 main.go                                       |  65 ++++-
 pkg/client/client.go                          |  17 +-
 pkg/controller/controller.go                  | 269 ++++++++++--------
 pkg/utils/ttlList.go                          | 142 +++++++++
 pkg/utils/ttlList_test.go                     |  93 ++++++
 26 files changed, 942 insertions(+), 975 deletions(-)
 delete mode 100644 charts/kubestatewatch/templates/_affinities.tpl
 delete mode 100644 charts/kubestatewatch/templates/_capabilities.tpl
 delete mode 100644 charts/kubestatewatch/templates/_errors.tpl
 delete mode 100644 charts/kubestatewatch/templates/_helpers.tpl
 delete mode 100644 charts/kubestatewatch/templates/_images.tpl
 delete mode 100644 charts/kubestatewatch/templates/_ingress.tpl
 delete mode 100644 charts/kubestatewatch/templates/_labels.tpl
 delete mode 100644 charts/kubestatewatch/templates/_names.tpl
 delete mode 100644 charts/kubestatewatch/templates/_secrets.tpl
 delete mode 100644 charts/kubestatewatch/templates/_storage.tpl
 delete mode 100644 charts/kubestatewatch/templates/_tplvalues.tpl
 delete mode 100644 charts/kubestatewatch/templates/_utils.tpl
 delete mode 100644 charts/kubestatewatch/templates/_warnings.tpl
 create mode 100644 charts/kubestatewatch/templates/ingress.yaml
 create mode 100644 pkg/utils/ttlList.go
 create mode 100644 pkg/utils/ttlList_test.go

diff --git a/README.md b/README.md
index 10a57191..6278f2b7 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ It can be used standalone or deployed in Kubernetes. But its main purpose is to
 KubeStateWatch is an extended and simplified version of [kubewatch](https://github.com/robusta-dev/kubewatch) to meet the needs of our team
 ####Whats the difference between kubewatch and KubeStateWatch?
- It has been extended to support more the one connector, better support on multiple namespaces, visiblity to what was changed,simplified configuration, removed added metrics and few other small stuff.
+ It has been extended to support more than one connector, better support for multiple namespaces, visibility into what was changed, simplified configuration, extended resource-specific configuration, added metrics and a few other small improvements.
 ##UseCase
 Imagine you're managing a large Kubernetes cluster that has many different areas (namespaces) used by various people or teams. You need a way to keep an eye on any changes that happen in these areas that were made without the use of CI/CD pipelines ( for example using kubectl, lens, k9s etc.).
 In such cases you want to get notified about such changes,you also want to see what exactly was changed. This is what **KubeStateWatch** is for.
@@ -29,6 +29,45 @@ There are basically two kind of notifications:
 Although this aspect is important, our primary focus is on the first scenario: tracking modifications to the items we are monitoring, such as deployments, replica sets (rs), horizontal pod autoscalers (hpa), and configmaps. We aim to be promptly informed about any and all changes occurring within these elements.
 
+## Version 2.0.0 Changes
+- Added support for configuration per resource (**breaking change in the configuration file**)
+> This gives you the ability to configure each resource separately. For example, you can monitor only UPDATE events on deployments, while monitoring ADD and DELETE on replica sets, DELETE on pods, and so on. Also, you can now ignore specific paths in the diff per resource, which gives you more control over what you want to monitor.
+``` yaml
+#OLD
+resourcesToWatch:
+  configmap: true
+#NEW
+  configmap:
+    enabled: true
+    includeEvenTypes:
+    #- "add"
+    #- "update"
+    #- "delete"
+    ignorePath:
+    # - "/status"
+```
+- Added support for muting notifications during deployment.
+> This feature allows you to mute notifications during deployment. This is useful when a deployment is in progress and you don't want to be notified about every change that happens during it. You can mute notifications for a specific time period, which keeps the notifications clean and relevant. It also keeps your webhook from being overloaded. Some webhooks have a rate [limit](https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook#rate-limits) per minute, so this feature can help you avoid hitting the rate limit and missing important notifications after the deployment.
+
+It accepts two parameters:
+- namespace: the namespace you want to mute notifications for
+- duration: the duration you want to mute notifications for, in minutes
+The path looks like this:
+PUT url/deploy/**namespace**/**duration**
+PUT url/deploy/**namespace** - uses the default duration of 2 minutes
+POST url/reset - clears all muted namespaces
+```sh
+#Examples:
+curl -X PUT http://localhost:8080/deploy/default/5
+#Mutes notifications for the default namespace for 5 minutes
+curl -X DELETE http://localhost:8080/deploy/default
+#Clears the default namespace from the muted list
+curl -X PUT http://localhost:8080/deploy/default
+#Mutes notifications for the default namespace for 2 minutes
+curl -X POST http://localhost:8080/reset
+#Clears all muted namespaces
+```
+
 ### How it looks like
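The `/deploy` and `/reset` endpoints introduced in the Version 2.0.0 changes are served by a TTL list implemented in `pkg/utils/ttlList.go`, which this patch adds but which is not shown in this excerpt. The sketch below only illustrates the idea: the constructor and the `Add`/`Remove`/`Reset` signatures follow their usage in `main.go` further down, while the `Contains` check and the internal map layout are assumptions made for illustration, not the actual implementation.

```go
package utils

import (
	"errors"
	"sync"
	"time"
)

// TTLList keeps namespaces muted until their individual deadlines expire.
// Illustrative sketch only; the real type lives in pkg/utils/ttlList.go.
type TTLList struct {
	mu       sync.Mutex
	deadline map[string]time.Time
}

// NewTTLList returns an empty mute list.
func NewTTLList() *TTLList {
	return &TTLList{deadline: map[string]time.Time{}}
}

// Add mutes a namespace for the given duration, e.g. 2*time.Minute.
func (l *TTLList) Add(namespace string, d time.Duration) error {
	if namespace == "" {
		return errors.New("namespace must not be empty")
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	l.deadline[namespace] = time.Now().Add(d)
	return nil
}

// Remove unmutes a namespace immediately.
func (l *TTLList) Remove(namespace string) {
	l.mu.Lock()
	defer l.mu.Unlock()
	delete(l.deadline, namespace)
}

// Reset clears all muted namespaces.
func (l *TTLList) Reset() {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.deadline = map[string]time.Time{}
}

// Contains reports whether a namespace is still muted; expired entries
// are dropped lazily on lookup. (Assumed helper, not taken from the patch.)
func (l *TTLList) Contains(namespace string) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	dl, ok := l.deadline[namespace]
	if !ok {
		return false
	}
	if time.Now().After(dl) {
		delete(l.deadline, namespace)
		return false
	}
	return true
}
```

With something along these lines in place, the controller can skip notifications for any namespace where the check returns true, and muted namespaces fall out of the list on their own once their duration elapses.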
@@ -38,7 +77,7 @@ Although this aspect is important, our primary focus is on the first scenario: t # Latest image ``` -docmarr/kubestatewatch:1.0.2 +docmarr/kubestatewatch:2.0.0 ``` ## Installing the Chart @@ -46,8 +85,8 @@ docmarr/kubestatewatch:1.0.2 To install the chart with the release name `my-release`: ```console -$ helm repo add statemonitor https://marvasgit.github.io/kubernetes-statemonitor/ -$ helm install my-release statemonitor -n NS +$ helm repo add kubestatewatch https://marvasgit.github.io/kubernetes-statemonitor/ +$ helm install my-release kubestatewatch -n NS ``` The command deploys statemonitor on the Kubernetes cluster in the default configuration. With the default configuration, the chart monitors all namespaces. @@ -57,6 +96,7 @@ $ helm install my-release -f values.yaml statemonitor ``` > **Tip**: You can use the default [values.yaml](/charts/kubestatewatch/values.yaml) + ## Uninstalling the Chart To uninstall/delete the `my-release` deployment: @@ -91,7 +131,7 @@ Based on the desired communication channel, you need to configure the following - `namespaceconfig.include & namespaceconfig.exclude` - the namespaces you want to monitor, By default you monitor everything. If you want to monitor only specific namespaces, you can use the include and exclude options. If you use both, the exclude option will be ignored. You probably want to exclude the kube-system namespace. - `resources` - the resources you want to monitor - `ignore` - the resources you want to ignore -- `diff.ignorePath` - the paths you want to ignore in the diff ( Usually /metadata, /status, and everything that is not relevant to you) +- `diff.ignorePath` - this configuration affects all components that you watch. the paths you want to ignore in the diff ( Usually /metadata, /status, and everything that is not relevant to you) ``` yaml message: @@ -114,20 +154,125 @@ namespacesconfig: exclude: #- "kube-system" #- "cattle-fleet-system" +# changed on V2.0.0 resourcesToWatch: - configmap: true - daemonset: true - deployment: true - event: false - coreevent: false - hpa: true - job: false - persistentvolume: false - pod: false - replicaset: true - replicationcontroller: false - node: false - services: false + configmap: + enabled: true + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + daemonset: + enabled: true + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + deployment: + enabled: true + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + event: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + coreevent: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + hpa: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + job: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + persistentvolume: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + pod: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + replicaset: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + replicationcontroller: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + 
node: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + services: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + ``` > #### Configure connectors @@ -242,9 +387,7 @@ statemonitor latest 919896d3cd90 3 minutes ago - you need to have [docker](https://docs.docker.com/) installed. # Things for future version - -- Dissable notification - regular during deployment - +None at the moment. But if you have any suggestions, please create an issue. # Contribution diff --git a/appsettings.json b/appsettings.json index 2bd03397..487eb3cd 100644 --- a/appsettings.json +++ b/appsettings.json @@ -6,19 +6,71 @@ } }, "resource": { - "configmap": true, - "coreevent": false, - "daemonset": true, - "deployment": true, - "event": false, - "hpa": true, - "job": false, - "node": false, - "persistentvolume": false, - "pod": false, - "replicaset": true, - "replicationcontroller": false, - "services": false + "configmap": { + "enabled": true, + "includeEvenTypes": [], + "ignorePath": [] + }, + "coreevent": { + "enabled": false, + "includeEvenTypes": [], + "ignorePath": [] + }, + "daemonset": { + "enabled": false, + "includeEvenTypes": [], + "ignorePath": [] + }, + "deployment": { + "enabled": false, + "includeEvenTypes": ["update"], + "ignorePath": [] + }, + "event": { + "enabled": false, + "includeEvenTypes": ["update","delete"], + "ignorePath": [] + }, + "hpa": { + "enabled": false, + "includeEvenTypes": [], + "ignorePath": [] + }, + "job": { + "enabled": false, + "includeEvenTypes": [], + "ignorePath": [] + }, + "node": { + "enabled": false, + "includeEvenTypes": [], + "ignorePath": [] + }, + "persistentvolume": { + "enabled": false, + "includeEvenTypes": [], + "ignorePath": [] + }, + "pod": { + "enabled": true, + "includeEvenTypes": ["update"], + "ignorePath": ["/1/state"] + }, + "replicaset": { + "enabled": false, + "includeEvenTypes": [], + "ignorePath": [] + }, + "replicationcontroller": { + "enabled": false, + "includeEvenTypes": [], + "ignorePath": [] + }, + "services": { + "enabled": false, + "includeEvenTypes": [], + "ignorePath": [] + } }, "message": { "title": "Message Title" @@ -40,4 +92,4 @@ "cattle-fleet-system" ] } -} +} \ No newline at end of file diff --git a/charts/kubestatewatch/configs/appsettings.json b/charts/kubestatewatch/configs/appsettings.json index e68e2c24..16be608d 100644 --- a/charts/kubestatewatch/configs/appsettings.json +++ b/charts/kubestatewatch/configs/appsettings.json @@ -61,20 +61,72 @@ }, }, "resource": { - "configmap": {{ .Values.resourcesToWatch.configmap }}, - "coreevent": {{ .Values.resourcesToWatch.coreevent }}, - "daemonset": {{ .Values.resourcesToWatch.daemonset }}, - "deployment": {{ .Values.resourcesToWatch.deployment }}, - "event": {{ .Values.resourcesToWatch.event }}, - "hpa": {{ .Values.resourcesToWatch.hpa }}, - "job": {{ .Values.resourcesToWatch.job }}, - "node": {{ .Values.resourcesToWatch.node }}, - "persistentvolume": {{ .Values.resourcesToWatch.persistentvolume }}, - "pod": {{ .Values.resourcesToWatch.pod }}, - "replicaset": {{ .Values.resourcesToWatch.replicaset }}, - "replicationcontroller": {{ .Values.resourcesToWatch.replicationcontroller }}, - "services": {{ .Values.resourcesToWatch.services }} -}, + "configmap": { + "enabled": {{ .Values.resourcesToWatch.configmap.enabled }}, + "includeEvenTypes": {{ .Values.resourcesToWatch.configmap.includeEventTypes | toJson }}, + "ignorePath": {{ .Values.resourcesToWatch.configmap.ignorePath | 
toJson }} + }, + "coreevent": { + "enabled": {{ .Values.resourcesToWatch.coreevent.enabled }}, + "includeEvenTypes": {{ .Values.resourcesToWatch.coreevent.includeEventTypes | toJson }}, + "ignorePath": {{ .Values.resourcesToWatch.configmap.ignorePath | toJson }} + }, + "daemonset": { + "enabled": {{ .Values.resourcesToWatch.daemonset.enabled }}, + "includeEvenTypes": {{ .Values.resourcesToWatch.daemonset.includeEventTypes | toJson }}, + "ignorePath": {{ .Values.resourcesToWatch.daemonset.ignorePath | toJson }} + }, + "deployment": { + "enabled": {{ .Values.resourcesToWatch.deployment.enabled }}, + "includeEvenTypes": {{ .Values.resourcesToWatch.deployment.includeEventTypes | toJson }}, + "ignorePath": {{ .Values.resourcesToWatch.deployment.ignorePath | toJson }} + }, + "event": { + "enabled": {{ .Values.resourcesToWatch.event.enabled }}, + "includeEvenTypes": {{ .Values.resourcesToWatch.event.includeEventTypes | toJson }}, + "ignorePath": {{ .Values.resourcesToWatch.event.ignorePath | toJson }} + }, + "hpa": { + "enabled": {{ .Values.resourcesToWatch.hpa.enabled }}, + "includeEvenTypes": {{ .Values.resourcesToWatch.hpa.includeEventTypes | toJson }}, + "ignorePath": {{ .Values.resourcesToWatch.hpa.ignorePath | toJson }} + }, + "job": { + "enabled": {{ .Values.resourcesToWatch.job.enabled }}, + "includeEvenTypes": {{ .Values.resourcesToWatch.job.includeEventTypes | toJson }}, + "ignorePath": {{ .Values.resourcesToWatch.job.ignorePath | toJson }} + }, + "node": { + "enabled": {{ .Values.resourcesToWatch.node.enabled }}, + "includeEvenTypes": {{ .Values.resourcesToWatch.node.includeEventTypes | toJson }}, + "ignorePath": {{ .Values.resourcesToWatch.node.ignorePath | toJson }} + }, + "persistentvolume": { + "enabled": {{ .Values.resourcesToWatch.persistentvolume.enabled }}, + "includeEvenTypes": {{ .Values.resourcesToWatch.persistentvolume.includeEventTypes | toJson }}, + "ignorePath": {{ .Values.resourcesToWatch.persistentvolume.ignorePath | toJson }} + }, + "pod": { + "enabled": {{ .Values.resourcesToWatch.pod.enabled }}, + "includeEvenTypes": {{ .Values.resourcesToWatch.pod.includeEventTypes | toJson }}, + "ignorePath": {{ .Values.resourcesToWatch.pod.ignorePath | toJson }} + }, + "replicaset": { + "enabled": {{ .Values.resourcesToWatch.replicaset.enabled }}, + "includeEvenTypes": {{ .Values.resourcesToWatch.replicaset.includeEventTypes | toJson }}, + "ignorePath": {{ .Values.resourcesToWatch.replicaset.ignorePath | toJson }} + }, + "replicationcontroller": { + "enabled": {{ .Values.resourcesToWatch.replicationcontroller.enabled }}, + "includeEvenTypes": {{ .Values.resourcesToWatch.replicationcontroller.includeEventTypes | toJson }}, + "ignorePath": {{ .Values.resourcesToWatch.replicationcontroller.ignorePath | toJson }} + }, + "services": { + "enabled": {{ .Values.resourcesToWatch.services.enabled }}, + "includeEvenTypes": {{ .Values.resourcesToWatch.services.includeEventTypes | toJson }}, + "ignorePath": {{ .Values.resourcesToWatch.services.ignorePath | toJson }} + } + }, "message": { "title": {{ .Values.message.title | quote }} }, @@ -83,5 +135,5 @@ }, "namespacesconfig": { "exclude": {{ .Values.namespacesconfig.exclude | toJson }} - }, + } } diff --git a/charts/kubestatewatch/templates/_affinities.tpl b/charts/kubestatewatch/templates/_affinities.tpl deleted file mode 100644 index 189ea403..00000000 --- a/charts/kubestatewatch/templates/_affinities.tpl +++ /dev/null @@ -1,102 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - -{{/* -Return a soft nodeAffinity definition -{{ include 
"common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} -*/}} -{{- define "common.affinities.nodes.soft" -}} -preferredDuringSchedulingIgnoredDuringExecution: - - preference: - matchExpressions: - - key: {{ .key }} - operator: In - values: - {{- range .values }} - - {{ . | quote }} - {{- end }} - weight: 1 -{{- end -}} - -{{/* -Return a hard nodeAffinity definition -{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} -*/}} -{{- define "common.affinities.nodes.hard" -}} -requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: {{ .key }} - operator: In - values: - {{- range .values }} - - {{ . | quote }} - {{- end }} -{{- end -}} - -{{/* -Return a nodeAffinity definition -{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} -*/}} -{{- define "common.affinities.nodes" -}} - {{- if eq .type "soft" }} - {{- include "common.affinities.nodes.soft" . -}} - {{- else if eq .type "hard" }} - {{- include "common.affinities.nodes.hard" . -}} - {{- end -}} -{{- end -}} - -{{/* -Return a soft podAffinity/podAntiAffinity definition -{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} -*/}} -{{- define "common.affinities.pods.soft" -}} -{{- $component := default "" .component -}} -{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} -preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} - {{- if not (empty $component) }} - {{ printf "app.kubernetes.io/component: %s" $component }} - {{- end }} - {{- range $key, $value := $extraMatchLabels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - namespaces: - - {{ .context.Release.Namespace | quote }} - topologyKey: kubernetes.io/hostname - weight: 1 -{{- end -}} - -{{/* -Return a hard podAffinity/podAntiAffinity definition -{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "context" $) -}} -*/}} -{{- define "common.affinities.pods.hard" -}} -{{- $component := default "" .component -}} -{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} -requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} - {{- if not (empty $component) }} - {{ printf "app.kubernetes.io/component: %s" $component }} - {{- end }} - {{- range $key, $value := $extraMatchLabels }} - {{ $key }}: {{ $value | quote }} - {{- end }} - namespaces: - - {{ .context.Release.Namespace | quote }} - topologyKey: kubernetes.io/hostname -{{- end -}} - -{{/* -Return a podAffinity/podAntiAffinity definition -{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} -*/}} -{{- define "common.affinities.pods" -}} - {{- if eq .type "soft" }} - {{- include "common.affinities.pods.soft" . -}} - {{- else if eq .type "hard" }} - {{- include "common.affinities.pods.hard" . 
-}} - {{- end -}} -{{- end -}} diff --git a/charts/kubestatewatch/templates/_capabilities.tpl b/charts/kubestatewatch/templates/_capabilities.tpl deleted file mode 100644 index b94212bb..00000000 --- a/charts/kubestatewatch/templates/_capabilities.tpl +++ /dev/null @@ -1,128 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - -{{/* -Return the target Kubernetes version -*/}} -{{- define "common.capabilities.kubeVersion" -}} -{{- if .Values.global }} - {{- if .Values.global.kubeVersion }} - {{- .Values.global.kubeVersion -}} - {{- else }} - {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} - {{- end -}} -{{- else }} -{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for poddisruptionbudget. -*/}} -{{- define "common.capabilities.policy.apiVersion" -}} -{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "policy/v1beta1" -}} -{{- else -}} -{{- print "policy/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for networkpolicy. -*/}} -{{- define "common.capabilities.networkPolicy.apiVersion" -}} -{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "extensions/v1beta1" -}} -{{- else -}} -{{- print "networking.k8s.io/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for cronjob. -*/}} -{{- define "common.capabilities.cronjob.apiVersion" -}} -{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "batch/v1beta1" -}} -{{- else -}} -{{- print "batch/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for deployment. -*/}} -{{- define "common.capabilities.deployment.apiVersion" -}} -{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "extensions/v1beta1" -}} -{{- else -}} -{{- print "apps/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for statefulset. -*/}} -{{- define "common.capabilities.statefulset.apiVersion" -}} -{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "apps/v1beta1" -}} -{{- else -}} -{{- print "apps/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for ingress. -*/}} -{{- define "common.capabilities.ingress.apiVersion" -}} -{{- if .Values.ingress -}} -{{- if .Values.ingress.apiVersion -}} -{{- .Values.ingress.apiVersion -}} -{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "extensions/v1beta1" -}} -{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "networking.k8s.io/v1beta1" -}} -{{- else -}} -{{- print "networking.k8s.io/v1" -}} -{{- end }} -{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "extensions/v1beta1" -}} -{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "networking.k8s.io/v1beta1" -}} -{{- else -}} -{{- print "networking.k8s.io/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for RBAC resources. -*/}} -{{- define "common.capabilities.rbac.apiVersion" -}} -{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "rbac.authorization.k8s.io/v1beta1" -}} -{{- else -}} -{{- print "rbac.authorization.k8s.io/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for CRDs. 
-*/}} -{{- define "common.capabilities.crd.apiVersion" -}} -{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "apiextensions.k8s.io/v1beta1" -}} -{{- else -}} -{{- print "apiextensions.k8s.io/v1" -}} -{{- end -}} -{{- end -}} - -{{/* -Returns true if the used Helm version is 3.3+. -A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. -This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. -**To be removed when the catalog's minimun Helm version is 3.3** -*/}} -{{- define "common.capabilities.supportsHelmVersion" -}} -{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} - {{- true -}} -{{- end -}} -{{- end -}} diff --git a/charts/kubestatewatch/templates/_errors.tpl b/charts/kubestatewatch/templates/_errors.tpl deleted file mode 100644 index a79cc2e3..00000000 --- a/charts/kubestatewatch/templates/_errors.tpl +++ /dev/null @@ -1,23 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Through error when upgrading using empty passwords values that must not be empty. - -Usage: -{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} -{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} -{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} - -Required password params: - - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. - - context - Context - Required. Parent context. -*/}} -{{- define "common.errors.upgrade.passwords.empty" -}} - {{- $validationErrors := join "" .validationErrors -}} - {{- if and $validationErrors .context.Release.IsUpgrade -}} - {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} - {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}} - {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} - {{- $errorString = print $errorString "\n%s" -}} - {{- printf $errorString $validationErrors | fail -}} - {{- end -}} -{{- end -}} diff --git a/charts/kubestatewatch/templates/_helpers.tpl b/charts/kubestatewatch/templates/_helpers.tpl deleted file mode 100644 index ad939655..00000000 --- a/charts/kubestatewatch/templates/_helpers.tpl +++ /dev/null @@ -1,36 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "statemonitor.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "common.names.fullname" .) 
.Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} - -{{/* -Return the proper statemonitor image name -*/}} -{{- define "statemonitor.image" -}} -{{- $registry := .Values.image.registry -}} -{{- $repository := .Values.image.repository -}} -{{- $tag := .Values.image.tag -}} -{{- printf "%s/%s:%s" $registry $repository $tag -}} -{{- end -}} - -{{/* -Return the proper Docker Image Registry Secret Names -*/}} -{{- define "statemonitor.imagePullSecrets" -}} -{{- include "common.images.pullSecrets" (dict "images" (list .Values.image) "global" .Values.global) -}} -{{- end -}} - -{{/* -Check if there are rolling tags in the images -*/}} -{{- define "statemonitor.checkRollingTags" -}} -{{- include "common.warnings.rollingTag" .Values.image }} -{{- end -}} diff --git a/charts/kubestatewatch/templates/_images.tpl b/charts/kubestatewatch/templates/_images.tpl deleted file mode 100644 index 42ffbc72..00000000 --- a/charts/kubestatewatch/templates/_images.tpl +++ /dev/null @@ -1,75 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Return the proper image name -{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} -*/}} -{{- define "common.images.image" -}} -{{- $registryName := .imageRoot.registry -}} -{{- $repositoryName := .imageRoot.repository -}} -{{- $tag := .imageRoot.tag | toString -}} -{{- if .global }} - {{- if .global.imageRegistry }} - {{- $registryName = .global.imageRegistry -}} - {{- end -}} -{{- end -}} -{{- if $registryName }} -{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} -{{- else -}} -{{- printf "%s:%s" $repositoryName $tag -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) -{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} -*/}} -{{- define "common.images.pullSecrets" -}} - {{- $pullSecrets := list }} - - {{- if .global }} - {{- range .global.imagePullSecrets -}} - {{- $pullSecrets = append $pullSecrets . -}} - {{- end -}} - {{- end -}} - - {{- range .images -}} - {{- range .pullSecrets -}} - {{- $pullSecrets = append $pullSecrets . -}} - {{- end -}} - {{- end -}} - - {{- if (not (empty $pullSecrets)) }} -imagePullSecrets: - {{- range $pullSecrets }} - - name: {{ . }} - {{- end }} - {{- end }} -{{- end -}} - -{{/* -Return the proper Docker Image Registry Secret Names evaluating values as templates -{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} -*/}} -{{- define "common.images.renderPullSecrets" -}} - {{- $pullSecrets := list }} - {{- $context := .context }} - - {{- if $context.Values.global }} - {{- range $context.Values.global.imagePullSecrets -}} - {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} - {{- end -}} - {{- end -}} - - {{- range .images -}} - {{- range .pullSecrets -}} - {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} - {{- end -}} - {{- end -}} - - {{- if (not (empty $pullSecrets)) }} -imagePullSecrets: - {{- range $pullSecrets }} - - name: {{ . 
}} - {{- end }} - {{- end }} -{{- end -}} diff --git a/charts/kubestatewatch/templates/_ingress.tpl b/charts/kubestatewatch/templates/_ingress.tpl deleted file mode 100644 index 8caf73a6..00000000 --- a/charts/kubestatewatch/templates/_ingress.tpl +++ /dev/null @@ -1,68 +0,0 @@ -{{/* vim: set filetype=mustache: */}} - -{{/* -Generate backend entry that is compatible with all Kubernetes API versions. - -Usage: -{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} - -Params: - - serviceName - String. Name of an existing service backend - - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. - - context - Dict - Required. The context for the template evaluation. -*/}} -{{- define "common.ingress.backend" -}} -{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} -{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} -serviceName: {{ .serviceName }} -servicePort: {{ .servicePort }} -{{- else -}} -service: - name: {{ .serviceName }} - port: - {{- if typeIs "string" .servicePort }} - name: {{ .servicePort }} - {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} - number: {{ .servicePort | int }} - {{- end }} -{{- end -}} -{{- end -}} - -{{/* -Print "true" if the API pathType field is supported -Usage: -{{ include "common.ingress.supportsPathType" . }} -*/}} -{{- define "common.ingress.supportsPathType" -}} -{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} -{{- print "false" -}} -{{- else -}} -{{- print "true" -}} -{{- end -}} -{{- end -}} - -{{/* -Returns true if the ingressClassname field is supported -Usage: -{{ include "common.ingress.supportsIngressClassname" . }} -*/}} -{{- define "common.ingress.supportsIngressClassname" -}} -{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} -{{- print "false" -}} -{{- else -}} -{{- print "true" -}} -{{- end -}} -{{- end -}} - -{{/* -Return true if cert-manager required annotations for TLS signed -certificates are set in the Ingress annotations -Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations -Usage: -{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} -*/}} -{{- define "common.ingress.certManagerRequest" -}} -{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") }} - {{- true -}} -{{- end -}} -{{- end -}} diff --git a/charts/kubestatewatch/templates/_labels.tpl b/charts/kubestatewatch/templates/_labels.tpl deleted file mode 100644 index 252066c7..00000000 --- a/charts/kubestatewatch/templates/_labels.tpl +++ /dev/null @@ -1,18 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Kubernetes standard labels -*/}} -{{- define "common.labels.standard" -}} -app.kubernetes.io/name: {{ include "common.names.name" . }} -helm.sh/chart: {{ include "common.names.chart" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector -*/}} -{{- define "common.labels.matchLabels" -}} -app.kubernetes.io/name: {{ include "common.names.name" . 
}} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end -}} diff --git a/charts/kubestatewatch/templates/_names.tpl b/charts/kubestatewatch/templates/_names.tpl deleted file mode 100644 index c8574d17..00000000 --- a/charts/kubestatewatch/templates/_names.tpl +++ /dev/null @@ -1,63 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "common.names.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "common.names.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "common.names.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create a default fully qualified dependency name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -Usage: -{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} -*/}} -{{- define "common.names.dependency.fullname" -}} -{{- if .chartValues.fullnameOverride -}} -{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .chartName .chartValues.nameOverride -}} -{{- if contains $name .context.Release.Name -}} -{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Allow the release namespace to be overridden for multi-namespace deployments in combined charts. -*/}} -{{- define "common.names.namespace" -}} -{{- if .Values.namespaceOverride -}} -{{- .Values.namespaceOverride -}} -{{- else -}} -{{- .Release.Namespace -}} -{{- end -}} -{{- end -}} \ No newline at end of file diff --git a/charts/kubestatewatch/templates/_secrets.tpl b/charts/kubestatewatch/templates/_secrets.tpl deleted file mode 100644 index a53fb44f..00000000 --- a/charts/kubestatewatch/templates/_secrets.tpl +++ /dev/null @@ -1,140 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Generate secret name. - -Usage: -{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} - -Params: - - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user - to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. - +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret - - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. - - context - Dict - Required. 
The context for the template evaluation. -*/}} -{{- define "common.secrets.name" -}} -{{- $name := (include "common.names.fullname" .context) -}} - -{{- if .defaultNameSuffix -}} -{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{- with .existingSecret -}} -{{- if not (typeIs "string" .) -}} -{{- with .name -}} -{{- $name = . -}} -{{- end -}} -{{- else -}} -{{- $name = . -}} -{{- end -}} -{{- end -}} - -{{- printf "%s" $name -}} -{{- end -}} - -{{/* -Generate secret key. - -Usage: -{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} - -Params: - - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user - to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. - +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret - - key - String - Required. Name of the key in the secret. -*/}} -{{- define "common.secrets.key" -}} -{{- $key := .key -}} - -{{- if .existingSecret -}} - {{- if not (typeIs "string" .existingSecret) -}} - {{- if .existingSecret.keyMapping -}} - {{- $key = index .existingSecret.keyMapping $.key -}} - {{- end -}} - {{- end }} -{{- end -}} - -{{- printf "%s" $key -}} -{{- end -}} - -{{/* -Generate secret password or retrieve one if already created. - -Usage: -{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} - -Params: - - secret - String - Required - Name of the 'Secret' resource where the password is stored. - - key - String - Required - Name of the key in the secret. - - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. - - length - int - Optional - Length of the generated random password. - - strong - Boolean - Optional - Whether to add symbols to the generated random password. - - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. - - context - Context - Required - Parent context. - -The order in which this function returns a secret password: - 1. Already existing 'Secret' resource - (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) - 2. Password provided via the values.yaml - (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) - 3. 
Randomly generated secret password - (A new random secret password with the length specified in the 'length' parameter will be generated and returned) - -*/}} -{{- define "common.secrets.passwords.manage" -}} - -{{- $password := "" }} -{{- $subchart := "" }} -{{- $chartName := default "" .chartName }} -{{- $passwordLength := default 10 .length }} -{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} -{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} -{{- $secretData := (lookup "v1" "Secret" $.context.Release.Namespace .secret).data }} -{{- if $secretData }} - {{- if hasKey $secretData .key }} - {{- $password = index $secretData .key }} - {{- else }} - {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} - {{- end -}} -{{- else if $providedPasswordValue }} - {{- $password = $providedPasswordValue | toString | b64enc | quote }} -{{- else }} - - {{- if .context.Values.enabled }} - {{- $subchart = $chartName }} - {{- end -}} - - {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} - {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} - {{- $passwordValidationErrors := list $requiredPasswordError -}} - {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} - - {{- if .strong }} - {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} - {{- $password = randAscii $passwordLength }} - {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} - {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} - {{- else }} - {{- $password = randAlphaNum $passwordLength | b64enc | quote }} - {{- end }} -{{- end -}} -{{- printf "%s" $password -}} -{{- end -}} - -{{/* -Returns whether a previous generated secret already exists - -Usage: -{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} - -Params: - - secret - String - Required - Name of the 'Secret' resource where the password is stored. - - context - Context - Required - Parent context. 
-*/}} -{{- define "common.secrets.exists" -}} -{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} -{{- if $secret }} - {{- true -}} -{{- end -}} -{{- end -}} diff --git a/charts/kubestatewatch/templates/_storage.tpl b/charts/kubestatewatch/templates/_storage.tpl deleted file mode 100644 index 60e2a844..00000000 --- a/charts/kubestatewatch/templates/_storage.tpl +++ /dev/null @@ -1,23 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Return the proper Storage Class -{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} -*/}} -{{- define "common.storage.class" -}} - -{{- $storageClass := .persistence.storageClass -}} -{{- if .global -}} - {{- if .global.storageClass -}} - {{- $storageClass = .global.storageClass -}} - {{- end -}} -{{- end -}} - -{{- if $storageClass -}} - {{- if (eq "-" $storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" $storageClass -}} - {{- end -}} -{{- end -}} - -{{- end -}} diff --git a/charts/kubestatewatch/templates/_tplvalues.tpl b/charts/kubestatewatch/templates/_tplvalues.tpl deleted file mode 100644 index 2db16685..00000000 --- a/charts/kubestatewatch/templates/_tplvalues.tpl +++ /dev/null @@ -1,13 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Renders a value that contains template. -Usage: -{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} -*/}} -{{- define "common.tplvalues.render" -}} - {{- if typeIs "string" .value }} - {{- tpl .value .context }} - {{- else }} - {{- tpl (.value | toYaml) .context }} - {{- end }} -{{- end -}} diff --git a/charts/kubestatewatch/templates/_utils.tpl b/charts/kubestatewatch/templates/_utils.tpl deleted file mode 100644 index ea083a24..00000000 --- a/charts/kubestatewatch/templates/_utils.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Print instructions to get a secret value. -Usage: -{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} -*/}} -{{- define "common.utils.secret.getvalue" -}} -{{- $varname := include "common.utils.fieldToEnvVar" . -}} -export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) -{{- end -}} - -{{/* -Build env var name given a field -Usage: -{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} -*/}} -{{- define "common.utils.fieldToEnvVar" -}} - {{- $fieldNameSplit := splitList "-" .field -}} - {{- $upperCaseFieldNameSplit := list -}} - - {{- range $fieldNameSplit -}} - {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} - {{- end -}} - - {{ join "_" $upperCaseFieldNameSplit }} -{{- end -}} - -{{/* -Gets a value from .Values given -Usage: -{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} -*/}} -{{- define "common.utils.getValueFromKey" -}} -{{- $splitKey := splitList "." .key -}} -{{- $value := "" -}} -{{- $latestObj := $.context.Values -}} -{{- range $splitKey -}} - {{- if not $latestObj -}} - {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} - {{- end -}} - {{- $value = ( index $latestObj . 
) -}} - {{- $latestObj = $value -}} -{{- end -}} -{{- printf "%v" (default "" $value) -}} -{{- end -}} - -{{/* -Returns first .Values key with a defined value or first of the list if all non-defined -Usage: -{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} -*/}} -{{- define "common.utils.getKeyFromList" -}} -{{- $key := first .keys -}} -{{- $reverseKeys := reverse .keys }} -{{- range $reverseKeys }} - {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} - {{- if $value -}} - {{- $key = . }} - {{- end -}} -{{- end -}} -{{- printf "%s" $key -}} -{{- end -}} diff --git a/charts/kubestatewatch/templates/_warnings.tpl b/charts/kubestatewatch/templates/_warnings.tpl deleted file mode 100644 index 301db1b4..00000000 --- a/charts/kubestatewatch/templates/_warnings.tpl +++ /dev/null @@ -1,14 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Warning about using rolling tag. -Usage: -{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} -*/}} -{{- define "common.warnings.rollingTag" -}} - -{{- if and (contains "docmarr/kubernetes-statemonitor/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} -WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. -+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ -{{- end }} - -{{- end -}} diff --git a/charts/kubestatewatch/templates/ingress.yaml b/charts/kubestatewatch/templates/ingress.yaml new file mode 100644 index 00000000..0e507d67 --- /dev/null +++ b/charts/kubestatewatch/templates/ingress.yaml @@ -0,0 +1,26 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + generation: 1 + labels: + run: nginx-ingress-{{ .Release.Name }} + name: nginx-ingress-{{ .Release.Name }} + namespace: {{ .Release.Namespace }} + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/ssl-redirect: "false" + nginx.ingress.kubernetes.io/force-ssl-redirect: "false" + nginx.ingress.kubernetes.io/service-upstream: "true" + nginx.ingress.kubernetes.io/use-regex: "true" +spec: + rules: + - host: {{ Release.Namespace }}.{{ Values.ingress.host }} + http: + paths: + - path: {{ Values.ingress.path }} + pathType: Prefix + backend: + service: + name: {{ Release.Name }} + port: + number: {{ Values.ingress.servicePort }} \ No newline at end of file diff --git a/charts/kubestatewatch/values.yaml b/charts/kubestatewatch/values.yaml index 27345c99..0ca551ec 100644 --- a/charts/kubestatewatch/values.yaml +++ b/charts/kubestatewatch/values.yaml @@ -81,7 +81,7 @@ extraHandlers: {} message: title: "XXXX" diff: - ignorePath: [] + ignorePath: # - "/metadata" # - "/spec/template/metadata" # - "/status" @@ -94,24 +94,129 @@ diff: # - "/status" # - "/metadata/replicas" namespacesconfig: - #include: [] + include: exclude: - - "kube-system" - - "cattle-fleet-system" + #- "kube-system" + #- "cattle-fleet-system" + resourcesToWatch: - configmap: true - daemonset: true - deployment: true - event: false - coreevent: false - hpa: true - job: false - persistentvolume: false - pod: false - replicaset: true - replicationcontroller: false - node: false - services: false + configmap: + enabled: true + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + daemonset: + enabled: true + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + 
+ deployment: + enabled: true + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + event: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + coreevent: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + hpa: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + job: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + persistentvolume: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + pod: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + replicaset: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + replicationcontroller: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + node: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + + services: + enabled: false + includeEvenTypes: + #- "add" + #- "update" + #- "delete" + ignorePath: + # - "/status" + lifecycleHooks: {} extraEnvVars: [] extraEnvVarsCM: "" diff --git a/config/config.go b/config/config.go index 11318ee5..c52a1592 100755 --- a/config/config.go +++ b/config/config.go @@ -17,26 +17,26 @@ type Handler struct { // Resource contains resource configuration type Resource struct { - Deployment bool - ReplicationController bool - ReplicaSet bool - DaemonSet bool - StatefulSet bool - Services bool - Pod bool - Job bool - Node bool - ClusterRole bool - ClusterRoleBinding bool - ServiceAccount bool - PersistentVolume bool - Namespace bool - Secret bool - ConfigMap bool - Ingress bool - HPA bool - Event bool - CoreEvent bool + Deployment ResourceConfig + ReplicationController ResourceConfig + ReplicaSet ResourceConfig + DaemonSet ResourceConfig + StatefulSet ResourceConfig + Services ResourceConfig + Pod ResourceConfig + Job ResourceConfig + Node ResourceConfig + ClusterRole ResourceConfig + ClusterRoleBinding ResourceConfig + ServiceAccount ResourceConfig + PersistentVolume ResourceConfig + Namespace ResourceConfig + Secret ResourceConfig + ConfigMap ResourceConfig + Ingress ResourceConfig + HPA ResourceConfig + Event ResourceConfig + CoreEvent ResourceConfig } // Config struct contains statemonitor configuration @@ -44,8 +44,6 @@ type Config struct { // Handlers know how to send notifications to specific services. Handler Handler - //Reason []string - // Resources to watch. 
Resource Resource @@ -65,7 +63,16 @@ type NamespacesConfig struct { Exclude []string } +type ResourceConfig struct { + Enabled bool + // process events based on its type + //create, update, delete + //if empty, all events will be processed + IncludeEvenTypes []string + IgnorePath []string +} type Diff struct { + //IgnorePath for all resources IgnorePath []string } diff --git a/go.mod b/go.mod index 8ce1e06f..8409b6ab 100755 --- a/go.mod +++ b/go.mod @@ -42,6 +42,7 @@ require ( github.com/imdario/mergo v0.3.6 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect diff --git a/go.sum b/go.sum index ec5eaee9..d7b39908 100755 --- a/go.sum +++ b/go.sum @@ -56,6 +56,8 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/knadh/koanf/maps v0.1.1 h1:G5TjmUh2D7G2YWf5SQQqSiHRJEjaicvU0KpypqB3NIs= diff --git a/main.go b/main.go index 59539ded..4f68fb64 100644 --- a/main.go +++ b/main.go @@ -1,21 +1,80 @@ package main import ( + "context" + "fmt" "net/http" "os" + "strconv" + "time" + "github.com/julienschmidt/httprouter" "github.com/marvasgit/kubernetes-statemonitor/pkg/client" + "github.com/marvasgit/kubernetes-statemonitor/pkg/utils" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/sirupsen/logrus" ) +var list = utils.NewTTLList() + func main() { + // Create a context with cancellation + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + router := httprouter.New() + + router.GET("/metrics", Metrics) + router.PUT("/deploy/:namespace/:duration", namespaceDeployment) + router.PUT("/deploy/:namespace", namespaceDeployment) + router.DELETE("/deploy/:namespace", deletenamespaceDeployment) + router.POST("/reset", reset) go func() { - http.Handle("/metrics", promhttp.Handler()) - http.ListenAndServe(":2112", nil) + http.ListenAndServe(":80", router) }() + initLogger() - client.Start() + client.Start(ctx, list) +} +func Metrics(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + promhttp.Handler().ServeHTTP(w, r) +} +func deletenamespaceDeployment(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { + namespace := ps.ByName("namespace") + + list.Remove(namespace) + response := fmt.Sprintf("Namespace -%s was removed ", namespace) + w.Write([]byte(response)) +} +func reset(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { + list.Reset() + w.Write([]byte("Deployment List was reset ")) +} + +func namespaceDeployment(w http.ResponseWriter, 
r *http.Request, ps httprouter.Params) { + namespace := ps.ByName("namespace") + durationString := ps.ByName("duration") + + //set default time to 5 minutes if not provided + if durationString == "" { + durationString = "2" + } + + // Parse JSON request body + // Parse durationInMinutes from string to int + durationInMinutes, err := strconv.Atoi(durationString) + if err != nil { + http.Error(w, "Invalid time value", http.StatusBadRequest) + return + } + er := list.Add(namespace, time.Duration(durationInMinutes)*time.Minute) + if er != nil { + http.Error(w, er.Error(), http.StatusBadRequest) + return + } + response := fmt.Sprintf("Namespace -%s added to deployment list", namespace) + // change status code to 201 + w.WriteHeader(http.StatusCreated) + w.Write([]byte(response)) } func initLogger() { diff --git a/pkg/client/client.go b/pkg/client/client.go index 5ffa8f29..1cd4c885 100755 --- a/pkg/client/client.go +++ b/pkg/client/client.go @@ -1,6 +1,9 @@ package client import ( + "context" + "os" + "github.com/knadh/koanf/parsers/json" "github.com/knadh/koanf/providers/file" "github.com/knadh/koanf/v2" @@ -17,14 +20,15 @@ import ( "github.com/marvasgit/kubernetes-statemonitor/pkg/handlers/slackwebhook" "github.com/marvasgit/kubernetes-statemonitor/pkg/handlers/smtpClient" "github.com/marvasgit/kubernetes-statemonitor/pkg/handlers/webhook" + "github.com/marvasgit/kubernetes-statemonitor/pkg/utils" "github.com/sirupsen/logrus" ) -func Start() { +func Start(ctx context.Context, list *utils.TTLList) { conf := loadConfig() handlers := parseEventHandler(&conf) - controller.Start(&conf, handlers) + controller.Start(&conf, handlers, list) } // Global koanf instance. Use "." as the key path delimiter. This can be "/" or any character. @@ -32,7 +36,14 @@ var k = koanf.New(".") func loadConfig() config.Config { // Load JSON config. 
- if err := k.Load(file.Provider("/config/appsettings.json"), json.Parser()); err != nil { + //read envVariable IsLOCAL + isLocal := os.Getenv("IsLOCAL") + configPath := "/config/appsettings.json" + if isLocal == "true" { + configPath = "appsettings.json" + } + + if err := k.Load(file.Provider(configPath), json.Parser()); err != nil { logrus.Fatalf("error loading config: %v", err) } diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go index c50e911c..294143f7 100644 --- a/pkg/controller/controller.go +++ b/pkg/controller/controller.go @@ -71,6 +71,13 @@ var confDiff config.Diff var namespaces []string var metric *prometheus.CounterVec var mu sync.Mutex +var ttlList *utils.TTLList + +// Event indicate the informerEvent +type EventWrapper struct { + Event Event + ResourceConfig *config.ResourceConfig +} // Event indicate the informerEvent type Event struct { @@ -105,7 +112,8 @@ func init() { // TODO: we don't need the informer to be indexed // Start prepares watchers and run their controllers, then waits for process termination signals -func Start(conf *config.Config, eventHandlers []handlers.Handler) { +func Start(conf *config.Config, eventHandlers []handlers.Handler, list *utils.TTLList) { + ttlList = list //TODO remove imput of evenhandlers and decide here var kubeClient kubernetes.Interface @@ -121,7 +129,7 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { ns := "" defer close(stopCh) - if conf.Resource.CoreEvent { + if conf.Resource.CoreEvent.Enabled { allCoreEventsInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -138,12 +146,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, allCoreEventsInformer, objName(api_v1.Event{}), V1) + c := newResourceController(kubeClient, eventHandlers, allCoreEventsInformer, objName(api_v1.Event{}), V1, conf.Resource.CoreEvent) go c.Run(stopCh) } - if conf.Resource.Event { + if conf.Resource.Event.Enabled { allEventsInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ @@ -161,19 +169,22 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, allEventsInformer, objName(events_v1.Event{}), EVENTS_V1) + c := newResourceController(kubeClient, eventHandlers, allEventsInformer, objName(events_v1.Event{}), EVENTS_V1, conf.Resource.Event) go c.Run(stopCh) } - if conf.Resource.Pod { + if conf.Resource.Pod.Enabled { + pods := kubeClient.CoreV1().Pods(ns) informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { - return kubeClient.CoreV1().Pods(ns).List(context.Background(), options) + ll, err := pods.List(context.Background(), options) + return ll, err }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { - return kubeClient.CoreV1().Pods(ns).Watch(context.Background(), options) + ww, err := pods.Watch(context.Background(), options) + return ww, err }, }, &api_v1.Pod{}, @@ -181,12 +192,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.Pod{}), V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.Pod{}), V1, conf.Resource.Pod) go c.Run(stopCh) } - if conf.Resource.HPA { + if conf.Resource.HPA.Enabled { 
informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -201,13 +212,13 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(autoscaling_v1.HorizontalPodAutoscaler{}), AUTOSCALING_V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(autoscaling_v1.HorizontalPodAutoscaler{}), AUTOSCALING_V1, conf.Resource.HPA) go c.Run(stopCh) } - if conf.Resource.DaemonSet { + if conf.Resource.DaemonSet.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -222,12 +233,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(apps_v1.DaemonSet{}), APPS_V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(apps_v1.DaemonSet{}), APPS_V1, conf.Resource.DaemonSet) go c.Run(stopCh) } - if conf.Resource.StatefulSet { + if conf.Resource.StatefulSet.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -242,11 +253,11 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(apps_v1.StatefulSet{}), APPS_V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(apps_v1.StatefulSet{}), APPS_V1, conf.Resource.StatefulSet) go c.Run(stopCh) } - if conf.Resource.ReplicaSet { + if conf.Resource.ReplicaSet.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -261,12 +272,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(apps_v1.ReplicaSet{}), APPS_V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(apps_v1.ReplicaSet{}), APPS_V1, conf.Resource.ReplicaSet) go c.Run(stopCh) } - if conf.Resource.Services { + if conf.Resource.Services.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -281,12 +292,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.Service{}), V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.Service{}), V1, conf.Resource.Services) go c.Run(stopCh) } - if conf.Resource.Deployment { + if conf.Resource.Deployment.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -301,12 +312,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(apps_v1.Deployment{}), APPS_V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(apps_v1.Deployment{}), APPS_V1, conf.Resource.Deployment) go c.Run(stopCh) } - if conf.Resource.Namespace { + if conf.Resource.Namespace.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) 
(runtime.Object, error) { @@ -321,12 +332,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.Namespace{}), V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.Namespace{}), V1, conf.Resource.Namespace) go c.Run(stopCh) } - if conf.Resource.ReplicationController { + if conf.Resource.ReplicationController.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -341,12 +352,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.ReplicationController{}), V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.ReplicationController{}), V1, conf.Resource.ReplicationController) go c.Run(stopCh) } - if conf.Resource.Job { + if conf.Resource.Job.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -361,12 +372,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(batch_v1.Job{}), BATCH_V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(batch_v1.Job{}), BATCH_V1, conf.Resource.Job) go c.Run(stopCh) } - if conf.Resource.Node { + if conf.Resource.Node.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -381,12 +392,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.Node{}), V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.Node{}), V1, conf.Resource.Node) go c.Run(stopCh) } - if conf.Resource.ServiceAccount { + if conf.Resource.ServiceAccount.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -401,12 +412,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.ServiceAccount{}), V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.ServiceAccount{}), V1, conf.Resource.ServiceAccount) go c.Run(stopCh) } - if conf.Resource.ClusterRole { + if conf.Resource.ClusterRole.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -421,12 +432,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(rbac_v1.ClusterRole{}), RBAC_V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(rbac_v1.ClusterRole{}), RBAC_V1, conf.Resource.ClusterRole) go c.Run(stopCh) } - if conf.Resource.ClusterRoleBinding { + if conf.Resource.ClusterRoleBinding.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -441,12 +452,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := 
newResourceController(kubeClient, eventHandlers, informer, objName(rbac_v1.ClusterRoleBinding{}), RBAC_V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(rbac_v1.ClusterRoleBinding{}), RBAC_V1, conf.Resource.ClusterRoleBinding) go c.Run(stopCh) } - if conf.Resource.PersistentVolume { + if conf.Resource.PersistentVolume.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -461,12 +472,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.PersistentVolume{}), V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.PersistentVolume{}), V1, conf.Resource.PersistentVolume) go c.Run(stopCh) } - if conf.Resource.Secret { + if conf.Resource.Secret.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -481,12 +492,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.Secret{}), V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.Secret{}), V1, conf.Resource.Secret) go c.Run(stopCh) } - if conf.Resource.ConfigMap { + if conf.Resource.ConfigMap.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -501,12 +512,12 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.ConfigMap{}), V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(api_v1.ConfigMap{}), V1, conf.Resource.ConfigMap) go c.Run(stopCh) } - if conf.Resource.Ingress { + if conf.Resource.Ingress.Enabled { informer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { @@ -521,7 +532,7 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { cache.Indexers{}, ) - c := newResourceController(kubeClient, eventHandlers, informer, objName(networking_v1.Ingress{}), NETWORKING_V1) + c := newResourceController(kubeClient, eventHandlers, informer, objName(networking_v1.Ingress{}), NETWORKING_V1, conf.Resource.Ingress) go c.Run(stopCh) } @@ -532,93 +543,113 @@ func Start(conf *config.Config, eventHandlers []handlers.Handler) { } // TODO: proper implementation of this function without the hack of multi ns -func newResourceController(client kubernetes.Interface, eventHandlers []handlers.Handler, informer cache.SharedIndexInformer, resourceType string, apiVersion string) *Controller { +func newResourceController(client kubernetes.Interface, eventHandlers []handlers.Handler, informer cache.SharedIndexInformer, resourceType string, apiVersion string, resourceConfig config.ResourceConfig) *Controller { queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) var newEvent Event + var eventWrapper EventWrapper + eventWrapper.ResourceConfig = &resourceConfig + var err error informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - var ok bool - newEvent.namespace = "" // namespace retrived in processItem incase namespace value is empty - newEvent.key, err = cache.MetaNamespaceKeyFunc(obj) - 
newEvent.eventType = "create" - newEvent.resourceType = resourceType - newEvent.apiVersion = apiVersion - newEvent.obj, ok = obj.(runtime.Object) - if !ok { - logrus.WithField("pkg", "statemonitor-"+resourceType).Errorf("cannot convert to runtime.Object for add on %v", obj) - } - if err != nil { - logrus.WithField("pkg", "statemonitor-"+resourceType).Errorf("cannot get key for add on %v", obj) - return - } + if resourceConfig.Enabled && (len(resourceConfig.IncludeEvenTypes) == 0 || slices.Contains(resourceConfig.IncludeEvenTypes, "add")) { + var ok bool + newEvent.namespace = "" // namespace retrived in processItem incase namespace value is empty + newEvent.key, err = cache.MetaNamespaceKeyFunc(obj) + newEvent.eventType = "create" + newEvent.resourceType = resourceType + newEvent.apiVersion = apiVersion + newEvent.obj, ok = obj.(runtime.Object) + if !ok { + logrus.WithField("pkg", "statemonitor-"+resourceType).Errorf("cannot convert to runtime.Object for add on %v", obj) + } + if err != nil { + logrus.WithField("pkg", "statemonitor-"+resourceType).Errorf("cannot get key for add on %v", obj) + return + } - if !slices.Contains(namespaces, strings.Split(newEvent.key, "/")[0]) { - logrus.Debugf("Skipping adding (namespaceconfig.ignore contains it) %v for %s", resourceType, newEvent.key) - return - } + if !slices.Contains(namespaces, strings.Split(newEvent.key, "/")[0]) { + logrus.Debugf("Skipping adding (namespaceconfig.ignore contains it) %v for %s", resourceType, newEvent.key) + return + } + + logrus.WithField("pkg", "statemonitor-"+resourceType).Infof("Processing add to %v: %s", resourceType, newEvent.key) - logrus.WithField("pkg", "statemonitor-"+resourceType).Infof("Processing add to %v: %s", resourceType, newEvent.key) - queue.Add(newEvent) + eventWrapper.Event = newEvent + queue.Add(eventWrapper) + } else { + logrus.Debugf("Skipping ADD (resource not enabled) %v for %s and is enabled - %t", resourceType, newEvent.key, resourceConfig.Enabled) + } }, UpdateFunc: func(old, new interface{}) { - var ok bool - newEvent.namespace = "" // namespace retrived in processItem incase namespace value is empty - newEvent.key, err = cache.MetaNamespaceKeyFunc(old) - newEvent.eventType = "update" - newEvent.resourceType = resourceType - newEvent.apiVersion = apiVersion - newEvent.obj, ok = new.(runtime.Object) - if !ok { - logrus.WithField("pkg", "statemonitor-"+resourceType).Errorf("cannot convert to runtime.Object for update on %v", new) - } - newEvent.oldObj, ok = old.(runtime.Object) - if !ok { - logrus.WithField("pkg", "statemonitor-"+resourceType).Errorf("cannot convert old to runtime.Object for update on %v", old) - } + if resourceConfig.Enabled && (len(resourceConfig.IncludeEvenTypes) == 0 || slices.Contains(resourceConfig.IncludeEvenTypes, "update")) { + var ok bool + newEvent.namespace = "" // namespace retrived in processItem incase namespace value is empty + newEvent.key, err = cache.MetaNamespaceKeyFunc(old) + newEvent.eventType = "update" + newEvent.resourceType = resourceType + newEvent.apiVersion = apiVersion + newEvent.obj, ok = new.(runtime.Object) + if !ok { + logrus.WithField("pkg", "statemonitor-"+resourceType).Errorf("cannot convert to runtime.Object for update on %v", new) + } + newEvent.oldObj, ok = old.(runtime.Object) + if !ok { + logrus.WithField("pkg", "statemonitor-"+resourceType).Errorf("cannot convert old to runtime.Object for update on %v", old) + } - if err != nil { - logrus.WithField("pkg", "statemonitor-"+resourceType).Errorf("cannot get key for update on %v", old) - 
return - } + if err != nil { + logrus.WithField("pkg", "statemonitor-"+resourceType).Errorf("cannot get key for update on %v", old) + return + } - if !slices.Contains(namespaces, strings.Split(newEvent.key, "/")[0]) { - logrus.Debugf("Skipping updating(namespaceconfig.ignore contains it) %v for %s", resourceType, newEvent.key) - return - } + if !slices.Contains(namespaces, strings.Split(newEvent.key, "/")[0]) { + logrus.Debugf("Skipping updating(namespaceconfig.ignore contains it) %v for %s", resourceType, newEvent.key) + return + } + + logrus.WithField("pkg", "statemonitor-"+resourceType).Infof("Processing update to %v: %s", resourceType, newEvent.key) - logrus.WithField("pkg", "statemonitor-"+resourceType).Infof("Processing update to %v: %s", resourceType, newEvent.key) - queue.Add(newEvent) + eventWrapper.Event = newEvent + queue.Add(eventWrapper) + } else { + logrus.Debugf("Skipping UPDATE (resource not enabled) %v for %s and is enabled - %t", resourceType, newEvent.key, resourceConfig.Enabled) + } }, DeleteFunc: func(obj interface{}) { - var ok bool - newEvent.namespace = "" // namespace retrived in processItem incase namespace value is empty - newEvent.key, err = cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - newEvent.eventType = "delete" - newEvent.resourceType = resourceType - newEvent.apiVersion = apiVersion - newEvent.obj, ok = obj.(runtime.Object) - if !ok { - logrus.WithField("pkg", "statemonitor-"+resourceType).Errorf("cannot convert to runtime.Object for delete on %v", obj) - } + if resourceConfig.Enabled && (len(resourceConfig.IncludeEvenTypes) == 0 || slices.Contains(resourceConfig.IncludeEvenTypes, "delete")) { + var ok bool + newEvent.namespace = "" // namespace retrived in processItem incase namespace value is empty + newEvent.key, err = cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + newEvent.eventType = "delete" + newEvent.resourceType = resourceType + newEvent.apiVersion = apiVersion + newEvent.obj, ok = obj.(runtime.Object) + if !ok { + logrus.WithField("pkg", "statemonitor-"+resourceType).Errorf("cannot convert to runtime.Object for delete on %v", obj) + } - if err != nil { - logrus.WithField("pkg", "statemonitor-"+resourceType).Errorf("cannot get key for delete on %v", obj) - return - } + if err != nil { + logrus.WithField("pkg", "statemonitor-"+resourceType).Errorf("cannot get key for delete on %v", obj) + return + } - if !slices.Contains(namespaces, strings.Split(newEvent.key, "/")[0]) { - logrus.Debugf("Skipping deletion (namespaceconfig.ignore contains it) %v for %s", resourceType, newEvent.key) - return - } + if !slices.Contains(namespaces, strings.Split(newEvent.key, "/")[0]) { + logrus.Debugf("Skipping deletion (namespaceconfig.ignore contains it) %v for %s", resourceType, newEvent.key) + return + } - logrus.WithField("pkg", "statemonitor-"+resourceType).Infof("Processing delete to %v: %s", resourceType, newEvent.key) - queue.Add(newEvent) + logrus.WithField("pkg", "statemonitor-"+resourceType).Infof("Processing delete to %v: %s", resourceType, newEvent.key) + eventWrapper.Event = newEvent + queue.Add(eventWrapper) + } else { + logrus.Debugf("Skipping deletion (resource not enabled) %v for %s and is enabled - %t", resourceType, newEvent.key, resourceConfig.Enabled) + } }, }) return &Controller{ - logger: logrus.WithField("pkg", "statemonitor-"+resourceType), + logger: logrus.WithField("pkg", resourceType+"-statemonitor"), clientset: client, informer: informer, queue: queue, @@ -669,7 +700,7 @@ func (c *Controller) processNextItem() bool { return false } 
 	defer c.queue.Done(newEvent)
-	err := c.processItem(newEvent.(Event))
+	err := c.processItem(newEvent.(EventWrapper))
 	if err == nil {
 		// No error, reset the ratelimit counters
 		c.queue.Forget(newEvent)
@@ -692,8 +723,9 @@ func (c *Controller) processNextItem() bool {
 	- Send alerts correspoding to events
 	- done
 */
-func (c *Controller) processItem(newEvent Event) error {
+func (c *Controller) processItem(eventWrapper EventWrapper) error {
 	// NOTE that obj will be nil on deletes!
+	newEvent := eventWrapper.Event
 	obj, _, err := c.informer.GetIndexer().GetByKey(newEvent.key)
 
 	if err != nil {
@@ -713,7 +745,11 @@ func (c *Controller) processItem(newEvent Event) error {
 	} else {
 		newEvent.namespace = objectMeta.Namespace
 	}
-
+	// Skip notifications while a deployment is in progress for this namespace
+	if ttlList.Contains(newEvent.namespace) {
+		logrus.Warnf("Deployment in progress for namespace %v, muting notifications; time left %v", newEvent.namespace, ttlList.GetTTL(newEvent.namespace).String())
+		return nil
+	}
 	// process events based on its type
 	switch newEvent.eventType {
 	case "create":
@@ -763,7 +799,7 @@ func (c *Controller) processItem(newEvent Event) error {
 			ApiVersion:   newEvent.apiVersion,
 			Status:       status,
 			Reason:       "Updated",
-			Diff:         compareObjects(newEvent),
+			Diff:         compareObjects(eventWrapper),
 		}
 
 		if kbEvent.Diff == "" {
@@ -797,9 +833,11 @@ func (c *Controller) processItem(newEvent Event) error {
 }
 
 // compareObjects compares two objects and returns the diff
-func compareObjects(e Event) string {
+func compareObjects(ew EventWrapper) string {
 	var patch jsondiff.Patch
 	var err error
+	ignorePath := append(confDiff.IgnorePath, ew.ResourceConfig.IgnorePath...)
+	e := ew.Event
 	oldObjj := e.oldObj
 	objj := e.obj
 
@@ -808,7 +846,7 @@ func compareObjects(e Event) string {
 	}
 
 	if patch == nil || err != nil {
-		patch, err = jsondiff.Compare(oldObjj, objj, jsondiff.Ignores(confDiff.IgnorePath...))
+		patch, err = jsondiff.Compare(oldObjj, objj, jsondiff.Ignores(ignorePath...))
 	}
 	//jsondiff.CompareJSON(source, target)
 
@@ -888,6 +926,7 @@ func getNamespaces(clientset kubernetes.Interface, namespacesConfig *config.Name
 	logrus.Infof("Namespaces to watch %v", namespaces)
 	return namespaces
 }
+
 func handleMetric(newEvent Event) {
 	mu.Lock()
 	defer mu.Unlock()
diff --git a/pkg/utils/ttlList.go b/pkg/utils/ttlList.go
new file mode 100644
index 00000000..9a78f4bb
--- /dev/null
+++ b/pkg/utils/ttlList.go
@@ -0,0 +1,142 @@
+package utils
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+)
+
+// ItemSlice is a named slice type for Items
+type ItemSlice []Item
+
+// ExtendIfExists reports whether a value exists in the slice and, when a
+// positive TTL is given, pushes that item's expiry forward.
+func (items ItemSlice) ExtendIfExists(value string, ttl time.Duration) bool {
+	// Index into the slice so the expiry update mutates the stored item,
+	// not a loop-local copy.
+	for i := range items {
+		if items[i].Value == value {
+			if ttl > 0 {
+				items[i].ExpiresAt = time.Now().Add(ttl)
+			}
+			return true
+		}
+	}
+	return false
+}
+
+// TTLList represents a list with TTL for each item.
+type TTLList struct {
+	mu    sync.Mutex
+	items ItemSlice
+}
+
+// Item represents an item in the list with a TTL.
+type Item struct {
+	Value     string
+	ExpiresAt time.Time
+}
+
+// NewTTLList creates a new TTLList.
+func NewTTLList() *TTLList {
+	l := &TTLList{}
+	go l.cleanupLoop()
+	return l
+}
+
+// GetTTL returns the remaining TTL for a namespace, or -1 if it is not in the list.
+func (l *TTLList) GetTTL(namespace string) time.Duration {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	lowercaseValue := strings.ToLower(namespace)
+	for _, item := range l.items {
+		if item.Value == lowercaseValue {
+			return time.Until(item.ExpiresAt)
+		}
+	}
+
+	return -1
+}
+
+// Add adds a new item to the list with a specified TTL.
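+// If the value is already present, its TTL is extended rather than adding a
+// duplicate entry. A minimal usage sketch (illustrative names, not taken from
+// this change):
+//
+//	list := NewTTLList()
+//	_ = list.Add("default", 2*time.Minute) // mute the "default" namespace for 2 minutes
+//	list.Contains("default")               // true until the TTL expires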
+func (l *TTLList) Add(value string, ttl time.Duration) error {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	lowercaseValue := strings.ToLower(value)
+	// Check if value already exists in the list; if so, just extend its TTL
+	if l.items.ExtendIfExists(lowercaseValue, ttl) {
+		return nil
+	}
+
+	l.items = append(l.items, Item{
+		Value:     lowercaseValue,
+		ExpiresAt: time.Now().Add(ttl),
+	})
+	return nil
+}
+
+// Contains checks if a non-expired value exists in the list.
+func (l *TTLList) Contains(value string) bool {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	lowercaseValue := strings.ToLower(value)
+	for _, item := range l.items {
+		if item.Value == lowercaseValue {
+			// Treat expired entries as absent even if the cleanup loop
+			// has not removed them yet.
+			return item.ExpiresAt.After(time.Now())
+		}
+	}
+	return false
+}
+
+// Reset removes all items from the list.
+func (l *TTLList) Reset() {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.items = ItemSlice{}
+}
+
+// Remove removes items from the list based on a matching value.
+func (l *TTLList) Remove(value string) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	// Values are stored lowercased, so normalize before matching.
+	value = strings.ToLower(value)
+	var remainingItems ItemSlice
+	for _, item := range l.items {
+		if item.Value != value {
+			remainingItems = append(remainingItems, item)
+		}
+	}
+	l.items = remainingItems
+}
+
+// cleanupLoop runs periodically to remove expired items.
+func (l *TTLList) cleanupLoop() {
+	for {
+		time.Sleep(2 * time.Second) // Cleanup interval
+		l.removeExpired()
+	}
+}
+
+// removeExpired removes expired items from the list.
+func (l *TTLList) removeExpired() {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	currentTime := time.Now()
+	var validItems []Item
+	for _, item := range l.items {
+		if item.ExpiresAt.After(currentTime) {
+			validItems = append(validItems, item)
+		}
+	}
+	l.items = validItems
+}
+
+type httpInput struct {
+	Namespace    string `json:"namespace"`
+	ExpiresAfter int8   `json:"expires_after_minutes"`
+}
+
+func StartUpdateRoutine(ctx context.Context, resource *TTLList, updateCh <-chan httpInput) {
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case value := <-updateCh:
+
+				if err := resource.Add(value.Namespace, time.Duration(value.ExpiresAfter)*time.Minute); err != nil {
+					fmt.Printf("Error setting value: %v\n", err)
+				}
+			}
+		}
+	}()
+}
diff --git a/pkg/utils/ttlList_test.go b/pkg/utils/ttlList_test.go
new file mode 100644
index 00000000..76f83e37
--- /dev/null
+++ b/pkg/utils/ttlList_test.go
@@ -0,0 +1,93 @@
+package utils
+
+import (
+	"testing"
+	"time"
+)
+
+func TestTTLList_Add(t *testing.T) {
+	list := NewTTLList()
+
+	// Add an item with a TTL of 1 second
+	list.Add("item1", time.Second)
+
+	// Check if the item exists in the list
+	if !list.Contains("item1") {
+		t.Errorf("Expected item1 to exist in the list")
+	}
+
+	// Wait for the item to expire
+	time.Sleep(time.Second)
+
+	// Check if the item has been removed from the list
+	if list.Contains("item1") {
+		t.Errorf("Expected item1 to be removed from the list")
+	}
+}
+
+func TestTTLList_Remove(t *testing.T) {
+	list := NewTTLList()
+
+	// Add an item to the list
+	list.Add("item1", time.Minute)
+
+	// Check if the item exists in the list
+	if !list.Contains("item1") {
+		t.Errorf("Expected item1 to exist in the list")
+	}
+
+	// Remove the item from the list
+	list.Remove("item1")
+
+	// Check if the item has been removed
+	if list.Contains("item1") {
+		t.Errorf("Expected item1 to be removed from the list")
+	}
+}
+
+func TestTTLList_Contains(t *testing.T) {
+	list := NewTTLList()
+
+	// Add an item to the list
+	list.Add("item1", time.Minute)
+
+	// Check if the item exists in the list
+	if !list.Contains("item1") {
+		t.Errorf("Expected item1 to exist in the list")
+	}
+
+	// Check if a non-existing item exists in the list
+	if list.Contains("item2") {
+		t.Errorf("Expected item2 to not exist in the list")
+	}
+}
+
+func TestTTLList_Extend(t *testing.T) {
+	list := NewTTLList()
+
+	// Add an item with a TTL of 1 second
+	list.Add("item1", time.Second)
+
+	// Check if the item exists in the list
+	if !list.Contains("item1") {
+		t.Errorf("Expected item1 to exist in the list")
+	}
+	// Wait briefly, then extend the TTL before the original one expires
+	time.Sleep(50 * time.Millisecond)
+
+	// Extend the TTL of the item by 2 seconds
+	list.Add("item1", 2*time.Second)
+	// Wait past the original 1 second TTL
+	time.Sleep(time.Second)
+	// At this point the original TTL has expired, but the extended TTL is still running
+	if !list.Contains("item1") {
+		t.Errorf("Expected item1 to exist in the list")
+	}
+	// Wait for the extended TTL to expire
+	time.Sleep(2 * time.Second)
+
+	// Check if the item has been removed from the list
+	if list.Contains("item1") {
+		t.Errorf("Expected item1 to be removed from the list")
+	}
+}
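+
+// A small illustrative sketch (additional, not part of the original change)
+// covering Reset, which clears every muted namespace at once.
+func TestTTLList_Reset(t *testing.T) {
+	list := NewTTLList()
+
+	// Add two items, then clear the whole list
+	list.Add("item1", time.Minute)
+	list.Add("item2", time.Minute)
+	list.Reset()
+
+	// Neither item should remain after Reset
+	if list.Contains("item1") || list.Contains("item2") {
+		t.Errorf("Expected all items to be removed after Reset")
+	}
+}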