From 17965ede8957c55df7eda322e05682424858a10c Mon Sep 17 00:00:00 2001
From: Alberto Gutierrez
Date: Tue, 10 Dec 2024 16:20:17 +0100
Subject: [PATCH] Remove kiali 1.57 out support (#844)

* Remove kiali 1.57 out support

* Remove 2.1 role

* Remove 2.1 from ossmc playbook
---
 .../kiali.clusterserviceversion.yaml | 12 +-
 playbooks/kiali-default-supported-images.yml | 2 -
 .../ossmconsole-default-supported-images.yml | 1 -
 roles/v1.57/kiali-deploy/defaults/main.yml | 338 -------
 .../only_accessible_namespaces.py | 34 -
 .../kiali-deploy/filter_plugins/stripnone.py | 28 -
 roles/v1.57/kiali-deploy/meta/main.yml | 2 -
 .../tasks/kubernetes/k8s-main.yml | 86 --
 roles/v1.57/kiali-deploy/tasks/main.yml | 912 ------------------
 .../openshift/os-get-kiali-route-url.yml | 48 -
 .../kiali-deploy/tasks/openshift/os-main.yml | 131 ---
 .../kiali-deploy/tasks/process-resource.yml | 15 -
 .../tasks/remove-clusterroles.yml | 24 -
 .../v1.57/kiali-deploy/tasks/remove-roles.yml | 27 -
 .../kiali-deploy/tasks/snake_camel_case.yaml | 178 ----
 .../tasks/update-status-progress.yml | 16 -
 .../kiali-deploy/tasks/update-status.yml | 8 -
 .../templates/kubernetes/configmap.yaml | 13 -
 .../templates/kubernetes/deployment.yaml | 190 ----
 .../templates/kubernetes/hpa.yaml | 14 -
 .../templates/kubernetes/ingress.yaml | 43 -
 .../kubernetes/role-controlplane.yaml | 27 -
 .../templates/kubernetes/role-viewer.yaml | 71 --
 .../templates/kubernetes/role.yaml | 77 --
 .../kubernetes/rolebinding-controlplane.yaml | 14 -
 .../templates/kubernetes/rolebinding.yaml | 17 -
 .../templates/kubernetes/service.yaml | 34 -
 .../templates/kubernetes/serviceaccount.yaml | 6 -
 .../templates/openshift/cabundle.yaml | 8 -
 .../templates/openshift/configmap.yaml | 13 -
 .../templates/openshift/console-links.yaml | 16 -
 .../templates/openshift/deployment.yaml | 198 ----
 .../kiali-deploy/templates/openshift/hpa.yaml | 14 -
 .../templates/openshift/oauth.yaml | 17 -
 .../openshift/role-controlplane.yaml | 27 -
 .../templates/openshift/role-viewer.yaml | 88 --
 .../templates/openshift/role.yaml | 95 --
 .../openshift/rolebinding-controlplane.yaml | 14 -
 .../templates/openshift/rolebinding.yaml | 17 -
 .../templates/openshift/route.yaml | 22 -
 .../templates/openshift/service.yaml | 32 -
 .../templates/openshift/serviceaccount.yaml | 6 -
 roles/v1.57/kiali-deploy/vars/main.yml | 110 ---
 roles/v1.57/kiali-remove/defaults/main.yml | 12 -
 .../kiali-remove/filter_plugins/stripnone.py | 28 -
 roles/v1.57/kiali-remove/meta/main.yml | 2 -
 roles/v1.57/kiali-remove/tasks/main.yml | 290 ------
 .../tasks/remove-clusterroles.yml | 24 -
 roles/v1.57/kiali-remove/vars/main.yml | 9 -
 roles/v2.1/kiali-deploy/defaults/main.yml | 341 -------
 .../filter_plugins/parse_selectors.py | 110 ---
 .../kiali-deploy/filter_plugins/stripnone.py | 28 -
 roles/v2.1/kiali-deploy/meta/main.yml | 2 -
 .../tasks/clusterroles-to-remove.yml | 25 -
 .../get-discovery-selector-namespaces.yml | 65 --
 .../tasks/kubernetes/k8s-main.yml | 61 --
 roles/v2.1/kiali-deploy/tasks/main.yml | 904 -----------------
 .../openshift/os-get-kiali-route-url.yml | 48 -
 .../kiali-deploy/tasks/openshift/os-main.yml | 120 ---
 .../kiali-deploy/tasks/process-resource.yml | 31 -
 .../tasks/remove-clusterroles.yml | 9 -
 .../v2.1/kiali-deploy/tasks/remove-roles.yml | 27 -
 .../kiali-deploy/tasks/snake_camel_case.yaml | 152 ---
 .../tasks/update-status-progress.yml | 16 -
 .../v2.1/kiali-deploy/tasks/update-status.yml | 8 -
 .../templates/kubernetes/configmap.yaml | 13 -
 .../templates/kubernetes/deployment.yaml | 220 -----
.../templates/kubernetes/hpa.yaml | 14 - .../templates/kubernetes/ingress.yaml | 43 - .../templates/kubernetes/role-viewer.yaml | 78 -- .../templates/kubernetes/role.yaml | 84 -- .../templates/kubernetes/rolebinding.yaml | 17 - .../templates/kubernetes/service.yaml | 43 - .../templates/kubernetes/serviceaccount.yaml | 6 - .../templates/openshift/cabundle.yaml | 8 - .../openshift/clusterrole-oauth.yaml | 13 - .../openshift/clusterrolebinding-oauth.yaml | 13 - .../templates/openshift/configmap.yaml | 13 - .../templates/openshift/console-links.yaml | 16 - .../templates/openshift/deployment.yaml | 228 ----- .../kiali-deploy/templates/openshift/hpa.yaml | 14 - .../templates/openshift/oauth.yaml | 17 - .../templates/openshift/role-viewer.yaml | 95 -- .../templates/openshift/role.yaml | 102 -- .../templates/openshift/rolebinding.yaml | 17 - .../templates/openshift/route.yaml | 22 - .../templates/openshift/service.yaml | 41 - .../templates/openshift/serviceaccount.yaml | 6 - roles/v2.1/kiali-deploy/vars/main.yml | 116 --- roles/v2.1/kiali-remove/defaults/main.yml | 11 - .../kiali-remove/filter_plugins/stripnone.py | 28 - roles/v2.1/kiali-remove/meta/main.yml | 2 - .../tasks/clusterroles-to-remove.yml | 25 - roles/v2.1/kiali-remove/tasks/main.yml | 240 ----- .../tasks/os-resources-to-remove.yml | 17 - .../tasks/resources-to-remove.yml | 67 -- roles/v2.1/kiali-remove/vars/main.yml | 9 - .../v2.1/ossmconsole-deploy/defaults/main.yml | 26 - .../filter_plugins/stripnone.py | 28 - roles/v2.1/ossmconsole-deploy/meta/main.yml | 2 - roles/v2.1/ossmconsole-deploy/tasks/main.yml | 413 -------- .../tasks/openshift/os-main.yml | 13 - .../tasks/process-resource.yml | 31 - .../tasks/update-status-progress.yml | 16 - .../tasks/update-status.yml | 8 - .../templates/openshift/configmap-nginx.yaml | 28 - .../templates/openshift/configmap-plugin.yaml | 13 - .../templates/openshift/consoleplugin.yaml | 23 - .../templates/openshift/deployment.yaml | 71 -- .../templates/openshift/service.yaml | 19 - roles/v2.1/ossmconsole-deploy/vars/main.yml | 30 - .../v2.1/ossmconsole-remove/defaults/main.yml | 3 - .../filter_plugins/stripnone.py | 28 - roles/v2.1/ossmconsole-remove/meta/main.yml | 2 - roles/v2.1/ossmconsole-remove/tasks/main.yml | 72 -- .../tasks/resources-to-remove.yml | 29 - roles/v2.1/ossmconsole-remove/vars/main.yml | 7 - 117 files changed, 3 insertions(+), 7854 deletions(-) delete mode 100644 roles/v1.57/kiali-deploy/defaults/main.yml delete mode 100644 roles/v1.57/kiali-deploy/filter_plugins/only_accessible_namespaces.py delete mode 100644 roles/v1.57/kiali-deploy/filter_plugins/stripnone.py delete mode 100644 roles/v1.57/kiali-deploy/meta/main.yml delete mode 100644 roles/v1.57/kiali-deploy/tasks/kubernetes/k8s-main.yml delete mode 100644 roles/v1.57/kiali-deploy/tasks/main.yml delete mode 100644 roles/v1.57/kiali-deploy/tasks/openshift/os-get-kiali-route-url.yml delete mode 100644 roles/v1.57/kiali-deploy/tasks/openshift/os-main.yml delete mode 100644 roles/v1.57/kiali-deploy/tasks/process-resource.yml delete mode 100644 roles/v1.57/kiali-deploy/tasks/remove-clusterroles.yml delete mode 100644 roles/v1.57/kiali-deploy/tasks/remove-roles.yml delete mode 100644 roles/v1.57/kiali-deploy/tasks/snake_camel_case.yaml delete mode 100644 roles/v1.57/kiali-deploy/tasks/update-status-progress.yml delete mode 100644 roles/v1.57/kiali-deploy/tasks/update-status.yml delete mode 100644 roles/v1.57/kiali-deploy/templates/kubernetes/configmap.yaml delete mode 100644 
roles/v1.57/kiali-deploy/templates/kubernetes/deployment.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/kubernetes/hpa.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/kubernetes/ingress.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/kubernetes/role-controlplane.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/kubernetes/role-viewer.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/kubernetes/role.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/kubernetes/rolebinding-controlplane.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/kubernetes/rolebinding.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/kubernetes/service.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/kubernetes/serviceaccount.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/openshift/cabundle.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/openshift/configmap.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/openshift/console-links.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/openshift/deployment.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/openshift/hpa.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/openshift/oauth.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/openshift/role-controlplane.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/openshift/role-viewer.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/openshift/role.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/openshift/rolebinding-controlplane.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/openshift/rolebinding.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/openshift/route.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/openshift/service.yaml delete mode 100644 roles/v1.57/kiali-deploy/templates/openshift/serviceaccount.yaml delete mode 100644 roles/v1.57/kiali-deploy/vars/main.yml delete mode 100644 roles/v1.57/kiali-remove/defaults/main.yml delete mode 100644 roles/v1.57/kiali-remove/filter_plugins/stripnone.py delete mode 100644 roles/v1.57/kiali-remove/meta/main.yml delete mode 100644 roles/v1.57/kiali-remove/tasks/main.yml delete mode 100644 roles/v1.57/kiali-remove/tasks/remove-clusterroles.yml delete mode 100644 roles/v1.57/kiali-remove/vars/main.yml delete mode 100644 roles/v2.1/kiali-deploy/defaults/main.yml delete mode 100644 roles/v2.1/kiali-deploy/filter_plugins/parse_selectors.py delete mode 100644 roles/v2.1/kiali-deploy/filter_plugins/stripnone.py delete mode 100644 roles/v2.1/kiali-deploy/meta/main.yml delete mode 100644 roles/v2.1/kiali-deploy/tasks/clusterroles-to-remove.yml delete mode 100644 roles/v2.1/kiali-deploy/tasks/get-discovery-selector-namespaces.yml delete mode 100644 roles/v2.1/kiali-deploy/tasks/kubernetes/k8s-main.yml delete mode 100644 roles/v2.1/kiali-deploy/tasks/main.yml delete mode 100644 roles/v2.1/kiali-deploy/tasks/openshift/os-get-kiali-route-url.yml delete mode 100644 roles/v2.1/kiali-deploy/tasks/openshift/os-main.yml delete mode 100644 roles/v2.1/kiali-deploy/tasks/process-resource.yml delete mode 100644 roles/v2.1/kiali-deploy/tasks/remove-clusterroles.yml delete mode 100644 roles/v2.1/kiali-deploy/tasks/remove-roles.yml delete mode 100644 roles/v2.1/kiali-deploy/tasks/snake_camel_case.yaml delete mode 100644 roles/v2.1/kiali-deploy/tasks/update-status-progress.yml delete mode 100644 roles/v2.1/kiali-deploy/tasks/update-status.yml delete mode 100644 
roles/v2.1/kiali-deploy/templates/kubernetes/configmap.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/kubernetes/deployment.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/kubernetes/hpa.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/kubernetes/ingress.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/kubernetes/role-viewer.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/kubernetes/role.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/kubernetes/rolebinding.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/kubernetes/service.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/kubernetes/serviceaccount.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/openshift/cabundle.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/openshift/clusterrole-oauth.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/openshift/clusterrolebinding-oauth.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/openshift/configmap.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/openshift/console-links.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/openshift/deployment.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/openshift/hpa.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/openshift/oauth.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/openshift/role-viewer.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/openshift/role.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/openshift/rolebinding.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/openshift/route.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/openshift/service.yaml delete mode 100644 roles/v2.1/kiali-deploy/templates/openshift/serviceaccount.yaml delete mode 100644 roles/v2.1/kiali-deploy/vars/main.yml delete mode 100644 roles/v2.1/kiali-remove/defaults/main.yml delete mode 100644 roles/v2.1/kiali-remove/filter_plugins/stripnone.py delete mode 100644 roles/v2.1/kiali-remove/meta/main.yml delete mode 100644 roles/v2.1/kiali-remove/tasks/clusterroles-to-remove.yml delete mode 100644 roles/v2.1/kiali-remove/tasks/main.yml delete mode 100644 roles/v2.1/kiali-remove/tasks/os-resources-to-remove.yml delete mode 100644 roles/v2.1/kiali-remove/tasks/resources-to-remove.yml delete mode 100644 roles/v2.1/kiali-remove/vars/main.yml delete mode 100644 roles/v2.1/ossmconsole-deploy/defaults/main.yml delete mode 100644 roles/v2.1/ossmconsole-deploy/filter_plugins/stripnone.py delete mode 100644 roles/v2.1/ossmconsole-deploy/meta/main.yml delete mode 100644 roles/v2.1/ossmconsole-deploy/tasks/main.yml delete mode 100644 roles/v2.1/ossmconsole-deploy/tasks/openshift/os-main.yml delete mode 100644 roles/v2.1/ossmconsole-deploy/tasks/process-resource.yml delete mode 100644 roles/v2.1/ossmconsole-deploy/tasks/update-status-progress.yml delete mode 100644 roles/v2.1/ossmconsole-deploy/tasks/update-status.yml delete mode 100644 roles/v2.1/ossmconsole-deploy/templates/openshift/configmap-nginx.yaml delete mode 100644 roles/v2.1/ossmconsole-deploy/templates/openshift/configmap-plugin.yaml delete mode 100644 roles/v2.1/ossmconsole-deploy/templates/openshift/consoleplugin.yaml delete mode 100644 roles/v2.1/ossmconsole-deploy/templates/openshift/deployment.yaml delete mode 100644 roles/v2.1/ossmconsole-deploy/templates/openshift/service.yaml delete mode 100644 roles/v2.1/ossmconsole-deploy/vars/main.yml delete mode 100644 roles/v2.1/ossmconsole-remove/defaults/main.yml delete mode 100644 
roles/v2.1/ossmconsole-remove/filter_plugins/stripnone.py
 delete mode 100644 roles/v2.1/ossmconsole-remove/meta/main.yml
 delete mode 100644 roles/v2.1/ossmconsole-remove/tasks/main.yml
 delete mode 100644 roles/v2.1/ossmconsole-remove/tasks/resources-to-remove.yml
 delete mode 100644 roles/v2.1/ossmconsole-remove/vars/main.yml

diff --git a/manifests/kiali-ossm/manifests/kiali.clusterserviceversion.yaml b/manifests/kiali-ossm/manifests/kiali.clusterserviceversion.yaml
index 630723491..90b6b2b24 100644
--- a/manifests/kiali-ossm/manifests/kiali.clusterserviceversion.yaml
+++ b/manifests/kiali-ossm/manifests/kiali.clusterserviceversion.yaml
@@ -334,19 +334,13 @@ spec:
 - name: WATCHES_FILE
   value: "watches-os.yaml"
 - name: RELATED_IMAGE_kiali_default
-  value: "${KIALI_2_1}"
-- name: RELATED_IMAGE_kiali_v2_1
-  value: "${KIALI_2_1}"
+  value: "${KIALI_1_73}"
 - name: RELATED_IMAGE_kiali_v1_73
   value: "${KIALI_1_73}"
 - name: RELATED_IMAGE_kiali_v1_65
-  value: "${KIALI_1_65}"
-- name: RELATED_IMAGE_kiali_v1_57
-  value: "${KIALI_1_57}"
+  value: "${KIALI_1_65}"
 - name: RELATED_IMAGE_ossmconsole_default
-  value: "${OSSMCONSOLE_2_1}"
-- name: RELATED_IMAGE_ossmconsole_v2_1
-  value: "${OSSMCONSOLE_2_1}"
+  value: "${OSSMCONSOLE_1_73}"
 - name: RELATED_IMAGE_ossmconsole_v1_73
   value: "${OSSMCONSOLE_1_73}"
 ports:
diff --git a/playbooks/kiali-default-supported-images.yml b/playbooks/kiali-default-supported-images.yml
index 0d593ea7b..41450d819 100644
--- a/playbooks/kiali-default-supported-images.yml
+++ b/playbooks/kiali-default-supported-images.yml
@@ -1,5 +1,3 @@
 default: {"image_name": "quay.io/kiali/kiali", "image_version": "operator_version"}
-v1.57: {"image_name": "quay.io/kiali/kiali", "image_version": "v1.57"}
 v1.65: {"image_name": "quay.io/kiali/kiali", "image_version": "v1.65"}
 v1.73: {"image_name": "quay.io/kiali/kiali", "image_version": "v1.73"}
-v2.1: {"image_name": "quay.io/kiali/kiali", "image_version": "v2.1"}
diff --git a/playbooks/ossmconsole-default-supported-images.yml b/playbooks/ossmconsole-default-supported-images.yml
index 46da7014c..61d37cf8a 100644
--- a/playbooks/ossmconsole-default-supported-images.yml
+++ b/playbooks/ossmconsole-default-supported-images.yml
@@ -1,3 +1,2 @@
 default: {"imageName": "quay.io/kiali/ossmconsole", "imageVersion": "operator_version"}
-v2.1: {"imageName": "quay.io/kiali/ossmconsole", "imageVersion": "v2.1"}
 v1.73: {"imageName": "quay.io/kiali/ossmconsole", "imageVersion": "v1.73"}
diff --git a/roles/v1.57/kiali-deploy/defaults/main.yml b/roles/v1.57/kiali-deploy/defaults/main.yml
deleted file mode 100644
index 954ad79b2..000000000
--- a/roles/v1.57/kiali-deploy/defaults/main.yml
+++ /dev/null
@@ -1,338 +0,0 @@
-# Defaults for all user-facing Kiali settings. These are documented in kiali_cr.yaml.
-#
-# Note that these are under the main dictionary group "kiali_defaults".
-# The actual vars used by the role are found in the vars/ directory.
-# These defaults (the dictionaries under "kiali_defaults") are merged into the vars such that the values
-# below (e.g. deployment, server, etc.) are merged in rather than completely replaced by user-supplied values.
-#
-# If new groups are added to these defaults, you must remember to add the merge code to vars/main.yml.
- -kiali_defaults: - installation_tag: "" - istio_namespace: "" - version: "default" - - additional_display_details: - - title: "API Documentation" - annotation: "kiali.io/api-spec" - icon_annotation: "kiali.io/api-type" - - api: - namespaces: - exclude: - - "^istio-operator" - - "^kube-.*" - - "^openshift.*" - - "^ibm.*" - - "^kiali-operator" - #label_selector: - - auth: - openid: - additional_request_params: {} - allowed_domains: [] - api_proxy: "" - api_proxy_ca_data: "" - api_token: "id_token" - authentication_timeout: 300 - authorization_endpoint: "" - client_id: "" - disable_rbac: false - http_proxy: "" - https_proxy: "" - insecure_skip_verify_tls: false - issuer_uri: "" - scopes: ["openid", "profile", "email"] - username_claim: "sub" - openshift: - auth_timeout: 10 - client_id_prefix: "kiali" - #token_inactivity_timeout: - #token_max_age: - strategy: "" - - custom_dashboards: [] - - deployment: - accessible_namespaces: ["^((?!(istio-operator|kube-.*|openshift.*|ibm.*|kiali-operator)).)*$"] - #additional_service_yaml: - affinity: - node: {} - pod: {} - pod_anti: {} - configmap_annotations: {} - custom_secrets: [] - host_aliases: [] - hpa: - api_version: "" - spec: {} - image_digest: "" - image_name: "" - image_pull_policy: "IfNotPresent" - image_pull_secrets: [] - image_version: "" - ingress: - additional_labels: {} - class_name: "nginx" - #enabled: - #override_yaml: - instance_name: "kiali" - logger: - log_format: "text" - log_level: "info" - sampler_rate: "1" - time_field_format: "2006-01-02T15:04:05Z07:00" - namespace: "" - node_selector: {} - pod_annotations: {} - pod_labels: {} - priority_class_name: "" - replicas: 1 - #resources: - secret_name: "kiali" - security_context: {} - service_annotations: {} - #service_type: "NodePort" - tolerations: [] - version_label: "" - view_only_mode: false - - external_services: - custom_dashboards: - discovery_auto_threshold: 10 - discovery_enabled: "auto" - enabled: true - is_core: false - namespace_label: "" - prometheus: - auth: - ca_file: "" - insecure_skip_verify: false - password: "" - token: "" - type: "none" - use_kiali_token: false - username: "" - cache_duration: 7 - cache_enabled: true - cache_expiration: 300 - custom_headers: {} - health_check_url: "" - is_core: true - query_scope: {} - thanos_proxy: - enabled: false - retention_period: "7d" - scrape_interval: "30s" - url: "" - grafana: - auth: - ca_file: "" - insecure_skip_verify: false - password: "" - token: "" - type: "none" - use_kiali_token: false - username: "" - dashboards: - - name: "Istio Service Dashboard" - variables: - namespace: "var-namespace" - service: "var-service" - - name: "Istio Workload Dashboard" - variables: - namespace: "var-namespace" - workload: "var-workload" - - name: "Istio Mesh Dashboard" - - name: "Istio Control Plane Dashboard" - - name: "Istio Performance Dashboard" - - name: "Istio Wasm Extension Dashboard" - enabled: true - health_check_url: "" - #in_cluster_url - is_core: false - url: "" - istio: - component_status: - components: - - app_label: "istiod" - is_core: true - is_proxy: false - namespace: "" - - app_label: "istio-ingressgateway" - is_core: true - is_proxy: true - namespace: "" - - app_label: "istio-egressgateway" - is_core: false - is_proxy: true - namespace: "" - enabled: true - config_map_name: "istio" - envoy_admin_local_port: 15000 - #istio_canary_revision: - #current: prod - #upgrade: canary - istio_identity_domain: "svc.cluster.local" - istio_injection_annotation: "sidecar.istio.io/inject" - istio_sidecar_annotation: 
"sidecar.istio.io/status" - istio_sidecar_injector_config_map_name: "istio-sidecar-injector" - istiod_deployment_name: "istiod" - istiod_pod_monitoring_port: 15014 - root_namespace: "" - url_service_version: "" - prometheus: - auth: - ca_file: "" - insecure_skip_verify: false - password: "" - token: "" - type: "none" - use_kiali_token: false - username: "" - cache_duration: 7 - cache_enabled: true - cache_expiration: 300 - custom_headers: {} - health_check_url: "" - is_core: true - query_scope: {} - thanos_proxy: - enabled: false - retention_period: "7d" - scrape_interval: "30s" - url: "" - tracing: - auth: - ca_file: "" - insecure_skip_verify: false - password: "" - token: "" - type: "none" - use_kiali_token: false - username: "" - enabled: true - in_cluster_url: "" - is_core: false - namespace_selector: true - query_scope: {} - url: "" - use_grpc: true - whitelist_istio_system: ["jaeger-query", "istio-ingressgateway"] - - health_config: - rate: [] - - identity: {} - #cert_file: - #private_key_file: - - istio_labels: - app_label_name: "app" - injection_label_name: "istio-injection" - injection_label_rev: "istio.io/rev" - version_label_name: "version" - - kiali_feature_flags: - certificates_information_indicators: - enabled: true - secrets: - - cacerts - - istio-ca-secret - clustering: - enabled: true - disabled_features: [] - istio_injection_action: true - istio_upgrade_action: false - ui_defaults: - graph: - find_options: - - description: "Find: slow edges (> 1s)" - expression: "rt > 1000" - - description: "Find: unhealthy nodes" - expression: "! healthy" - - description: "Find: unknown nodes" - expression: "name = unknown" - - description: "Find: nodes with the 2 top rankings" - expression: "rank <= 2" - hide_options: - - description: "Hide: healthy nodes" - expression: "healthy" - - description: "Hide: unknown nodes" - expression: "name = unknown" - - description: "Hide: nodes ranked lower than the 2 top rankings" - expression: "rank > 2" - settings: - font_label: 13 - min_font_badge: 7 - min_font_label: 10 - traffic: - grpc: "requests" - http: "requests" - tcp: "sent" - metrics_inbound: - aggregations: [] - metrics_outbound: - aggregations: [] - metrics_per_refresh: "1m" - namespaces: [] - refresh_interval: "60s" - validations: - ignore: ["KIA1201"] - - kubernetes_config: - burst: 200 - cache_duration: 300 - cache_enabled: true - cache_istio_types: - - "AuthorizationPolicy" - - "DestinationRule" - - "EnvoyFilter" - - "Gateway" - - "PeerAuthentication" - - "RequestAuthentication" - - "ServiceEntry" - - "Sidecar" - - "VirtualService" - - "WorkloadEntry" - - "WorkloadGroup" - - cache_namespaces: - - ".*" - cache_token_namespace_duration: 10 - excluded_workloads: - - "CronJob" - - "DeploymentConfig" - - "Job" - - "ReplicationController" - qps: 175 - - login_token: - expiration_seconds: 86400 - signing_key: "" - - server: - address: "" - audit_log: true - cors_allow_all: false - gzip_enabled: true - observability: - metrics: - enabled: true - port: 9090 - tracing: - collector_url: http://jaeger-collector.istio-system:14268/api/traces - enabled: false - port: 20001 - web_fqdn: "" - web_history_mode: "" - web_port: "" - web_root: "" - web_schema: "" - -# These variables are outside of the kiali_defaults. Their values will be -# auto-detected by the role and are not meant to be set by the user. -# However, for debugging purposes you can change these. 
- -is_k8s: false -is_openshift: false diff --git a/roles/v1.57/kiali-deploy/filter_plugins/only_accessible_namespaces.py b/roles/v1.57/kiali-deploy/filter_plugins/only_accessible_namespaces.py deleted file mode 100644 index bd171810f..000000000 --- a/roles/v1.57/kiali-deploy/filter_plugins/only_accessible_namespaces.py +++ /dev/null @@ -1,34 +0,0 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} - -import re - -# Given a list of all known namespaces (value) and a list of accessible namespace regular expressions, -# filter out all non-accessible namespaces (i.e. return a list of only the namespaces that match an accessible namespace regex). -def only_accessible_namespaces(value, accessible_namespaces=[]): - - # cache the regex patterns for speed - accessible_namespace_regex_patterns = [] - for accessible_namespace_regex in accessible_namespaces: - accessible_namespace_regex_patterns.append(re.compile('^' + accessible_namespace_regex + '$')) - - all_accessible_namespaces = [] - for namespace in value: - for p in accessible_namespace_regex_patterns: - if re.match(p, namespace): - all_accessible_namespaces.append(namespace) - break - return all_accessible_namespaces - -# ---- Ansible filters ---- -class FilterModule(object): - def filters(self): - return { - 'only_accessible_namespaces': only_accessible_namespaces - } diff --git a/roles/v1.57/kiali-deploy/filter_plugins/stripnone.py b/roles/v1.57/kiali-deploy/filter_plugins/stripnone.py deleted file mode 100644 index 4dbd53033..000000000 --- a/roles/v1.57/kiali-deploy/filter_plugins/stripnone.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} - -# Process recursively the given value if it is a dict and remove all keys that have a None value -def strip_none(value): - if isinstance(value, dict): - dicts = {} - for k,v in value.items(): - if isinstance(v, dict): - dicts[k] = strip_none(v) - elif v is not None: - dicts[k] = v - return dicts - else: - return value - -# ---- Ansible filters ---- -class FilterModule(object): - def filters(self): - return { - 'stripnone': strip_none - } diff --git a/roles/v1.57/kiali-deploy/meta/main.yml b/roles/v1.57/kiali-deploy/meta/main.yml deleted file mode 100644 index e9334e3c7..000000000 --- a/roles/v1.57/kiali-deploy/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -collections: -- kubernetes.core diff --git a/roles/v1.57/kiali-deploy/tasks/kubernetes/k8s-main.yml b/roles/v1.57/kiali-deploy/tasks/kubernetes/k8s-main.yml deleted file mode 100644 index fcccfbd79..000000000 --- a/roles/v1.57/kiali-deploy/tasks/kubernetes/k8s-main.yml +++ /dev/null @@ -1,86 +0,0 @@ -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Creating core resources" - when: - - is_k8s == True - -- name: Create Kiali objects on Kubernetes - include_tasks: process-resource.yml - vars: - process_resource_cluster: "kubernetes" - role_namespaces: "{{ [ kiali_vars.deployment.namespace ] }}" - loop: - - serviceaccount - - configmap - - "{{ 'role-viewer' if kiali_vars.deployment.view_only_mode|bool == True else 'role' }}" - - role-controlplane - - rolebinding - - rolebinding-controlplane - - deployment - - service - - "{{ 'hpa' if kiali_vars.deployment.hpa.spec | length > 0 else '' }}" - 
loop_control: - loop_var: process_resource_item - when: - - is_k8s == True - - process_resource_item != '' - -- name: Remove HPA if disabled on Kubernetes - k8s: - state: absent - api_version: "{{ kiali_vars.deployment.hpa.api_version }}" - kind: "HorizontalPodAutoscaler" - namespace: "{{ kiali_vars.deployment.namespace }}" - name: "{{ kiali_vars.deployment.instance_name }}" - when: - - is_k8s == True - - kiali_vars.deployment.hpa.spec | length == 0 - -- name: Create Ingress on Kubernetes if enabled - include_tasks: process-resource.yml - vars: - process_resource_cluster: "kubernetes" - role_namespace: "{{ kiali_vars.deployment.namespace }}" - loop: - - ingress - loop_control: - loop_var: process_resource_item - when: - - is_k8s == True - - kiali_vars.deployment.ingress.enabled|bool == True - -- name: Delete Ingress on Kubernetes if disabled - k8s: - state: absent - api_version: "networking.k8s.io/{{ 'v1' if (lookup(k8s_plugin, kind='Ingress', api_version='networking.k8s.io/v1', errors='ignore') is iterable) else 'v1beta1' }}" - kind: "Ingress" - namespace: "{{ kiali_vars.deployment.namespace }}" - name: "{{ kiali_vars.deployment.instance_name }}" - when: - - is_k8s == True - - kiali_vars.deployment.ingress.enabled|bool == False - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Creating additional roles" - when: - - is_k8s == True - - '"**" not in kiali_vars.deployment.accessible_namespaces' - -- name: Create additional Kiali roles on all accessible namespaces on Kubernetes - vars: - role_namespaces: "{{ kiali_vars.deployment.accessible_namespaces }}" - k8s: - definition: "{{ lookup('template', 'templates/kubernetes/' + ('role-viewer' if kiali_vars.deployment.view_only_mode|bool == True else 'role') + '.yaml') }}" - when: - - is_k8s == True - - '"**" not in kiali_vars.deployment.accessible_namespaces' - -- name: Create additional Kiali role bindings on all accessible namespaces on Kubernetes - vars: - role_namespaces: "{{ kiali_vars.deployment.accessible_namespaces }}" - k8s: - definition: "{{ lookup('template', 'templates/kubernetes/rolebinding.yaml') }}" - when: - - is_k8s == True - - '"**" not in kiali_vars.deployment.accessible_namespaces' diff --git a/roles/v1.57/kiali-deploy/tasks/main.yml b/roles/v1.57/kiali-deploy/tasks/main.yml deleted file mode 100644 index d04239a21..000000000 --- a/roles/v1.57/kiali-deploy/tasks/main.yml +++ /dev/null @@ -1,912 +0,0 @@ -- set_fact: - k8s_plugin: kubernetes.core.k8s - -- name: Get the original CR as-is for the camelCase keys and so we can update its status field - set_fact: - current_cr: "{{ _kiali_io_kiali }}" - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Initializing" - status_vars: - specVersion: "{{ kiali_vars.version }}" - deployment: - accessibleNamespaces: null - -- name: Get information about the cluster - set_fact: - api_groups: "{{ lookup(k8s_plugin, cluster_info='api_groups') }}" - when: - - is_openshift == False - - is_k8s == False - -- name: Determine the cluster type - set_fact: - is_openshift: "{{ True if 'operator.openshift.io' in api_groups else False }}" - is_k8s: "{{ False if 'operator.openshift.io' in api_groups else True }}" - when: - - is_openshift == False - - is_k8s == False - -# Indicate what kind of cluster we are in (OpenShift or Kubernetes). 
-- debug: - msg: "CLUSTER TYPE: is_openshift={{ is_openshift }}; is_k8s={{ is_k8s }}" -- fail: - msg: "Cannot determine what type of cluster we are in" - when: - - is_openshift == False - - is_k8s == False - -- name: Determine the Kubernetes version - set_fact: - k8s_version: "{{ lookup(k8s_plugin, cluster_info='version').kubernetes.gitVersion | regex_replace('^v', '') }}" - ignore_errors: yes - -- name: Determine the OpenShift version - vars: - kube_apiserver_cluster_op_raw: "{{ lookup(k8s_plugin, api_version='config.openshift.io/v1', kind='ClusterOperator', resource_name='kube-apiserver') | default({}) }}" - ri_query: "status.versions[?name == 'raw-internal'].version" - set_fact: - openshift_version: "{{ kube_apiserver_cluster_op_raw | json_query(ri_query) | join }}" - when: - - is_openshift == True - -- name: Determine the Istio implementation - set_fact: - is_maistra: "{{ True if 'maistra.io' in api_groups else False }}" - -- name: Get information about the operator - k8s_info: - api_version: v1 - kind: Pod - namespace: "{{ lookup('env', 'POD_NAMESPACE') }}" - name: "{{ lookup('env', 'POD_NAME') }}" - register: operator_pod_raw - ignore_errors: yes -- name: Determine the version of the operator based on the version label - set_fact: - operator_version: "{{ operator_pod_raw.resources[0].metadata.labels.version }}" - when: - - operator_pod_raw is defined - - operator_pod_raw.resources[0] is defined - - operator_pod_raw.resources[0].metadata is defined - - operator_pod_raw.resources[0].metadata.labels is defined - - operator_pod_raw.resources[0].metadata.labels.version is defined -- set_fact: - operator_version: "unknown" - when: - - operator_version is not defined -- debug: - msg: "OPERATOR VERSION: [{{ operator_version }}]" - -# To remain backward compatible with some settings that have changed in later releases, -# let's take some deprecated settings and set the current settings appropriately. - -- name: deployment.ingress_enabled is deprecated but if deployment.ingress.enabled is not set then use the old setting - set_fact: - kiali_vars: | - {% set ie=kiali_vars['deployment'].pop('ingress_enabled') %} - {{ kiali_vars | combine({'deployment': {'ingress': {'enabled': ie|bool }}}, recursive=True) }} - when: - - kiali_vars.deployment.ingress_enabled is defined - - kiali_vars.deployment.ingress is not defined or kiali_vars.deployment.ingress.enabled is not defined - -# convert snake case to camelCase where appropriate -- include_tasks: snake_camel_case.yaml - -- name: Print some debug information - vars: - msg: | - Kiali Variables: - -------------------------------- - {{ kiali_vars | to_nice_yaml }} - debug: - msg: "{{ msg.split('\n') }}" - -- name: Set default deployment namespace to the same namespace where the CR lives - set_fact: - kiali_vars: "{{ kiali_vars | combine({'deployment': {'namespace': current_cr.metadata.namespace}}, recursive=True) }}" - when: - - kiali_vars.deployment.namespace is not defined or kiali_vars.deployment.namespace == "" - -# Never allow the deployment.instance_name or deployment.namespace to change to avoid leaking resources - to uninstall resources you must delete the Kiali CR -- name: Ensure the deployment.instance_name has not changed - fail: - msg: "The deployment.instance_name cannot be changed to a different value. It was [{{ current_cr.status.deployment.instanceName }}] but is now [{{ kiali_vars.deployment.instance_name }}]. In order to install Kiali with a different deployment.instance_name, please uninstall Kiali first." 
- when: - - current_cr.status is defined - - current_cr.status.deployment is defined - - current_cr.status.deployment.instanceName is defined - - current_cr.status.deployment.instanceName != kiali_vars.deployment.instance_name - -- name: Ensure the deployment.namespace has not changed - fail: - msg: "The deployment.namespace cannot be changed to a different value. It was [{{ current_cr.status.deployment.namespace }}] but is now [{{ kiali_vars.deployment.namespace }}]. In order to install Kiali with a different deployment.namespace, please uninstall Kiali first." - when: - - current_cr.status is defined - - current_cr.status.deployment is defined - - current_cr.status.deployment.namespace is defined - - current_cr.status.deployment.namespace != kiali_vars.deployment.namespace - -- name: Only allow ad-hoc kiali namespace when appropriate - fail: - msg: "The operator is forbidden from installing Kiali in a namespace [{{ kiali_vars.deployment.namespace }}] that is different from the namespace where the CR was created [{{ current_cr.metadata.namespace }}]" - when: - - kiali_vars.deployment.namespace != current_cr.metadata.namespace - - lookup('env', 'ALLOW_AD_HOC_KIALI_NAMESPACE') | default('false', True) != "true" - -- name: Make sure instance_name follows the DNS label standard because it will be a Service name - fail: - msg: "The value for deployment.instance_name [{{ kiali_vars.deployment.instance_name }}] does not follow the DNS label standard as defined in RFC 1123. In short, it must only contain lowercase alphanumeric characters or '-'." - when: - # regex must follow https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names - # restrict to 40 chars, not 63, because instance_name is a prefix and we need to prepend additional chars for some resource names (like "-service-account") - - kiali_vars.deployment.instance_name is not regex('^(?![0-9]+$)(?!-)[a-z0-9-]{,40}(? /dev/null | grep "tag_name" | sed -e 's/.*://' -e 's/ *"//' -e 's/",//' | grep -v "snapshot" | sort -t "." -k 1.2g,1 -k 2g,2 -k 3g | tail -n 1) - register: github_lastrelease - when: - - kiali_vars.deployment.image_version == "lastrelease" -- set_fact: - kiali_vars: "{{ kiali_vars | combine({'deployment': {'image_version': github_lastrelease.stdout}}, recursive=True) }}" - when: - - kiali_vars.deployment.image_version == "lastrelease" - -- name: Determine image version when it explicitly was configured as the operator_version - set_fact: - kiali_vars: "{{ kiali_vars | combine({'deployment': {'image_version': 'latest' if operator_version == 'master' else operator_version}}, recursive=True) }}" - when: - - kiali_vars.deployment.image_version == "operator_version" - -- fail: - msg: "Could not determine what the image version should be. Set deployment.image_version to a valid value" - when: - - kiali_vars.deployment.image_version == "" or kiali_vars.deployment.image_version == "unknown" - -- name: Determine version_label based on image_version - set_fact: - kiali_vars: "{{ kiali_vars | combine({'deployment': {'version_label': 'master' if kiali_vars.deployment.image_version == 'latest' else kiali_vars.deployment.image_version}}, recursive=True) }}" - when: - - kiali_vars.deployment.version_label == "" - -# Kubernetes limits the length of version label strings to 63 characters or less - make sure the label is valid. 
-- name: Trim version_label when appropriate - set_fact: - kiali_vars: "{{ kiali_vars | combine({'deployment': {'version_label': kiali_vars.deployment.version_label[:60] + 'XXX' }}, recursive=True) }}" - when: - - kiali_vars.deployment.version_label | length > 63 - -# Indicate which Kiali image we are going to use. -- debug: - msg: "IMAGE_NAME={{ kiali_vars.deployment.image_name }}; IMAGE VERSION={{ kiali_vars.deployment.image_version }}; VERSION LABEL={{ kiali_vars.deployment.version_label }}" - -- name: Determine what metadata labels to apply to all created resources - set_fact: - kiali_resource_metadata_labels: - app: kiali - version: "{{ kiali_vars.deployment.version_label }}" - app.kubernetes.io/name: kiali - app.kubernetes.io/version: "{{ kiali_vars.deployment.version_label }}" - app.kubernetes.io/instance: "{{ kiali_vars.deployment.instance_name }}" - app.kubernetes.io/part-of: kiali - -# Determine the accessible namespaces. The user may have specified names using regex expressions. -# We need to get a list of all namespaces and match them to the regex expressions. -# Note that we replace kiali_vars.deployment.accessible_namespaces with the full list of actual namespace names -# with regex expressions removed because when the CR changes, we need to know what namespaces were granted roles in -# case we need to revoke those roles (to do this, we need to know the exact names of the namespaces). -# This must be done before the next step which is figuring out what namespaces are no longer accessible and revoking their roles. -# If the user did not specify Kiali's own namespace in accessible_namespaces, it will be added to the list automatically. -# NOTE: there is a special value of accessible_namespaces - two asterisks ("**") means Kiali is to be given access to all -# namespaces via a single cluster role (as opposed to individual roles in each accessible namespace). 
- -- name: Determine the Role and RoleBinding kinds that the operator will create and that the role templates will use - set_fact: - role_kind: "{{ 'ClusterRole' if '**' in kiali_vars.deployment.accessible_namespaces else 'Role' }}" - role_binding_kind: "{{ 'ClusterRoleBinding' if '**' in kiali_vars.deployment.accessible_namespaces else 'RoleBinding' }}" - -- name: Determine if the operator can support accessible_namespaces=** - can_i create clusterroles - register: can_i_create_clusterroles - ignore_errors: yes - k8s: - state: present - definition: - apiVersion: authorization.k8s.io/v1 - kind: SelfSubjectAccessReview - spec: - resourceAttributes: - group: rbac.authorization.k8s.io - resource: clusterroles - verb: create - when: - - '"**" in kiali_vars.deployment.accessible_namespaces' - -- name: Determine if the operator can support accessible_namespaces=** - can_i create clusterrolebindings - register: can_i_create_clusterrolebindings - ignore_errors: yes - k8s: - state: present - definition: - apiVersion: authorization.k8s.io/v1 - kind: SelfSubjectAccessReview - spec: - resourceAttributes: - group: rbac.authorization.k8s.io - resource: clusterrolebindings - verb: create - when: - - '"**" in kiali_vars.deployment.accessible_namespaces' - -- fail: - msg: "The operator cannot support deployment.accessible_namespaces set to ['**'] because it does not have permissions to create clusterroles" - when: - - '"**" in kiali_vars.deployment.accessible_namespaces' - - can_i_create_clusterroles is defined - - can_i_create_clusterroles.result is defined - - can_i_create_clusterroles.result.status is defined - - can_i_create_clusterroles.result.status.allowed is defined - - can_i_create_clusterroles.result.status.allowed == False - -- fail: - msg: "The operator cannot support deployment.accessible_namespaces set to ['**'] because it does not have permissions to create clusterrolebindings" - when: - - '"**" in kiali_vars.deployment.accessible_namespaces' - - can_i_create_clusterrolebindings is defined - - can_i_create_clusterrolebindings.result is defined - - can_i_create_clusterrolebindings.result.status is defined - - can_i_create_clusterrolebindings.result.status.allowed is defined - - can_i_create_clusterrolebindings.result.status.allowed == False - -- name: Find all namespaces (this is limited to what the operator has permission to see) - set_fact: - all_namespaces: "{{ lookup(k8s_plugin, api_version='v1', kind='Namespace') | default({}) | json_query('[].metadata.name') }}" - -- name: Determine all accessible namespaces, expanding regex expressions to matched namespaces - set_fact: - all_accessible_namespaces: "{{ (all_namespaces | only_accessible_namespaces(accessible_namespaces=kiali_vars.deployment.accessible_namespaces) + [ kiali_vars.deployment.namespace, kiali_vars.istio_namespace, kiali_vars.external_services.istio.root_namespace ]) | unique | sort }}" - when: - - '"**" not in kiali_vars.deployment.accessible_namespaces' - -- name: If accessible namespaces list has the special all-namespaces indicator, remove all other namespaces from the list - set_fact: - all_accessible_namespaces: ["**"] - when: - - '"**" in kiali_vars.deployment.accessible_namespaces' - -- name: Set deployment.accessible_namespaces to a list of full namespace names - set_fact: - kiali_vars: "{{ kiali_vars | combine({'deployment': {'accessible_namespaces': all_accessible_namespaces }}, recursive=True) }}" - -- name: Listing of all accessible namespaces (includes regex matches) - debug: - msg: "{{ 
kiali_vars.deployment.accessible_namespaces }}" - -# do some security checks - abort if the operator is forbidden from allowing certain accessible_namespace values -- name: Abort if all namespace access is not allowed - fail: - msg: "The operator is forbidden from installing Kiali with deployment.accessible_namespaces set to ['**']" - when: - - '"**" in kiali_vars.deployment.accessible_namespaces' - - lookup('env', 'ALLOW_ALL_ACCESSIBLE_NAMESPACES') | default('false', True) != "true" - -- name: Get labeled accessible namespaces - vars: - label: "{{ lookup('env', 'ACCESSIBLE_NAMESPACES_LABEL') | default('', True) }}" - label_selector: "{{ label + ('' if label is regex('^.+=.+$') else ('=' + kiali_vars.istio_namespace)) }}" - set_fact: - only_allowed_labeled_namespaces: "{{ query(k8s_plugin, kind='Namespace', api_version='v1', label_selector=label_selector) | json_query('[*].metadata.name') }}" - when: - - '"**" not in kiali_vars.deployment.accessible_namespaces' - - label != "" - -- name: Abort if accessible namespaces contains namespaces not labeled - vars: - ns_diff: "{{ kiali_vars.deployment.accessible_namespaces | difference(only_allowed_labeled_namespaces) }}" - fail: - msg: "Operator is forbidden to allow Kiali CR to specify one or more accessible namespaces that were not labeled: {{ ('Number of rejected namespaces=' + (ns_diff | length | string)) if (ns_diff | length > 10) else (ns_diff) }}" - when: - - '"**" not in kiali_vars.deployment.accessible_namespaces' - - only_allowed_labeled_namespaces is defined - - ns_diff | length > 0 - -# Note that we add the instance name to the member-of key name only if the instance name is not the default 'kiali'. -# This is for backward compatibility, and for simplicity when deploying under normal default conditions. -- name: When accessible namespaces are specified, ensure label selector is set - set_fact: - kiali_vars: "{{ kiali_vars | combine({'api': {'namespaces': {'label_selector': ('kiali.io/' + ((kiali_vars.deployment.instance_name + '.') if kiali_vars.deployment.instance_name != 'kiali' else '') + 'member-of=' + kiali_vars.deployment.namespace)}}}, recursive=True) }}" - when: - - '"**" not in kiali_vars.deployment.accessible_namespaces' - - kiali_vars.api.namespaces.label_selector is not defined - -- name: Make sure label selector is in the valid format name=value - fail: - msg: "The api.namespaces.label_selector is not valid [{{ kiali_vars.api.namespaces.label_selector }}] - it must be in the form of 'name=value' following Kubernetes syntax rules for label names and values." - when: - - kiali_vars.api.namespaces.label_selector is defined - # this regex is not 100% accurate, but we want to at least catch obvious errors - - kiali_vars.api.namespaces.label_selector is not regex('^[a-zA-Z0-9/_.-]+=[a-zA-Z0-9_.-]+$') - -# If the signing key is not empty string, and is not of the special value secret:name:key, -# do some validation on it's length -- name: Validate signing key, if it is set in the CR - fail: - msg: "Signing key must be 16, 24 or 32 byte length" - when: - - kiali_vars.auth.strategy != 'anonymous' - - kiali_vars.login_token.signing_key != "" - - not(kiali_vars.login_token.signing_key | regex_search('secret:.+:.+')) - - kiali_vars.login_token.signing_key | length != 16 - - kiali_vars.login_token.signing_key | length != 24 - - kiali_vars.login_token.signing_key | length != 32 - -# If the signing key is empty string, we need to ensure a signing key secret exists. If one does not exist, we need to generate one. 
-# Note that to avoid granting to the operator the very powerful permission to CRUD all secrets in all namespaces, we always generate -# a signing key secret with the name "kiali-signing-key" regardless of the value of kiali_vars.deployment.instance_name. -# Thus, all Kiali instances will be using the same signing key secret name. If the user does not want this, they can generate their -# own secret with their own key (which is a smart thing to do anyway). The user tells the operator what the name of that secret -# signing key is via "login_token.signing_key" with value "secret::". - -- name: Get information about any existing signing key secret if we need to know if it exists or not - k8s_info: - api_version: v1 - kind: Secret - namespace: "{{ kiali_vars.deployment.namespace }}" - name: kiali-signing-key - register: signing_key_secret_raw - when: - - kiali_vars.login_token.signing_key == "" - -- name: Create kiali-signing-key secret to store a random signing key if a secret does not already exist and we need one - k8s: - state: present - definition: - apiVersion: v1 - kind: Secret - metadata: - namespace: "{{ kiali_vars.deployment.namespace }}" - name: kiali-signing-key - labels: "{{ kiali_resource_metadata_labels }}" - type: Opaque - data: - key: "{{ lookup('password', '/dev/null length=16 chars=ascii_letters,digits') | b64encode }}" - when: - - kiali_vars.login_token.signing_key == "" - - signing_key_secret_raw is defined - - signing_key_secret_raw.resources is defined - - signing_key_secret_raw.resources | length == 0 - -# Because we must use a fixed name for the secret, we need to attach a label to indicate this Kiali install will be using it. -# This allows multiple Kiali instances deployed in the same namespace to share the secret. This secret won't be removed -# as long as our label exists on the secret resource. -- name: Add label to kiali-signing-key secret to make it known this Kiali instance will be using it - vars: - the_label: "{{ 'kiali.io/' + ((kiali_vars.deployment.instance_name + '.') if kiali_vars.deployment.instance_name != 'kiali' else '') + 'member-of' }}" - k8s: - state: present - definition: | - apiVersion: v1 - kind: Secret - metadata: - namespace: "{{ kiali_vars.deployment.namespace }}" - name: kiali-signing-key - labels: - {{ the_label }}: {{ kiali_vars.deployment.namespace }} - when: - - kiali_vars.login_token.signing_key == "" - -- name: Point signing key to the generated secret - set_fact: - kiali_vars: "{{ kiali_vars | combine({'login_token': {'signing_key': 'secret:kiali-signing-key:key'}}, recursive=True) }}" - when: - - kiali_vars.login_token.signing_key == "" - -# Some credentials in the config can be overridden by secrets that are to be mounted on the file system. -# Prepare these overrides that need to be defined as volumes in the deployment. 
- -- set_fact: - kiali_deployment_secret_volumes: {} - -- name: Prepare the secret volume for prometheus password - set_fact: - kiali_deployment_secret_volumes: "{{ kiali_deployment_secret_volumes | combine({'prometheus-password': {'secret_name': kiali_vars.external_services.prometheus.auth.password | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.prometheus.auth.password | regex_replace('secret:.+:(.+)', '\\1') }}) }}" - when: - - kiali_vars.external_services.prometheus.auth.password | regex_search('secret:.+:.+') - -- name: Prepare the secret volume for prometheus token - set_fact: - kiali_deployment_secret_volumes: "{{ kiali_deployment_secret_volumes | combine({'prometheus-token': {'secret_name': kiali_vars.external_services.prometheus.auth.token | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.prometheus.auth.token | regex_replace('secret:.+:(.+)', '\\1') }}) }}" - when: - - kiali_vars.external_services.prometheus.auth.token | regex_search('secret:.+:.+') - -- name: Prepare the secret volume for tracing password - set_fact: - kiali_deployment_secret_volumes: "{{ kiali_deployment_secret_volumes | combine({'tracing-password': {'secret_name': kiali_vars.external_services.tracing.auth.password | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.tracing.auth.password | regex_replace('secret:.+:(.+)', '\\1') }}) }}" - when: - - kiali_vars.external_services.tracing.enabled|bool == True - - kiali_vars.external_services.tracing.auth.password | regex_search('secret:.+:.+') - -- name: Prepare the secret volume for tracing token - set_fact: - kiali_deployment_secret_volumes: "{{ kiali_deployment_secret_volumes | combine({'tracing-token': {'secret_name': kiali_vars.external_services.tracing.auth.token | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.tracing.auth.token | regex_replace('secret:.+:(.+)', '\\1') }}) }}" - when: - - kiali_vars.external_services.tracing.enabled|bool == True - - kiali_vars.external_services.tracing.auth.token | regex_search('secret:.+:.+') - -- name: Prepare the secret volume for grafana password - set_fact: - kiali_deployment_secret_volumes: "{{ kiali_deployment_secret_volumes | combine({'grafana-password': {'secret_name': kiali_vars.external_services.grafana.auth.password | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.grafana.auth.password | regex_replace('secret:.+:(.+)', '\\1') }}) }}" - when: - - kiali_vars.external_services.grafana.enabled|bool == True - - kiali_vars.external_services.grafana.auth.password | regex_search('secret:.+:.+') - -- name: Prepare the secret volume for grafana token - set_fact: - kiali_deployment_secret_volumes: "{{ kiali_deployment_secret_volumes | combine({'grafana-token': {'secret_name': kiali_vars.external_services.grafana.auth.token | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.grafana.auth.token | regex_replace('secret:.+:(.+)', '\\1') }}) }}" - when: - - kiali_vars.external_services.grafana.enabled|bool == True - - kiali_vars.external_services.grafana.auth.token | regex_search('secret:.+:.+') - -- name: Prepare the secret volume for login token signing key - set_fact: - kiali_deployment_secret_volumes: "{{ kiali_deployment_secret_volumes | combine({'login-token-signing-key': {'secret_name': kiali_vars.login_token.signing_key | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.login_token.signing_key | 
regex_replace('secret:.+:(.+)', '\\1') }}) }}" - when: - - kiali_vars.login_token.signing_key | regex_search('secret:.+:.+') - -# The following few tasks read the current Kiali configmap (if one exists) in order to figure out what -# namespaces are no longer accessible. Those namespaces will have their Kiali roles removed. -# They will also have the Kiali labels removed. - -- name: Find current configmap, if it exists - set_fact: - current_configmap: "{{ lookup(k8s_plugin, resource_name=kiali_vars.deployment.instance_name, namespace=kiali_vars.deployment.namespace, api_version='v1', kind='ConfigMap') }}" - -- name: Find some current configuration settings - set_fact: - current_accessible_namespaces: "{{ current_configmap.data['config.yaml'] | from_yaml | json_query('deployment.accessible_namespaces') }}" - current_label_selector: "{{ current_configmap.data['config.yaml'] | from_yaml | json_query('api.namespaces.label_selector') }}" - current_view_only_mode: "{{ current_configmap.data['config.yaml'] | from_yaml | json_query('deployment.view_only_mode') }}" - current_image_name: "{{ current_configmap.data['config.yaml'] | from_yaml | json_query('deployment.image_name') }}" - current_image_version: "{{ current_configmap.data['config.yaml'] | from_yaml | json_query('deployment.image_version') }}" - current_instance_name: "{{ current_configmap.data['config.yaml'] | from_yaml | json_query('deployment.instance_name') }}" - when: - - current_configmap is defined - - current_configmap.data is defined - - current_configmap.data['config.yaml'] is defined - -# Because we need to remove the labels that were created before, we must not allow the user to change -# the label_selector. So if the current accessible_namespaces is not ** but the label_select is being changed, -# we need to abort since we won't know what the old labels were. If current accessible_namespaces is ** then -# we know we didn't create labels before so we can allow label_selector to change. -- name: Do not allow user to change label selector - fail: - msg: "The api.namespaces.label_selector cannot be changed to a different value. It was [{{ current_label_selector }}] but is now configured to be [{{ kiali_vars.api.namespaces.label_selector }}]. In order to install Kiali with a different label selector than what was used before, please uninstall Kiali first." 
- when: - - current_accessible_namespaces is defined - - '"**" not in current_accessible_namespaces' - - current_label_selector is defined - - kiali_vars.api.namespaces.label_selector is defined - - current_label_selector != kiali_vars.api.namespaces.label_selector - -- name: Determine the namespaces that were previously accessible but are now inaccessible - set_fact: - no_longer_accessible_namespaces: "{{ current_accessible_namespaces | difference(kiali_vars.deployment.accessible_namespaces) }}" - when: - - current_accessible_namespaces is defined - - '"**" not in current_accessible_namespaces' - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Deleting obsolete roles" - -- name: Delete all additional Kiali roles from namespaces that Kiali no longer has access to - include_tasks: remove-roles.yml - vars: - role_namespaces: "{{ no_longer_accessible_namespaces }}" - when: - - no_longer_accessible_namespaces is defined - -- name: Delete Kiali cluster roles if no longer given special access to all namespaces - include_tasks: remove-clusterroles.yml - when: - - current_accessible_namespaces is defined - - '"**" in current_accessible_namespaces' - - '"**" not in kiali_vars.deployment.accessible_namespaces' - -- name: Delete all Kiali roles from namespaces if view_only_mode is changing since role bindings are immutable - include_tasks: remove-roles.yml - vars: - role_namespaces: "{{ kiali_vars.deployment.accessible_namespaces }}" - when: - - current_view_only_mode is defined - - current_view_only_mode|bool != kiali_vars.deployment.view_only_mode|bool - - current_accessible_namespaces is defined - - '"**" not in current_accessible_namespaces' - -- name: Delete Kiali cluster roles if view_only_mode is changing since role bindings are immutable - include_tasks: remove-clusterroles.yml - when: - - current_view_only_mode is defined - - current_view_only_mode|bool != kiali_vars.deployment.view_only_mode|bool - - current_accessible_namespaces is defined - - '"**" in current_accessible_namespaces' - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Processing namespace labels" - -- name: Remove Kiali label from namespaces that Kiali no longer has access to - vars: - # everything to the left of the = is the name of the label we want to remove - the_namespace_label_name: "{{ current_label_selector | regex_replace('^(.*)=.*$', '\\1') }}" - # if a namespace happened to have been deleted, we do not want to (nor can we) resurrect it, hence we use state=patched - k8s: - state: patched - definition: | - {% for namespace in no_longer_accessible_namespaces %} - --- - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ namespace }}" - labels: - {{ the_namespace_label_name }}: null - ... 
- {% endfor %} - when: - - no_longer_accessible_namespaces is defined - - current_label_selector is defined - -- name: Create additional Kiali label on all accessible namespaces - vars: - namespaces: "{{ kiali_vars.deployment.accessible_namespaces }}" - # everything to the left of the = is the label name; to the right is the label value - the_namespace_label_name: "{{ kiali_vars.api.namespaces.label_selector | regex_replace('^(.*)=.*$', '\\1') }}" - the_namespace_label_value: "{{ kiali_vars.api.namespaces.label_selector | regex_replace('^.*=(.*)$', '\\1') }}" - k8s: - state: patched - definition: | - {% for namespace in namespaces %} - --- - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ namespace }}" - labels: - {{ the_namespace_label_name }}: "{{ the_namespace_label_value }}" - ... - {% endfor %} - when: - - '"**" not in kiali_vars.deployment.accessible_namespaces' - -- name: Delete Kiali deployment if image is changing - this uninstalls any old version of Kiali that might be running - k8s: - state: absent - api_version: apps/v1 - kind: Deployment - namespace: "{{ kiali_vars.deployment.namespace }}" - name: "{{ kiali_vars.deployment.instance_name }}" - when: - - current_image_name is defined and current_image_version is defined - - (current_image_name != kiali_vars.deployment.image_name) or (current_image_version != kiali_vars.deployment.image_version) - -# Get the deployment's custom annotation we set that tells us when we last updated the Deployment. -# We need this to ensure the Deployment we update retains this same timestamp unless changes are made -# that require a pod restart - in which case we update this timestamp. -- name: Find current deployment, if it exists - set_fact: - current_deployment: "{{ lookup(k8s_plugin, resource_name=kiali_vars.deployment.instance_name, namespace=kiali_vars.deployment.namespace, api_version='apps/v1', kind='Deployment') }}" - -- name: Get current deployment last-updated annotation timestamp from existing deployment - set_fact: - current_deployment_last_updated: "{{ current_deployment.spec.template.metadata.annotations['operator.kiali.io/last-updated'] if current_deployment.spec.template.metadata.annotations['operator.kiali.io/last-updated'] is defined else lookup('pipe','date') }}" - deployment_is_new: false - when: - - current_deployment is defined - - current_deployment.spec is defined - - current_deployment.spec.template is defined - - current_deployment.spec.template.metadata is defined - - current_deployment.spec.template.metadata.annotations is defined - -- name: Set current deployment last-updated annotation timestamp for new deployments - set_fact: - current_deployment_last_updated: "{{ lookup('pipe','date') }}" - deployment_is_new: true - when: - - current_deployment_last_updated is not defined - -# Now deploy all resources for the specific cluster environment - -- name: Execute for OpenShift environment - include_tasks: openshift/os-main.yml - vars: - deployment_last_updated: "{{ current_deployment_last_updated }}" - when: - - is_openshift == True - -- name: Execute for Kubernetes environment - include_tasks: kubernetes/k8s-main.yml - vars: - deployment_last_updated: "{{ current_deployment_last_updated }}" - when: - - is_k8s == True - -# If something changed that can only be picked up when the Kiali pod starts up, then restart the Kiali pod using a rolling restart -- name: Force the Kiali pod to restart if necessary - vars: - updated_deployment: "{{ lookup(k8s_plugin, resource_name=kiali_vars.deployment.instance_name, 
namespace=kiali_vars.deployment.namespace, api_version='apps/v1', kind='Deployment') | combine({'spec': {'template': {'metadata': {'annotations': {'operator.kiali.io/last-updated': lookup('pipe','date') }}}}}, recursive=True) }}" - k8s: - state: "present" - definition: "{{ updated_deployment }}" - when: - - deployment_is_new == False - - processed_resources.configmap is defined - - processed_resources.configmap.changed == True - - processed_resources.configmap.method == "update" - -# Can't just populate with the list of namespaces - see https://github.com/operator-framework/operator-sdk-ansible-util/issues/12 -# So instead - if the list of namespaces is manageable, store them in a comma-separated list. -# Otherwise, we'll just log the count. The purpose of this accessibleNamespaces status field is -# just to inform the user how many namespaces the operator processed. -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Finished all resource creation" - status_vars: - deployment: - accessibleNamespaces: "{{ ('Number of accessible namespaces (including control plane namespace): ' + (kiali_vars.deployment.accessible_namespaces | length | string)) if (kiali_vars.deployment.accessible_namespaces | length > 20) else (kiali_vars.deployment.accessible_namespaces | join(',')) }}" diff --git a/roles/v1.57/kiali-deploy/tasks/openshift/os-get-kiali-route-url.yml b/roles/v1.57/kiali-deploy/tasks/openshift/os-get-kiali-route-url.yml deleted file mode 100644 index 4610f0d3c..000000000 --- a/roles/v1.57/kiali-deploy/tasks/openshift/os-get-kiali-route-url.yml +++ /dev/null @@ -1,48 +0,0 @@ -# All of this is ultimately to obtain the kiali_route_url - -# Give some time for the route to come up - -- name: Detect Kiali route on OpenShift - k8s_info: - api_version: route.openshift.io/v1 - kind: Route - name: "{{ kiali_vars.deployment.instance_name }}" - namespace: "{{ kiali_vars.deployment.namespace }}" - register: kiali_route_raw - until: - - kiali_route_raw['resources'] is defined - - kiali_route_raw['resources'][0] is defined - - kiali_route_raw['resources'][0]['status'] is defined - - kiali_route_raw['resources'][0]['status']['ingress'] is defined - - kiali_route_raw['resources'][0]['status']['ingress'][0] is defined - - kiali_route_raw['resources'][0]['status']['ingress'][0]['host'] is defined - retries: 30 - delay: 10 - when: - - is_openshift == True - -- name: Set Kiali TLS Termination from OpenShift route - set_fact: - kiali_route_tls_termination: "{{ kiali_route_raw['resources'][0]['spec']['tls']['termination'] }}" - when: - - is_openshift == True - -- name: Detect HTTP Kiali OpenShift route protocol - set_fact: - kiali_route_protocol: "http" - when: - - is_openshift == True - - kiali_route_tls_termination == "" - -- name: Detect HTTPS Kiali OpenShift route protocol - set_fact: - kiali_route_protocol: "https" - when: - - is_openshift == True - - kiali_route_tls_termination != "" - -- name: Create URL for Kiali OpenShift route - set_fact: - kiali_route_url: "{{ kiali_route_protocol }}://{{ kiali_route_raw['resources'][0]['status']['ingress'][0]['host'] }}" - when: - - is_openshift == True diff --git a/roles/v1.57/kiali-deploy/tasks/openshift/os-main.yml b/roles/v1.57/kiali-deploy/tasks/openshift/os-main.yml deleted file mode 100644 index e6510280f..000000000 --- a/roles/v1.57/kiali-deploy/tasks/openshift/os-main.yml +++ /dev/null @@ -1,131 +0,0 @@ -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Creating core resources" - when: - - 
is_openshift == True - -- name: Create Kiali objects on OpenShift - include_tasks: process-resource.yml - vars: - process_resource_cluster: "openshift" - role_namespaces: "{{ [ kiali_vars.deployment.namespace ] }}" - loop: - - serviceaccount - - configmap - - cabundle - - "{{ 'role-viewer' if kiali_vars.deployment.view_only_mode|bool == True else 'role' }}" - - role-controlplane - - rolebinding - - rolebinding-controlplane - - deployment - - service - - "{{ 'hpa' if kiali_vars.deployment.hpa.spec | length > 0 else '' }}" - loop_control: - loop_var: process_resource_item - when: - - is_openshift == True - - process_resource_item != '' - -- name: Remove HPA if disabled on OpenShift - k8s: - state: absent - api_version: "{{ kiali_vars.deployment.hpa.api_version }}" - kind: "HorizontalPodAutoscaler" - namespace: "{{ kiali_vars.deployment.namespace }}" - name: "{{ kiali_vars.deployment.instance_name }}" - when: - - is_openshift == True - - kiali_vars.deployment.hpa.spec | length == 0 - -- name: Create Route on OpenShift if enabled - include_tasks: process-resource.yml - vars: - process_resource_cluster: "openshift" - role_namespace: "{{ kiali_vars.deployment.namespace }}" - loop: - - route - loop_control: - loop_var: process_resource_item - when: - - is_openshift == True - - kiali_vars.deployment.ingress.enabled|bool == True - -- name: Delete Route on OpenShift if disabled - k8s: - state: absent - api_version: "route.openshift.io/v1" - kind: "Route" - namespace: "{{ kiali_vars.deployment.namespace }}" - name: "{{ kiali_vars.deployment.instance_name }}" - when: - - is_openshift == True - - kiali_vars.deployment.ingress.enabled|bool == False - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Creating additional roles" - when: - - is_openshift == True - - '"**" not in kiali_vars.deployment.accessible_namespaces' - -- name: Create additional Kiali roles on all accessible namespaces on OpenShift - vars: - role_namespaces: "{{ kiali_vars.deployment.accessible_namespaces }}" - k8s: - definition: "{{ lookup('template', 'templates/openshift/' + ('role-viewer' if kiali_vars.deployment.view_only_mode|bool == True else 'role') + '.yaml') }}" - when: - - is_openshift == True - - '"**" not in kiali_vars.deployment.accessible_namespaces' - -- name: Create additional Kiali role bindings on all accessible namespaces on OpenShift - vars: - role_namespaces: "{{ kiali_vars.deployment.accessible_namespaces }}" - k8s: - definition: "{{ lookup('template', 'templates/openshift/rolebinding.yaml') }}" - when: - - is_openshift == True - - '"**" not in kiali_vars.deployment.accessible_namespaces' - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Creating OpenShift resources" - when: - - is_openshift == True - -- name: Get the Kiali Route URL - include_tasks: openshift/os-get-kiali-route-url.yml - when: - - is_openshift == True - -- name: Process OpenShift OAuth client - k8s: - definition: "{{ lookup('template', 'templates/openshift/oauth.yaml') }}" - when: - - is_openshift == True - - kiali_vars.auth.strategy == "openshift" - -- name: Delete all ConsoleLinks for namespaces that are no longer accessible - k8s: - state: absent - definition: | - {% for namespace in no_longer_accessible_namespaces %} - --- - apiVersion: console.openshift.io/v1 - kind: ConsoleLink - metadata: - name: "{{ kiali_vars.deployment.instance_name }}-namespace-{{ namespace }}" - ... 
- {% endfor %} - when: - - is_openshift == True - - no_longer_accessible_namespaces is defined - -- name: Process OpenShift Console Links - k8s: - definition: "{{ lookup('template', 'templates/openshift/console-links.yaml') }}" - vars: - # When accessible_namespaces=**, the kiali.io/member-of label is not set, but maistra.io/member-of are always present - namespaces: "{{ lookup(k8s_plugin, api_version='v1', kind='Namespace', label_selector=('maistra.io/member-of=' + kiali_vars.istio_namespace)) | default({}) | json_query('[].metadata.name') if '**' in all_accessible_namespaces else all_accessible_namespaces }}" - when: - - is_openshift == True - - openshift_version is version('4.3', '>=') diff --git a/roles/v1.57/kiali-deploy/tasks/process-resource.yml b/roles/v1.57/kiali-deploy/tasks/process-resource.yml deleted file mode 100644 index ff0236634..000000000 --- a/roles/v1.57/kiali-deploy/tasks/process-resource.yml +++ /dev/null @@ -1,15 +0,0 @@ -- name: "Create resource [{{ process_resource_item }}] on [{{ process_resource_cluster }}]" - k8s: - state: "present" - definition: "{{ lookup('template', 'templates/' + process_resource_cluster + '/' + process_resource_item + '.yaml') }}" - register: process_resource_result - until: - - process_resource_result.error is not defined - - process_resource_result.result is defined - - process_resource_result.result.metadata is defined - retries: 6 - delay: 10 - -# Store the results of the processed resource so they can be examined later (e.g. to know if something changed or stayed the same) -- set_fact: - processed_resources: "{{ processed_resources | default({}) | combine( { process_resource_item: { 'changed': process_resource_result.changed, 'method': process_resource_result.method } } ) }}" diff --git a/roles/v1.57/kiali-deploy/tasks/remove-clusterroles.yml b/roles/v1.57/kiali-deploy/tasks/remove-clusterroles.yml deleted file mode 100644 index b2dd561a1..000000000 --- a/roles/v1.57/kiali-deploy/tasks/remove-clusterroles.yml +++ /dev/null @@ -1,24 +0,0 @@ -- name: Delete unused Kiali cluster roles - ignore_errors: yes - k8s: - state: absent - api_version: "{{ k8s_item.apiVersion }}" - kind: "{{ k8s_item.kind }}" - name: "{{ k8s_item.metadata.name }}" - register: delete_result - until: delete_result.result == {} or (delete_result.result.status is defined and delete_result.result.status == "Success") - retries: 6 - delay: 10 - when: - - is_openshift == True or is_k8s == True - - k8s_item is defined - - k8s_item.apiVersion is defined - - k8s_item.kind is defined - - k8s_item.metadata is defined - - k8s_item.metadata.name is defined - with_items: - - "{{ query(k8s_plugin, kind='ClusterRoleBinding', resource_name=kiali_vars.deployment.instance_name, api_version='rbac.authorization.k8s.io/v1') }}" - - "{{ query(k8s_plugin, kind='ClusterRole', resource_name=kiali_vars.deployment.instance_name, api_version='rbac.authorization.k8s.io/v1') }}" - - "{{ query(k8s_plugin, kind='ClusterRole', resource_name=kiali_vars.deployment.instance_name + '-viewer', api_version='rbac.authorization.k8s.io/v1') }}" - loop_control: - loop_var: k8s_item diff --git a/roles/v1.57/kiali-deploy/tasks/remove-roles.yml b/roles/v1.57/kiali-deploy/tasks/remove-roles.yml deleted file mode 100644 index c1bf4e0c4..000000000 --- a/roles/v1.57/kiali-deploy/tasks/remove-roles.yml +++ /dev/null @@ -1,27 +0,0 @@ -- name: Delete Kiali roles from previously accessible namespaces - k8s: - state: absent - definition: | - {% for namespace in role_namespaces %} - --- - apiVersion: 
rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: "{{ kiali_vars.deployment.instance_name }}" - namespace: "{{ namespace }}" - ... - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ kiali_vars.deployment.instance_name }}" - namespace: "{{ namespace }}" - ... - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ kiali_vars.deployment.instance_name }}-viewer" - namespace: "{{ namespace }}" - ... - {% endfor %} diff --git a/roles/v1.57/kiali-deploy/tasks/snake_camel_case.yaml b/roles/v1.57/kiali-deploy/tasks/snake_camel_case.yaml deleted file mode 100644 index b7e786045..000000000 --- a/roles/v1.57/kiali-deploy/tasks/snake_camel_case.yaml +++ /dev/null @@ -1,178 +0,0 @@ -# Because we are passing through some yaml directly to Kubernetes resources, we have to retain the camelCase keys. -# All CR parameters are converted to snake_case, but the original yaml is found in the special _kiali_io_kiali param. -# We need to copy that original yaml into our vars where appropriate to keep the camelCase. - -- name: Replace snake_case with camelCase in deployment.affinity.node - set_fact: - kiali_vars: | - {% set a=kiali_vars['deployment']['affinity'].pop('node') %} - {{ kiali_vars | combine({'deployment': {'affinity': {'node': current_cr.spec.deployment.affinity.node }}}, recursive=True) }} - when: - - kiali_vars.deployment.affinity is defined - - kiali_vars.deployment.affinity.node is defined - - kiali_vars.deployment.affinity.node | length > 0 - -- name: Replace snake_case with camelCase in deployment.affinity.pod - set_fact: - kiali_vars: | - {% set a=kiali_vars['deployment']['affinity'].pop('pod') %} - {{ kiali_vars | combine({'deployment': {'affinity': {'pod': current_cr.spec.deployment.affinity.pod }}}, recursive=True) }} - when: - - kiali_vars.deployment.affinity is defined - - kiali_vars.deployment.affinity.pod is defined - - kiali_vars.deployment.affinity.pod | length > 0 - -- name: Replace snake_case with camelCase in deployment.affinity.pod_anti - set_fact: - kiali_vars: | - {% set a=kiali_vars['deployment']['affinity'].pop('pod_anti') %} - {{ kiali_vars | combine({'deployment': {'affinity': {'pod_anti': current_cr.spec.deployment.affinity.pod_anti }}}, recursive=True) }} - when: - - kiali_vars.deployment.affinity is defined - - kiali_vars.deployment.affinity.pod_anti is defined - - kiali_vars.deployment.affinity.pod_anti | length > 0 - -- name: Replace snake_case with camelCase in deployment.tolerations - set_fact: - kiali_vars: | - {% set a=kiali_vars['deployment'].pop('tolerations') %} - {{ kiali_vars | combine({'deployment': {'tolerations': current_cr.spec.deployment.tolerations }}, recursive=True) }} - when: - - kiali_vars.deployment.tolerations is defined - - kiali_vars.deployment.tolerations | length > 0 - -- name: Replace snake_case with camelCase in deployment.additional_service_yaml - set_fact: - kiali_vars: | - {% set a=kiali_vars['deployment'].pop('additional_service_yaml') %} - {{ kiali_vars | combine({'deployment': {'additional_service_yaml': current_cr.spec.deployment.additional_service_yaml }}, recursive=True) }} - when: - - kiali_vars.deployment.additional_service_yaml is defined - - kiali_vars.deployment.additional_service_yaml | length > 0 - -- name: Replace snake_case with camelCase in deployment.resources - set_fact: - kiali_vars: | - {% set a=kiali_vars['deployment'].pop('resources') %} - {{ kiali_vars | combine({'deployment': {'resources': current_cr.spec.deployment.resources }}, 
recursive=True) }} - when: - - kiali_vars.deployment.resources is defined - - kiali_vars.deployment.resources | length > 0 - -- name: Replace snake_case with camelCase in deployment.ingress.override_yaml - set_fact: - kiali_vars: | - {% set a=kiali_vars['deployment']['ingress'].pop('override_yaml') %} - {{ kiali_vars | combine({'deployment': {'ingress': {'override_yaml': current_cr.spec.deployment.ingress.override_yaml }}}, recursive=True) }} - when: - - kiali_vars.deployment.ingress.override_yaml is defined - - kiali_vars.deployment.ingress.override_yaml | length > 0 - -- name: Replace snake_case with camelCase in deployment.pod_annotations - set_fact: - kiali_vars: | - {% set a=kiali_vars['deployment'].pop('pod_annotations') %} - {{ kiali_vars | combine({'deployment': {'pod_annotations': current_cr.spec.deployment.pod_annotations }}, recursive=True) }} - when: - - kiali_vars.deployment.pod_annotations is defined - - kiali_vars.deployment.pod_annotations | length > 0 - -- name: Replace snake_case with camelCase in deployment.pod_labels - set_fact: - kiali_vars: | - {% set a=kiali_vars['deployment'].pop('pod_labels') %} - {{ kiali_vars | combine({'deployment': {'pod_labels': current_cr.spec.deployment.pod_labels }}, recursive=True) }} - when: - - kiali_vars.deployment.pod_labels is defined - - kiali_vars.deployment.pod_labels | length > 0 - -- name: Replace snake_case with camelCase in deployment.service_annotations - set_fact: - kiali_vars: | - {% set a=kiali_vars['deployment'].pop('service_annotations') %} - {{ kiali_vars | combine({'deployment': {'service_annotations': current_cr.spec.deployment.service_annotations }}, recursive=True) }} - when: - - kiali_vars.deployment.service_annotations is defined - - kiali_vars.deployment.service_annotations | length > 0 - -- name: Replace snake_case with camelCase in deployment.hpa.spec - set_fact: - kiali_vars: | - {% set a=kiali_vars['deployment']['hpa'].pop('spec') %} - {{ kiali_vars | combine({'deployment': {'hpa': {'spec': current_cr.spec.deployment.hpa.spec }}}, recursive=True) }} - when: - - kiali_vars.deployment.hpa is defined - - kiali_vars.deployment.hpa.spec is defined - - kiali_vars.deployment.hpa.spec | length > 0 - -- name: Replace snake_case with camelCase in deployment.node_selector - set_fact: - kiali_vars: | - {% set a=kiali_vars['deployment'].pop('node_selector') %} - {{ kiali_vars | combine({'deployment': {'node_selector': current_cr.spec.deployment.node_selector }}, recursive=True) }} - when: - - kiali_vars.deployment.node_selector is defined - - kiali_vars.deployment.node_selector | length > 0 - -- name: Replace snake_case with camelCase in external_services.custom_dashboards.prometheus.custom_headers - set_fact: - kiali_vars: | - {% set a=kiali_vars['external_services']['custom_dashboards']['prometheus'].pop('custom_headers') %} - {{ kiali_vars | combine({'external_services': {'custom_dashboards': {'prometheus': {'custom_headers': current_cr.spec.external_services.custom_dashboards.prometheus.custom_headers }}}}, recursive=True) }} - when: - - kiali_vars.external_services.custom_dashboards.prometheus.custom_headers is defined - - kiali_vars.external_services.custom_dashboards.prometheus.custom_headers | length > 0 - -- name: Replace snake_case with camelCase in external_services.custom_dashboards.prometheus.query_scope - set_fact: - kiali_vars: | - {% set a=kiali_vars['external_services']['custom_dashboards']['prometheus'].pop('query_scope') %} - {{ kiali_vars | combine({'external_services': {'custom_dashboards': 
{'prometheus': {'query_scope': current_cr.spec.external_services.custom_dashboards.prometheus.query_scope }}}}, recursive=True) }} - when: - - kiali_vars.external_services.custom_dashboards.prometheus.query_scope is defined - - kiali_vars.external_services.custom_dashboards.prometheus.query_scope | length > 0 - -- name: Replace snake_case with camelCase in external_services.prometheus.custom_headers - set_fact: - kiali_vars: | - {% set a=kiali_vars['external_services']['prometheus'].pop('custom_headers') %} - {{ kiali_vars | combine({'external_services': {'prometheus': {'custom_headers': current_cr.spec.external_services.prometheus.custom_headers }}}, recursive=True) }} - when: - - kiali_vars.external_services.prometheus.custom_headers is defined - - kiali_vars.external_services.prometheus.custom_headers | length > 0 - -- name: Replace snake_case with camelCase in external_services.prometheus.query_scope - set_fact: - kiali_vars: | - {% set a=kiali_vars['external_services']['prometheus'].pop('query_scope') %} - {{ kiali_vars | combine({'external_services': {'prometheus': {'query_scope': current_cr.spec.external_services.prometheus.query_scope }}}, recursive=True) }} - when: - - kiali_vars.external_services.prometheus.query_scope is defined - - kiali_vars.external_services.prometheus.query_scope | length > 0 - -- name: Replace snake_case with camelCase in deployment.configmap_annotations - set_fact: - kiali_vars: | - {% set a=kiali_vars['deployment'].pop('configmap_annotations') %} - {{ kiali_vars | combine({'deployment': {'configmap_annotations': current_cr.spec.deployment.configmap_annotations }}, recursive=True) }} - when: - - kiali_vars.deployment.configmap_annotations is defined - - kiali_vars.deployment.configmap_annotations | length > 0 - -- name: Replace snake_case with camelCase in external_services.tracing.query_scope - set_fact: - kiali_vars: | - {% set a=kiali_vars['external_services']['tracing'].pop('query_scope') %} - {{ kiali_vars | combine({'external_services': {'tracing': {'query_scope': current_cr.spec.external_services.tracing.query_scope }}}, recursive=True) }} - when: - - kiali_vars.external_services.tracing.query_scope is defined - - kiali_vars.external_services.tracing.query_scope | length > 0 - -- name: Replace snake_case with camelCase in deployment.security_context - set_fact: - kiali_vars: | - {% set a=kiali_vars['deployment'].pop('security_context') %} - {{ kiali_vars | combine({'deployment': {'security_context': current_cr.spec.deployment.security_context}}, recursive=True) }} - when: - - kiali_vars.deployment.security_context is defined - - kiali_vars.deployment.security_context | length > 0 diff --git a/roles/v1.57/kiali-deploy/tasks/update-status-progress.yml b/roles/v1.57/kiali-deploy/tasks/update-status-progress.yml deleted file mode 100644 index 58570bceb..000000000 --- a/roles/v1.57/kiali-deploy/tasks/update-status-progress.yml +++ /dev/null @@ -1,16 +0,0 @@ -- name: Prepare status progress facts - ignore_errors: yes - set_fact: - status_progress_step: "{{ 1 if status_progress_step is not defined else (status_progress_step|int + 1) }}" - status_progress_start: "{{ ('%Y-%m-%d %H:%M:%S' | strftime) if status_progress_start is not defined else (status_progress_start) }}" - -- name: Update CR status progress field with any additional status fields - ignore_errors: yes - vars: - duration: "{{ ('%Y-%m-%d %H:%M:%S' | strftime | to_datetime) - (status_progress_start | to_datetime) }}" - operator_sdk.util.k8s_status: - api_version: "{{ current_cr.apiVersion }}" - 
kind: "{{ current_cr.kind }}" - name: "{{ current_cr.metadata.name }}" - namespace: "{{ current_cr.metadata.namespace }}" - status: "{{ status_vars | default({}) | combine({'progress':{'message': status_progress_step + '. ' + status_progress_message, 'duration': duration }}, recursive=True) }}" diff --git a/roles/v1.57/kiali-deploy/tasks/update-status.yml b/roles/v1.57/kiali-deploy/tasks/update-status.yml deleted file mode 100644 index fa7793085..000000000 --- a/roles/v1.57/kiali-deploy/tasks/update-status.yml +++ /dev/null @@ -1,8 +0,0 @@ -- name: Update CR status field - ignore_errors: yes - operator_sdk.util.k8s_status: - api_version: "{{ current_cr.apiVersion }}" - kind: "{{ current_cr.kind }}" - name: "{{ current_cr.metadata.name }}" - namespace: "{{ current_cr.metadata.namespace }}" - status: "{{ status_vars }}" diff --git a/roles/v1.57/kiali-deploy/templates/kubernetes/configmap.yaml b/roles/v1.57/kiali-deploy/templates/kubernetes/configmap.yaml deleted file mode 100644 index 6e1ca1986..000000000 --- a/roles/v1.57/kiali-deploy/templates/kubernetes/configmap.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} -{% if kiali_vars.deployment.configmap_annotations is defined and kiali_vars.deployment.configmap_annotations|length > 0 %} - annotations: - {{ kiali_vars.deployment.configmap_annotations | to_nice_yaml(indent=0) | trim | indent(4) }} -{% endif %} -data: - config.yaml: | - {{ kiali_vars | to_nice_yaml(indent=0) | trim | indent(4) }} diff --git a/roles/v1.57/kiali-deploy/templates/kubernetes/deployment.yaml b/roles/v1.57/kiali-deploy/templates/kubernetes/deployment.yaml deleted file mode 100644 index 71fc40177..000000000 --- a/roles/v1.57/kiali-deploy/templates/kubernetes/deployment.yaml +++ /dev/null @@ -1,190 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} -spec: - replicas: {{ kiali_vars.deployment.replicas }} - selector: - matchLabels: - app.kubernetes.io/name: kiali - app.kubernetes.io/instance: {{ kiali_vars.deployment.instance_name }} - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - name: {{ kiali_vars.deployment.instance_name }} - labels: {{ kiali_resource_metadata_labels | combine(kiali_vars.deployment.pod_labels) }} - annotations: -{% if kiali_vars.server.observability.metrics.enabled|bool == True %} - prometheus.io/scrape: "true" - prometheus.io/port: "{{ kiali_vars.server.observability.metrics.port }}" -{% else %} - prometheus.io/scrape: "false" - prometheus.io/port: null -{% endif %} - kiali.io/dashboards: go,kiali - operator.kiali.io/last-updated: "{{ deployment_last_updated }}" -{% if kiali_vars.deployment.pod_annotations|length > 0 %} - {{ kiali_vars.deployment.pod_annotations | to_nice_yaml(indent=0) | trim | indent(8) }} -{% endif %} - spec: - serviceAccount: {{ kiali_vars.deployment.instance_name }}-service-account -{% if kiali_vars.deployment.priority_class_name != "" %} - priorityClassName: "{{ kiali_vars.deployment.priority_class_name }}" -{% endif %} -{% if kiali_vars.deployment.image_pull_secrets | default([]) | length > 0 %} - imagePullSecrets: -{% for n in kiali_vars.deployment.image_pull_secrets %} - - name: {{ n }} -{% endfor %} -{% endif %} -{% if 
kiali_vars.deployment.host_aliases|length > 0 %} - hostAliases: - {{ kiali_vars.deployment.host_aliases | to_nice_yaml(indent=0) | trim | indent(8) }} -{% endif %} - containers: - - image: {{ kiali_vars.deployment.image_name }}{{ '@' + kiali_vars.deployment.image_digest if kiali_vars.deployment.image_digest != '' else '' }}:{{ kiali_vars.deployment.image_version }} - imagePullPolicy: {{ kiali_vars.deployment.image_pull_policy }} - name: kiali - command: - - "/opt/kiali/kiali" - - "-config" - - "/kiali-configuration/config.yaml" - securityContext: -{% if kiali_vars.deployment.security_context|length > 0 %} - {{ kiali_vars.deployment.security_context | to_nice_yaml(indent=0) | trim | indent(10) }} -{% endif %} -{% if kiali_vars.deployment.security_context|length == 0 or lookup('env', 'ALLOW_SECURITY_CONTEXT_OVERRIDE') != "true" %} - allowPrivilegeEscalation: false - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: true - capabilities: - drop: - - ALL -{% endif %} - ports: - - name: api-port - containerPort: {{ kiali_vars.server.port }} -{% if kiali_vars.server.observability.metrics.enabled|bool == True %} - - name: http-metrics - containerPort: {{ kiali_vars.server.observability.metrics.port }} -{% endif %} - readinessProbe: - httpGet: - path: {{ kiali_vars.server.web_root | regex_replace('\\/$', '') }}/healthz - port: api-port - scheme: {{ 'HTTP' if kiali_vars.identity.cert_file == "" else 'HTTPS' }} - initialDelaySeconds: 5 - periodSeconds: 30 - livenessProbe: - httpGet: - path: {{ kiali_vars.server.web_root | regex_replace('\\/$', '') }}/healthz - port: api-port - scheme: {{ 'HTTP' if kiali_vars.identity.cert_file == "" else 'HTTPS' }} - initialDelaySeconds: 5 - periodSeconds: 30 - env: - - name: ACTIVE_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LOG_FORMAT - value: "{{ kiali_vars.deployment.logger.log_format }}" - - name: LOG_LEVEL - value: "{{ kiali_vars.deployment.verbose_mode if kiali_vars.deployment.verbose_mode is defined else kiali_vars.deployment.logger.log_level }}" - - name: LOG_SAMPLER_RATE - value: "{{ kiali_vars.deployment.logger.sampler_rate }}" - - name: LOG_TIME_FIELD_FORMAT - value: "{{ kiali_vars.deployment.logger.time_field_format }}" - volumeMounts: - - name: kiali-configuration - mountPath: "/kiali-configuration" - - name: kiali-secret - mountPath: "/kiali-secret" - - name: kiali-cabundle - mountPath: "/kiali-cabundle" -{% for sec in kiali_deployment_secret_volumes %} - - name: {{ sec }} - mountPath: "/kiali-override-secrets/{{ sec }}" - readOnly: true -{% endfor %} -{% for secret in kiali_vars.deployment.custom_secrets %} - - name: {{ secret.name }} - mountPath: "{{ secret.mount }}" -{% endfor %} -{% if kiali_vars.deployment.resources|length > 0 %} - resources: - {{ kiali_vars.deployment.resources | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - resources: null -{% endif %} - volumes: - - name: kiali-configuration - configMap: - name: {{ kiali_vars.deployment.instance_name }} - - name: kiali-secret - secret: - secretName: {{ kiali_vars.deployment.secret_name }} - optional: true - - name: kiali-cabundle - configMap: - name: {{ kiali_vars.deployment.instance_name }}-cabundle - optional: true -{% for sec in kiali_deployment_secret_volumes %} - - name: {{ sec }} - secret: - secretName: {{ kiali_deployment_secret_volumes[sec].secret_name }} - items: - - key: {{ kiali_deployment_secret_volumes[sec].secret_key }} - path: value.txt - optional: false -{% endfor %} -{% for secret in 
kiali_vars.deployment.custom_secrets %} - - name: {{ secret.name }} - secret: - secretName: {{ secret.name }} -{% if secret.optional is defined %} - optional: {{ secret.optional }} -{% endif %} -{% endfor %} -{% if kiali_vars.deployment.affinity.node|length > 0 or kiali_vars.deployment.affinity.pod|length > 0 or kiali_vars.deployment.affinity.pod_anti|length > 0 %} - affinity: -{% if kiali_vars.deployment.affinity.node|length > 0 %} - nodeAffinity: - {{ kiali_vars.deployment.affinity.node | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - nodeAffinity: null -{% endif %} -{% if kiali_vars.deployment.affinity.pod|length > 0 %} - podAffinity: - {{ kiali_vars.deployment.affinity.pod | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - podAffinity: null -{% endif %} -{% if kiali_vars.deployment.affinity.pod_anti|length > 0 %} - podAntiAffinity: - {{ kiali_vars.deployment.affinity.pod_anti | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - podAntiAffinity: null -{% endif %} -{% else %} - affinity: null -{% endif %} -{% if kiali_vars.deployment.tolerations|length > 0 %} - tolerations: - {{ kiali_vars.deployment.tolerations | to_nice_yaml(indent=0) | trim | indent(6) }} -{% else %} - tolerations: null -{% endif %} -{% if kiali_vars.deployment.node_selector|length > 0 %} - nodeSelector: - {{ kiali_vars.deployment.node_selector | to_nice_yaml(indent=0) | trim | indent(8) }} -{% else %} - nodeSelector: null -{% endif %} diff --git a/roles/v1.57/kiali-deploy/templates/kubernetes/hpa.yaml b/roles/v1.57/kiali-deploy/templates/kubernetes/hpa.yaml deleted file mode 100644 index 2802d2169..000000000 --- a/roles/v1.57/kiali-deploy/templates/kubernetes/hpa.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{% if kiali_vars.deployment.hpa.spec | length > 0 %} -apiVersion: {{ kiali_vars.deployment.hpa.api_version }} -kind: HorizontalPodAutoscaler -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ kiali_vars.deployment.instance_name }} - {{ kiali_vars.deployment.hpa.spec | to_nice_yaml(indent=0) | trim | indent(2) }} -{% endif %} diff --git a/roles/v1.57/kiali-deploy/templates/kubernetes/ingress.yaml b/roles/v1.57/kiali-deploy/templates/kubernetes/ingress.yaml deleted file mode 100644 index 8e89d3654..000000000 --- a/roles/v1.57/kiali-deploy/templates/kubernetes/ingress.yaml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: "networking.k8s.io/{{ 'v1' if (lookup(k8s_plugin, kind='Ingress', api_version='networking.k8s.io/v1', errors='ignore') is iterable) else 'v1beta1' }}" -kind: Ingress -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_vars.deployment.ingress.additional_labels | combine(kiali_resource_metadata_labels) }} -{% if kiali_vars.deployment.ingress.override_yaml is defined and kiali_vars.deployment.ingress.override_yaml.metadata is defined and kiali_vars.deployment.ingress.override_yaml.metadata.annotations is defined %} - {{ kiali_vars.deployment.ingress.override_yaml.metadata | to_nice_yaml(indent=0) | trim | indent(2) }} -{% else %} - annotations: - # For ingress-nginx versions older than 0.20.0 - # (see: https://github.com/kubernetes/ingress-nginx/issues/3416#issuecomment-438247948) - nginx.ingress.kubernetes.io/secure-backends: "{{ 'false' if kiali_vars.identity.cert_file == "" else 'true' }}" - # For 
ingress-nginx versions 0.20.0 and later - nginx.ingress.kubernetes.io/backend-protocol: "{{ 'HTTP' if kiali_vars.identity.cert_file == "" else 'HTTPS' }}" -{% endif %} -spec: -{% if kiali_vars.deployment.ingress.override_yaml is defined and kiali_vars.deployment.ingress.override_yaml.spec is defined %} - {{ kiali_vars.deployment.ingress.override_yaml.spec | to_nice_yaml(indent=0) | trim | indent(2) }} -{% else %} -{% if kiali_vars.deployment.ingress.class_name != "" %} - ingressClassName: {{ kiali_vars.deployment.ingress.class_name }} -{% endif %} - rules: - - http: - paths: - - path: {{ kiali_vars.server.web_root }} -{% if lookup(k8s_plugin, kind='Ingress', api_version='networking.k8s.io/v1', errors='ignore') is iterable %} - pathType: Prefix - backend: - service: - name: {{ kiali_vars.deployment.instance_name }} - port: - number: {{ kiali_vars.server.port }} -{% else %} - backend: - serviceName: {{ kiali_vars.deployment.instance_name }} - servicePort: {{ kiali_vars.server.port }} -{% endif %} -{% if kiali_vars.server.web_fqdn|length != 0 %} - host: {{ kiali_vars.server.web_fqdn }} -{% endif %} -{% endif %} diff --git a/roles/v1.57/kiali-deploy/templates/kubernetes/role-controlplane.yaml b/roles/v1.57/kiali-deploy/templates/kubernetes/role-controlplane.yaml deleted file mode 100644 index 35ad8b2a9..000000000 --- a/roles/v1.57/kiali-deploy/templates/kubernetes/role-controlplane.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ kiali_vars.deployment.instance_name }}-controlplane - namespace: "{{ kiali_vars.istio_namespace }}" - labels: {{ kiali_resource_metadata_labels }} -rules: -{% if kiali_vars.kiali_feature_flags.clustering.enabled|bool == True %} -- apiGroups: [""] - resources: - - secrets - verbs: - - list -{% endif %} -{% if kiali_vars.kiali_feature_flags.certificates_information_indicators.enabled|bool == True %} -- apiGroups: [""] - resourceNames: -{% for s in kiali_vars.kiali_feature_flags.certificates_information_indicators.secrets %} - - {{ s }} -{% endfor %} - resources: - - secrets - verbs: - - get - - list - - watch -{% endif %} \ No newline at end of file diff --git a/roles/v1.57/kiali-deploy/templates/kubernetes/role-viewer.yaml b/roles/v1.57/kiali-deploy/templates/kubernetes/role-viewer.yaml deleted file mode 100644 index 5a623b2ad..000000000 --- a/roles/v1.57/kiali-deploy/templates/kubernetes/role-viewer.yaml +++ /dev/null @@ -1,71 +0,0 @@ -{% for namespace in role_namespaces %} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: {{ role_kind }} -metadata: - name: {{ kiali_vars.deployment.instance_name }}-viewer - namespace: "{{ namespace }}" - labels: {{ kiali_resource_metadata_labels }} -rules: -- apiGroups: [""] - resources: - - configmaps - - endpoints -{% if 'logs-tab' not in kiali_vars.kiali_feature_flags.disabled_features %} - - pods/log -{% endif %} - verbs: - - get - - list - - watch -- apiGroups: [""] - resources: - - namespaces - - pods - - replicationcontrollers - - services - verbs: - - get - - list - - watch -- apiGroups: [""] - resources: - - pods/portforward - verbs: - - create - - post -- apiGroups: ["extensions", "apps"] - resources: - - daemonsets - - deployments - - replicasets - - statefulsets - verbs: - - get - - list - - watch -- apiGroups: ["batch"] - resources: - - cronjobs - - jobs - verbs: - - get - - list - - watch -- apiGroups: - - networking.istio.io - - security.istio.io - - extensions.istio.io - - telemetry.istio.io - - gateway.networking.k8s.io - resources: ["*"] - verbs: - - 
get - - list - - watch -- apiGroups: ["authentication.k8s.io"] - resources: - - tokenreviews - verbs: - - create -{% endfor %} diff --git a/roles/v1.57/kiali-deploy/templates/kubernetes/role.yaml b/roles/v1.57/kiali-deploy/templates/kubernetes/role.yaml deleted file mode 100644 index c74d3e4fc..000000000 --- a/roles/v1.57/kiali-deploy/templates/kubernetes/role.yaml +++ /dev/null @@ -1,77 +0,0 @@ -{% for namespace in role_namespaces %} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: {{ role_kind }} -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ namespace }}" - labels: {{ kiali_resource_metadata_labels }} -rules: -- apiGroups: [""] - resources: - - configmaps - - endpoints -{% if 'logs-tab' not in kiali_vars.kiali_feature_flags.disabled_features %} - - pods/log -{% endif %} - verbs: - - get - - list - - watch -- apiGroups: [""] - resources: - - namespaces - - pods - - replicationcontrollers - - services - verbs: - - get - - list - - watch - - patch -- apiGroups: [""] - resources: - - pods/portforward - verbs: - - create - - post -- apiGroups: ["extensions", "apps"] - resources: - - daemonsets - - deployments - - replicasets - - statefulsets - verbs: - - get - - list - - watch - - patch -- apiGroups: ["batch"] - resources: - - cronjobs - - jobs - verbs: - - get - - list - - watch - - patch -- apiGroups: - - networking.istio.io - - security.istio.io - - extensions.istio.io - - telemetry.istio.io - - gateway.networking.k8s.io - resources: ["*"] - verbs: - - get - - list - - watch - - create - - delete - - patch -- apiGroups: ["authentication.k8s.io"] - resources: - - tokenreviews - verbs: - - create -{% endfor %} diff --git a/roles/v1.57/kiali-deploy/templates/kubernetes/rolebinding-controlplane.yaml b/roles/v1.57/kiali-deploy/templates/kubernetes/rolebinding-controlplane.yaml deleted file mode 100644 index 583ad214e..000000000 --- a/roles/v1.57/kiali-deploy/templates/kubernetes/rolebinding-controlplane.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ kiali_vars.deployment.instance_name }}-controlplane - namespace: "{{ kiali_vars.istio_namespace }}" - labels: {{ kiali_resource_metadata_labels }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ kiali_vars.deployment.instance_name }}-controlplane -subjects: -- kind: ServiceAccount - name: {{ kiali_vars.deployment.instance_name }}-service-account - namespace: "{{ kiali_vars.deployment.namespace }}" diff --git a/roles/v1.57/kiali-deploy/templates/kubernetes/rolebinding.yaml b/roles/v1.57/kiali-deploy/templates/kubernetes/rolebinding.yaml deleted file mode 100644 index 274cdfcd0..000000000 --- a/roles/v1.57/kiali-deploy/templates/kubernetes/rolebinding.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{% for namespace in role_namespaces %} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: {{ role_binding_kind }} -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ namespace }}" - labels: {{ kiali_resource_metadata_labels }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: {{ role_kind }} - name: {{ (kiali_vars.deployment.instance_name + '-viewer') if kiali_vars.deployment.view_only_mode|bool == True else kiali_vars.deployment.instance_name }} -subjects: -- kind: ServiceAccount - name: {{ kiali_vars.deployment.instance_name }}-service-account - namespace: "{{ kiali_vars.deployment.namespace }}" -{% endfor %} diff --git a/roles/v1.57/kiali-deploy/templates/kubernetes/service.yaml 
b/roles/v1.57/kiali-deploy/templates/kubernetes/service.yaml deleted file mode 100644 index 4e0ac7675..000000000 --- a/roles/v1.57/kiali-deploy/templates/kubernetes/service.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} - annotations: -{% if kiali_vars.server.web_fqdn|length != 0 and kiali_vars.server.web_schema|length != 0 %} - kiali.io/external-url: {{ kiali_vars.server.web_schema + '://' + kiali_vars.server.web_fqdn + ((':' + kiali_vars.server.web_port | string) if (kiali_vars.server.web_port | string | length != 0) else '') + (kiali_vars.server.web_root | default('')) }} -{% endif %} -{% if kiali_vars.deployment.service_annotations|length > 0 %} - {{ kiali_vars.deployment.service_annotations | to_nice_yaml(indent=0) | trim | indent(4) }} -{% endif %} -spec: -{% if kiali_vars.deployment.service_type is defined %} - type: {{ kiali_vars.deployment.service_type }} -{% endif %} - ports: - - name: {{ 'http' if kiali_vars.identity.cert_file == "" else 'tcp' }} - protocol: TCP - port: {{ kiali_vars.server.port }} -{% if kiali_vars.server.observability.metrics.enabled|bool == True %} - - name: http-metrics - protocol: TCP - port: {{ kiali_vars.server.observability.metrics.port }} -{% endif %} - selector: -{% if query(k8s_plugin, kind='Service', resource_name=kiali_vars.deployment.instance_name, namespace=kiali_vars.deployment.namespace) | length > 0 %} - app: null - version: null -{% endif %} - app.kubernetes.io/name: kiali - app.kubernetes.io/instance: {{ kiali_vars.deployment.instance_name }} - {% if kiali_vars.deployment.additional_service_yaml is defined %}{{ kiali_vars.deployment.additional_service_yaml | to_nice_yaml(indent=0) | trim | indent(2) }}{% endif %} diff --git a/roles/v1.57/kiali-deploy/templates/kubernetes/serviceaccount.yaml b/roles/v1.57/kiali-deploy/templates/kubernetes/serviceaccount.yaml deleted file mode 100644 index 5feedce14..000000000 --- a/roles/v1.57/kiali-deploy/templates/kubernetes/serviceaccount.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ kiali_vars.deployment.instance_name }}-service-account - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} diff --git a/roles/v1.57/kiali-deploy/templates/openshift/cabundle.yaml b/roles/v1.57/kiali-deploy/templates/openshift/cabundle.yaml deleted file mode 100644 index c45c504fe..000000000 --- a/roles/v1.57/kiali-deploy/templates/openshift/cabundle.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ kiali_vars.deployment.instance_name }}-cabundle - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} - annotations: - service.beta.openshift.io/inject-cabundle: "true" diff --git a/roles/v1.57/kiali-deploy/templates/openshift/configmap.yaml b/roles/v1.57/kiali-deploy/templates/openshift/configmap.yaml deleted file mode 100644 index 6e1ca1986..000000000 --- a/roles/v1.57/kiali-deploy/templates/openshift/configmap.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} -{% if kiali_vars.deployment.configmap_annotations is defined and kiali_vars.deployment.configmap_annotations|length > 0 %} - annotations: - 
{{ kiali_vars.deployment.configmap_annotations | to_nice_yaml(indent=0) | trim | indent(4) }} -{% endif %} -data: - config.yaml: | - {{ kiali_vars | to_nice_yaml(indent=0) | trim | indent(4) }} diff --git a/roles/v1.57/kiali-deploy/templates/openshift/console-links.yaml b/roles/v1.57/kiali-deploy/templates/openshift/console-links.yaml deleted file mode 100644 index a51178c1d..000000000 --- a/roles/v1.57/kiali-deploy/templates/openshift/console-links.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{% for namespace in namespaces %} ---- -apiVersion: console.openshift.io/v1 -kind: ConsoleLink -metadata: - name: {{ kiali_vars.deployment.instance_name }}-namespace-{{ namespace }} - labels: {{ kiali_resource_metadata_labels | combine({'kiali.io/home': ((kiali_vars.deployment.instance_name + '.') if kiali_vars.deployment.instance_name != 'kiali' else '') + kiali_vars.deployment.namespace }) }} -spec: - href: {{ kiali_route_url }}{{ '/' if kiali_vars.server.web_root == '/' else (kiali_vars.server.web_root + '/') }}console/graph/namespaces?namespaces={{ namespace }} - location: NamespaceDashboard - text: Kiali - text: {{ ('Kiali [' + kiali_vars.deployment.instance_name + ']') if kiali_vars.deployment.instance_name != 'kiali' else 'Kiali' }} - namespaceDashboard: - namespaces: - - "{{ namespace }}" -{% endfor %} diff --git a/roles/v1.57/kiali-deploy/templates/openshift/deployment.yaml b/roles/v1.57/kiali-deploy/templates/openshift/deployment.yaml deleted file mode 100644 index 4e1e88cb6..000000000 --- a/roles/v1.57/kiali-deploy/templates/openshift/deployment.yaml +++ /dev/null @@ -1,198 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} -spec: - replicas: {{ kiali_vars.deployment.replicas }} - selector: - matchLabels: - app.kubernetes.io/name: kiali - app.kubernetes.io/instance: {{ kiali_vars.deployment.instance_name }} - template: - metadata: - name: {{ kiali_vars.deployment.instance_name }} - labels: {{ kiali_resource_metadata_labels | combine(kiali_vars.deployment.pod_labels) }} - annotations: -{% if kiali_vars.server.observability.metrics.enabled|bool == True %} - prometheus.io/scrape: "true" - prometheus.io/port: "{{ kiali_vars.server.observability.metrics.port }}" -{% else %} - prometheus.io/scrape: "false" - prometheus.io/port: null -{% endif %} - kiali.io/dashboards: go,kiali - operator.kiali.io/last-updated: "{{ deployment_last_updated }}" -{% if kiali_vars.deployment.pod_annotations|length > 0 %} - {{ kiali_vars.deployment.pod_annotations | to_nice_yaml(indent=0) | trim | indent(8) }} -{% endif %} - strategy: - rollingUpdate: - maxSurge: 1 - maxAvailable: 1 - type: RollingUpdate - spec: - serviceAccount: {{ kiali_vars.deployment.instance_name }}-service-account -{% if kiali_vars.deployment.priority_class_name != "" %} - priorityClassName: "{{ kiali_vars.deployment.priority_class_name }}" -{% endif %} -{% if kiali_vars.deployment.image_pull_secrets | default([]) | length > 0 %} - imagePullSecrets: -{% for n in kiali_vars.deployment.image_pull_secrets %} - - name: {{ n }} -{% endfor %} -{% endif %} -{% if kiali_vars.deployment.host_aliases|length > 0 %} - hostAliases: - {{ kiali_vars.deployment.host_aliases | to_nice_yaml(indent=0) | trim | indent(8) }} -{% endif %} - containers: - - image: {{ kiali_vars.deployment.image_name }}{{ '@' + kiali_vars.deployment.image_digest if kiali_vars.deployment.image_digest != '' else '' }}:{{ 
kiali_vars.deployment.image_version }} - imagePullPolicy: {{ kiali_vars.deployment.image_pull_policy }} - name: kiali - command: - - "/opt/kiali/kiali" - - "-config" - - "/kiali-configuration/config.yaml" - securityContext: -{% if kiali_vars.deployment.security_context|length > 0 %} - {{ kiali_vars.deployment.security_context | to_nice_yaml(indent=0) | trim | indent(10) }} -{% endif %} -{% if kiali_vars.deployment.security_context|length == 0 or lookup('env', 'ALLOW_SECURITY_CONTEXT_OVERRIDE') != "true" %} - allowPrivilegeEscalation: false - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: true - capabilities: - drop: - - ALL -{% endif %} - ports: - - name: api-port - containerPort: {{ kiali_vars.server.port }} -{% if kiali_vars.server.observability.metrics.enabled|bool == True %} - - name: http-metrics - containerPort: {{ kiali_vars.server.observability.metrics.port }} -{% endif %} - readinessProbe: - httpGet: - path: {{ kiali_vars.server.web_root | regex_replace('\\/$', '') }}/healthz - port: api-port - scheme: {{ 'HTTP' if kiali_vars.identity.cert_file == "" else 'HTTPS' }} - initialDelaySeconds: 5 - periodSeconds: 30 - livenessProbe: - httpGet: - path: {{ kiali_vars.server.web_root | regex_replace('\\/$', '') }}/healthz - port: api-port - scheme: {{ 'HTTP' if kiali_vars.identity.cert_file == "" else 'HTTPS' }} - initialDelaySeconds: 5 - periodSeconds: 30 - env: - - name: ACTIVE_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LOG_FORMAT - value: "{{ kiali_vars.deployment.logger.log_format }}" - - name: LOG_LEVEL - value: "{{ kiali_vars.deployment.verbose_mode if kiali_vars.deployment.verbose_mode is defined else kiali_vars.deployment.logger.log_level }}" - - name: LOG_SAMPLER_RATE - value: "{{ kiali_vars.deployment.logger.sampler_rate }}" - - name: LOG_TIME_FIELD_FORMAT - value: "{{ kiali_vars.deployment.logger.time_field_format }}" - volumeMounts: - - name: kiali-configuration - mountPath: "/kiali-configuration" -{% if kiali_vars.identity.cert_file == "/kiali-cert/tls.crt" %} - - name: kiali-cert - mountPath: "/kiali-cert" -{% endif %} - - name: kiali-secret - mountPath: "/kiali-secret" - - name: kiali-cabundle - mountPath: "/kiali-cabundle" -{% for sec in kiali_deployment_secret_volumes %} - - name: {{ sec }} - mountPath: "/kiali-override-secrets/{{ sec }}" - readOnly: true -{% endfor %} -{% for secret in kiali_vars.deployment.custom_secrets %} - - name: {{ secret.name }} - mountPath: "{{ secret.mount }}" -{% endfor %} -{% if kiali_vars.deployment.resources|length > 0 %} - resources: - {{ kiali_vars.deployment.resources | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - resources: null -{% endif %} - volumes: - - name: kiali-configuration - configMap: - name: {{ kiali_vars.deployment.instance_name }} -{% if kiali_vars.identity.cert_file == "/kiali-cert/tls.crt" %} - - name: kiali-cert - secret: - secretName: {{ kiali_vars.deployment.instance_name }}-cert-secret -{% endif %} - - name: kiali-secret - secret: - secretName: {{ kiali_vars.deployment.secret_name }} - optional: true - - name: kiali-cabundle - configMap: - name: {{ kiali_vars.deployment.instance_name }}-cabundle -{% for sec in kiali_deployment_secret_volumes %} - - name: {{ sec }} - secret: - secretName: {{ kiali_deployment_secret_volumes[sec].secret_name }} - items: - - key: {{ kiali_deployment_secret_volumes[sec].secret_key }} - path: value.txt - optional: false -{% endfor %} -{% for secret in kiali_vars.deployment.custom_secrets %} - - name: {{ secret.name }} - 
secret: - secretName: {{ secret.name }} -{% if secret.optional is defined %} - optional: {{ secret.optional }} -{% endif %} -{% endfor %} -{% if kiali_vars.deployment.affinity.node|length > 0 or kiali_vars.deployment.affinity.pod|length > 0 or kiali_vars.deployment.affinity.pod_anti|length > 0 %} - affinity: -{% if kiali_vars.deployment.affinity.node|length > 0 %} - nodeAffinity: - {{ kiali_vars.deployment.affinity.node | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - nodeAffinity: null -{% endif %} -{% if kiali_vars.deployment.affinity.pod|length > 0 %} - podAffinity: - {{ kiali_vars.deployment.affinity.pod | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - podAffinity: null -{% endif %} -{% if kiali_vars.deployment.affinity.pod_anti|length > 0 %} - podAntiAffinity: - {{ kiali_vars.deployment.affinity.pod_anti | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - podAntiAffinity: null -{% endif %} -{% else %} - affinity: null -{% endif %} -{% if kiali_vars.deployment.tolerations|length > 0 %} - tolerations: - {{ kiali_vars.deployment.tolerations | to_nice_yaml(indent=0) | trim | indent(6) }} -{% else %} - tolerations: null -{% endif %} -{% if kiali_vars.deployment.node_selector|length > 0 %} - nodeSelector: - {{ kiali_vars.deployment.node_selector | to_nice_yaml(indent=0) | trim | indent(8) }} -{% else %} - nodeSelector: null -{% endif %} diff --git a/roles/v1.57/kiali-deploy/templates/openshift/hpa.yaml b/roles/v1.57/kiali-deploy/templates/openshift/hpa.yaml deleted file mode 100644 index 2802d2169..000000000 --- a/roles/v1.57/kiali-deploy/templates/openshift/hpa.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{% if kiali_vars.deployment.hpa.spec | length > 0 %} -apiVersion: {{ kiali_vars.deployment.hpa.api_version }} -kind: HorizontalPodAutoscaler -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ kiali_vars.deployment.instance_name }} - {{ kiali_vars.deployment.hpa.spec | to_nice_yaml(indent=0) | trim | indent(2) }} -{% endif %} diff --git a/roles/v1.57/kiali-deploy/templates/openshift/oauth.yaml b/roles/v1.57/kiali-deploy/templates/openshift/oauth.yaml deleted file mode 100644 index 0542513d0..000000000 --- a/roles/v1.57/kiali-deploy/templates/openshift/oauth.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: oauth.openshift.io/v1 -kind: OAuthClient -metadata: - name: {{ kiali_vars.deployment.instance_name }}-{{ kiali_vars.deployment.namespace }} - labels: {{ kiali_resource_metadata_labels }} -redirectURIs: - - {{ kiali_route_url }} -{% if kiali_vars.server.web_port | length > 0 %} - - {{ kiali_route_url }}:{{ kiali_vars.server.web_port }} -{% endif %} -grantMethod: auto -{% if kiali_vars.auth.openshift.token_inactivity_timeout is defined %} -accessTokenInactivityTimeoutSeconds: {{ kiali_vars.auth.openshift.token_inactivity_timeout }} -{% endif %} -{% if kiali_vars.auth.openshift.token_max_age is defined %} -accessTokenMaxAgeSeconds: {{ kiali_vars.auth.openshift.token_max_age }} -{% endif %} diff --git a/roles/v1.57/kiali-deploy/templates/openshift/role-controlplane.yaml b/roles/v1.57/kiali-deploy/templates/openshift/role-controlplane.yaml deleted file mode 100644 index 35ad8b2a9..000000000 --- a/roles/v1.57/kiali-deploy/templates/openshift/role-controlplane.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ 
kiali_vars.deployment.instance_name }}-controlplane - namespace: "{{ kiali_vars.istio_namespace }}" - labels: {{ kiali_resource_metadata_labels }} -rules: -{% if kiali_vars.kiali_feature_flags.clustering.enabled|bool == True %} -- apiGroups: [""] - resources: - - secrets - verbs: - - list -{% endif %} -{% if kiali_vars.kiali_feature_flags.certificates_information_indicators.enabled|bool == True %} -- apiGroups: [""] - resourceNames: -{% for s in kiali_vars.kiali_feature_flags.certificates_information_indicators.secrets %} - - {{ s }} -{% endfor %} - resources: - - secrets - verbs: - - get - - list - - watch -{% endif %} \ No newline at end of file diff --git a/roles/v1.57/kiali-deploy/templates/openshift/role-viewer.yaml b/roles/v1.57/kiali-deploy/templates/openshift/role-viewer.yaml deleted file mode 100644 index 6324400dd..000000000 --- a/roles/v1.57/kiali-deploy/templates/openshift/role-viewer.yaml +++ /dev/null @@ -1,88 +0,0 @@ -{% for namespace in role_namespaces %} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: {{ role_kind }} -metadata: - name: {{ kiali_vars.deployment.instance_name }}-viewer - namespace: "{{ namespace }}" - labels: {{ kiali_resource_metadata_labels }} -rules: -- apiGroups: [""] - resources: - - configmaps - - endpoints -{% if 'logs-tab' not in kiali_vars.kiali_feature_flags.disabled_features %} - - pods/log -{% endif %} - verbs: - - get - - list - - watch -- apiGroups: [""] - resources: - - namespaces - - pods - - replicationcontrollers - - services - verbs: - - get - - list - - watch -- apiGroups: [""] - resources: - - pods/portforward - verbs: - - create - - post -- apiGroups: ["extensions", "apps"] - resources: - - daemonsets - - deployments - - replicasets - - statefulsets - verbs: - - get - - list - - watch -- apiGroups: ["batch"] - resources: - - cronjobs - - jobs - verbs: - - get - - list - - watch -- apiGroups: - - networking.istio.io - - security.istio.io - - extensions.istio.io - - telemetry.istio.io - - gateway.networking.k8s.io - resources: ["*"] - verbs: - - get - - list - - watch -- apiGroups: ["apps.openshift.io"] - resources: - - deploymentconfigs - verbs: - - get - - list - - watch -- apiGroups: ["project.openshift.io"] - resources: - - projects - verbs: - - get -- apiGroups: ["route.openshift.io"] - resources: - - routes - verbs: - - get -- apiGroups: ["authentication.k8s.io"] - resources: - - tokenreviews - verbs: - - create -{% endfor %} diff --git a/roles/v1.57/kiali-deploy/templates/openshift/role.yaml b/roles/v1.57/kiali-deploy/templates/openshift/role.yaml deleted file mode 100644 index 788ab151a..000000000 --- a/roles/v1.57/kiali-deploy/templates/openshift/role.yaml +++ /dev/null @@ -1,95 +0,0 @@ -{% for namespace in role_namespaces %} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: {{ role_kind }} -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ namespace }}" - labels: {{ kiali_resource_metadata_labels }} -rules: -- apiGroups: [""] - resources: - - configmaps - - endpoints -{% if 'logs-tab' not in kiali_vars.kiali_feature_flags.disabled_features %} - - pods/log -{% endif %} - verbs: - - get - - list - - watch -- apiGroups: [""] - resources: - - namespaces - - pods - - replicationcontrollers - - services - verbs: - - get - - list - - watch - - patch -- apiGroups: [""] - resources: - - pods/portforward - verbs: - - create - - post -- apiGroups: ["extensions", "apps"] - resources: - - daemonsets - - deployments - - replicasets - - statefulsets - verbs: - - get - - list - - watch - - patch -- 
apiGroups: ["batch"] - resources: - - cronjobs - - jobs - verbs: - - get - - list - - watch - - patch -- apiGroups: - - networking.istio.io - - security.istio.io - - extensions.istio.io - - telemetry.istio.io - - gateway.networking.k8s.io - resources: ["*"] - verbs: - - get - - list - - watch - - create - - delete - - patch -- apiGroups: ["apps.openshift.io"] - resources: - - deploymentconfigs - verbs: - - get - - list - - watch - - patch -- apiGroups: ["project.openshift.io"] - resources: - - projects - verbs: - - get -- apiGroups: ["route.openshift.io"] - resources: - - routes - verbs: - - get -- apiGroups: ["authentication.k8s.io"] - resources: - - tokenreviews - verbs: - - create -{% endfor %} diff --git a/roles/v1.57/kiali-deploy/templates/openshift/rolebinding-controlplane.yaml b/roles/v1.57/kiali-deploy/templates/openshift/rolebinding-controlplane.yaml deleted file mode 100644 index 583ad214e..000000000 --- a/roles/v1.57/kiali-deploy/templates/openshift/rolebinding-controlplane.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ kiali_vars.deployment.instance_name }}-controlplane - namespace: "{{ kiali_vars.istio_namespace }}" - labels: {{ kiali_resource_metadata_labels }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ kiali_vars.deployment.instance_name }}-controlplane -subjects: -- kind: ServiceAccount - name: {{ kiali_vars.deployment.instance_name }}-service-account - namespace: "{{ kiali_vars.deployment.namespace }}" diff --git a/roles/v1.57/kiali-deploy/templates/openshift/rolebinding.yaml b/roles/v1.57/kiali-deploy/templates/openshift/rolebinding.yaml deleted file mode 100644 index 274cdfcd0..000000000 --- a/roles/v1.57/kiali-deploy/templates/openshift/rolebinding.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{% for namespace in role_namespaces %} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: {{ role_binding_kind }} -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ namespace }}" - labels: {{ kiali_resource_metadata_labels }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: {{ role_kind }} - name: {{ (kiali_vars.deployment.instance_name + '-viewer') if kiali_vars.deployment.view_only_mode|bool == True else kiali_vars.deployment.instance_name }} -subjects: -- kind: ServiceAccount - name: {{ kiali_vars.deployment.instance_name }}-service-account - namespace: "{{ kiali_vars.deployment.namespace }}" -{% endfor %} diff --git a/roles/v1.57/kiali-deploy/templates/openshift/route.yaml b/roles/v1.57/kiali-deploy/templates/openshift/route.yaml deleted file mode 100644 index a8156d375..000000000 --- a/roles/v1.57/kiali-deploy/templates/openshift/route.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_vars.deployment.ingress.additional_labels | combine(kiali_resource_metadata_labels) }} -{% if kiali_vars.deployment.ingress.override_yaml is defined and kiali_vars.deployment.ingress.override_yaml.metadata is defined and kiali_vars.deployment.ingress.override_yaml.metadata.annotations is defined %} - {{ kiali_vars.deployment.ingress.override_yaml.metadata | to_nice_yaml(indent=0) | trim | indent(2) }} -{% endif %} -spec: -{% if kiali_vars.deployment.ingress.override_yaml is defined and kiali_vars.deployment.ingress.override_yaml.spec is defined %} - {{ kiali_vars.deployment.ingress.override_yaml.spec | 
to_nice_yaml(indent=0) | trim | indent(2) }} -{% else %} - tls: - termination: reencrypt - insecureEdgeTerminationPolicy: Redirect - to: - kind: Service - name: {{ kiali_vars.deployment.instance_name }} - port: - targetPort: {{ kiali_vars.server.port }} -{% endif %} diff --git a/roles/v1.57/kiali-deploy/templates/openshift/service.yaml b/roles/v1.57/kiali-deploy/templates/openshift/service.yaml deleted file mode 100644 index 4520e746f..000000000 --- a/roles/v1.57/kiali-deploy/templates/openshift/service.yaml +++ /dev/null @@ -1,32 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} - annotations: - service.beta.openshift.io/serving-cert-secret-name: {{ kiali_vars.deployment.instance_name }}-cert-secret -{% if kiali_vars.deployment.service_annotations|length > 0 %} - {{ kiali_vars.deployment.service_annotations | to_nice_yaml(indent=0) | trim | indent(4) }} -{% endif %} -spec: -{% if kiali_vars.deployment.service_type is defined %} - type: {{ kiali_vars.deployment.service_type }} -{% endif %} - ports: - - name: {{ 'http' if kiali_vars.identity.cert_file == "" else 'tcp' }} - protocol: TCP - port: {{ kiali_vars.server.port }} -{% if kiali_vars.server.observability.metrics.enabled|bool == True %} - - name: http-metrics - protocol: TCP - port: {{ kiali_vars.server.observability.metrics.port }} -{% endif %} - selector: -{% if query(k8s_plugin, kind='Service', resource_name=kiali_vars.deployment.instance_name, namespace=kiali_vars.deployment.namespace) | length > 0 %} - app: null - version: null -{% endif %} - app.kubernetes.io/name: kiali - app.kubernetes.io/instance: {{ kiali_vars.deployment.instance_name }} - {% if kiali_vars.deployment.additional_service_yaml is defined %}{{ kiali_vars.deployment.additional_service_yaml | to_nice_yaml(indent=0) | trim | indent(2) }}{% endif %} diff --git a/roles/v1.57/kiali-deploy/templates/openshift/serviceaccount.yaml b/roles/v1.57/kiali-deploy/templates/openshift/serviceaccount.yaml deleted file mode 100644 index 5feedce14..000000000 --- a/roles/v1.57/kiali-deploy/templates/openshift/serviceaccount.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ kiali_vars.deployment.instance_name }}-service-account - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} diff --git a/roles/v1.57/kiali-deploy/vars/main.yml b/roles/v1.57/kiali-deploy/vars/main.yml deleted file mode 100644 index 0931404f6..000000000 --- a/roles/v1.57/kiali-deploy/vars/main.yml +++ /dev/null @@ -1,110 +0,0 @@ -# These are the actual variables used by the role. You will notice it is -# one big dictionary (key="kiali_vars") whose child dictionaries mimic those -# as defined in defaults/main.yml. -# The child dictionaries below will have values that are a combination of the default values -# (as found in defaults/main.yaml) and user-supplied values. -# Without this magic, a user supplying only one key/value pair in a child dictionary will -# clear out (make undefined) all the rest of the key/value pairs in that child dictionary. -# This is not what we want. We want the rest of the dictionary to keep the defaults, -# thus allowing the user to override only a subset of key/values in a dictionary. 
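A minimal sketch of the merge behavior this comment describes (the values below are hypothetical and purely illustrative): a CR that supplies a single key under a dictionary keeps every other default, because the defaults are recursively combined with the user-supplied, None-stripped values rather than replaced by them.
# Hypothetical defaults (trimmed):
#   kiali_defaults:
#     deployment:
#       instance_name: "kiali"
#       view_only_mode: false
# Hypothetical user-supplied CR value:
#   deployment:
#     view_only_mode: true
# The pattern used throughout this file,
#   {{ kiali_defaults.deployment | combine((deployment | stripnone), recursive=True) }}
# then resolves to:
#   instance_name: "kiali"   # default preserved
#   view_only_mode: true     # user override applied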
-# -# I found this trick at https://groups.google.com/forum/#!topic/Ansible-project/pGbRYZyqxZ4 -# I tweeked that solution a little bit because I did not want to require the user to supply -# everything under a main "kiali_vars" dictionary. - -kiali_vars: - installation_tag: "{{ installation_tag | default(kiali_defaults.installation_tag) }}" - istio_namespace: "{{ istio_namespace | default(kiali_defaults.istio_namespace) }}" - version: "{{ version | default(kiali_defaults.version) }}" - - additional_display_details: | - {%- if additional_display_details is defined and additional_display_details is iterable -%} - {{ additional_display_details }} - {%- else -%} - {{ kiali_defaults.additional_display_details }} - {%- endif -%} - - api: | - {%- if api is defined and api is iterable -%} - {{ kiali_defaults.api | combine((api | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.api }} - {%- endif -%} - - auth: | - {%- if auth is defined and auth is iterable -%} - {{ kiali_defaults.auth | combine((auth | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.auth }} - {%- endif -%} - - custom_dashboards: | - {%- if custom_dashboards is defined and custom_dashboards is iterable -%} - {{ custom_dashboards }} - {%- else -%} - {{ kiali_defaults.custom_dashboards }} - {%- endif -%} - - deployment: | - {%- if deployment is defined and deployment is iterable -%} - {{ kiali_defaults.deployment | combine((deployment | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.deployment }} - {%- endif -%} - - external_services: | - {%- if external_services is defined and external_services is iterable -%} - {{ kiali_defaults.external_services | combine((external_services | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.external_services }} - {%- endif -%} - - health_config: | - {%- if health_config is defined and health_config is iterable -%} - {{ kiali_defaults.health_config | combine((health_config | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.health_config }} - {%- endif -%} - - identity: | - {%- if identity is defined and identity is iterable -%} - {{ kiali_defaults.identity | combine((identity | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.identity }} - {%- endif -%} - - istio_labels: | - {%- if istio_labels is defined and istio_labels is iterable -%} - {{ kiali_defaults.istio_labels | combine((istio_labels | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.istio_labels }} - {%- endif -%} - - kiali_feature_flags: | - {%- if kiali_feature_flags is defined and kiali_feature_flags is iterable -%} - {{ kiali_defaults.kiali_feature_flags | combine((kiali_feature_flags | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.kiali_feature_flags }} - {%- endif -%} - - kubernetes_config: | - {%- if kubernetes_config is defined and kubernetes_config is iterable -%} - {{ kiali_defaults.kubernetes_config | combine((kubernetes_config | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.kubernetes_config }} - {%- endif -%} - - login_token: | - {%- if login_token is defined and login_token is iterable -%} - {{ kiali_defaults.login_token | combine((login_token | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.login_token }} - {%- endif -%} - - server: | - {%- if server is defined and server is iterable -%} - {{ kiali_defaults.server | combine((server | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.server }} - {%- endif -%} - diff --git 
a/roles/v1.57/kiali-remove/defaults/main.yml b/roles/v1.57/kiali-remove/defaults/main.yml deleted file mode 100644 index 312dd9f6b..000000000 --- a/roles/v1.57/kiali-remove/defaults/main.yml +++ /dev/null @@ -1,12 +0,0 @@ -kiali_defaults_remove: - istio_namespace: "" - - deployment: - accessible_namespaces: [] - hpa: - api_version: "" - instance_name: "kiali" - -# Will be auto-detected, but for debugging purposes you can force one of these to true -is_k8s: false -is_openshift: false diff --git a/roles/v1.57/kiali-remove/filter_plugins/stripnone.py b/roles/v1.57/kiali-remove/filter_plugins/stripnone.py deleted file mode 100644 index 4dbd53033..000000000 --- a/roles/v1.57/kiali-remove/filter_plugins/stripnone.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} - -# Process recursively the given value if it is a dict and remove all keys that have a None value -def strip_none(value): - if isinstance(value, dict): - dicts = {} - for k,v in value.items(): - if isinstance(v, dict): - dicts[k] = strip_none(v) - elif v is not None: - dicts[k] = v - return dicts - else: - return value - -# ---- Ansible filters ---- -class FilterModule(object): - def filters(self): - return { - 'stripnone': strip_none - } diff --git a/roles/v1.57/kiali-remove/meta/main.yml b/roles/v1.57/kiali-remove/meta/main.yml deleted file mode 100644 index e9334e3c7..000000000 --- a/roles/v1.57/kiali-remove/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -collections: -- kubernetes.core diff --git a/roles/v1.57/kiali-remove/tasks/main.yml b/roles/v1.57/kiali-remove/tasks/main.yml deleted file mode 100644 index 3c472f40e..000000000 --- a/roles/v1.57/kiali-remove/tasks/main.yml +++ /dev/null @@ -1,290 +0,0 @@ -# These tasks remove all Kiali resources such that no remnants of Kiali will remain. -# -# Note that we ignore_errors everywhere - we do not want these tasks to ever abort with a failure. -# This is because these are run within a finalizer and if a failure aborts any task here -# the user will never be able to delete the Kiali CR - in fact, the delete will hang indefinitely -# and the user will need to do an ugly hack to fix it. - -- ignore_errors: yes - set_fact: - k8s_plugin: kubernetes.core.k8s - -- name: Get the original CR that was deleted - ignore_errors: yes - set_fact: - current_cr: "{{ _kiali_io_kiali }}" - -- name: Get information about the cluster - ignore_errors: yes - set_fact: - api_groups: "{{ lookup(k8s_plugin, cluster_info='api_groups') }}" - when: - - is_openshift == False - - is_k8s == False - -- name: Determine the cluster type - ignore_errors: yes - set_fact: - is_openshift: "{{ True if 'route.openshift.io' in api_groups else False }}" - is_k8s: "{{ False if 'route.openshift.io' in api_groups else True }}" - when: - - is_openshift == False - - is_k8s == False - -# Indicate what kind of cluster we are in (OpenShift or Kubernetes). 
-- ignore_errors: yes - debug: - msg: "CLUSTER TYPE: is_openshift={{ is_openshift }}; is_k8s={{ is_k8s }}" - -- name: Print some debug information - ignore_errors: yes - vars: - msg: | - Kiali Variables: - -------------------------------- - {{ kiali_vars_remove | to_nice_yaml }} - debug: - msg: "{{ msg.split('\n') }}" - -- name: Set default HPA api_version - ignore_errors: yes - set_fact: - kiali_vars_remove: "{{ kiali_vars_remove | combine({'deployment': {'hpa': {'api_version': 'autoscaling/v2' if lookup(k8s_plugin, api_version='autoscaling/v2', kind='horizontalpodautoscalers', errors='ignore') | type_debug == 'list' else 'autoscaling/v2beta2' }}}, recursive=True) }}" - when: - - kiali_vars_remove.deployment.hpa.api_version == "" - -# There is an edge case where a user installed Kiali with one instance name, then changed the instance name in the CR. -# This is not allowed. When this happens, the operator will abort with an error message telling the user to uninstall Kiali. -# The user will do this by deleting the Kiali CR, at which time this ansible role is executed. -# In this case we must use the instance name stored in the status not the spec because the spec will have the bad name -# and the status will have the correct name that was used to initially install Kiali. -- name: Ensure the correct instance_name is used - ignore_errors: yes - set_fact: - kiali_vars_remove: "{{ kiali_vars_remove | combine({'deployment': {'instance_name': current_cr.status.deployment.instanceName}}, recursive=True) }}" - when: - - current_cr.status is defined - - current_cr.status.deployment is defined - - current_cr.status.deployment.instanceName is defined - - current_cr.status.deployment.instanceName != kiali_vars_remove.deployment.instance_name - -- name: Set default deployment namespace to the same namespace where the CR lives - ignore_errors: yes - set_fact: - kiali_vars_remove: "{{ kiali_vars_remove | combine({'deployment': {'namespace': current_cr.metadata.namespace}}, recursive=True) }}" - when: - - kiali_vars_remove.deployment.namespace is not defined or kiali_vars_remove.deployment.namespace == "" - -- name: Set default istio namespace - ignore_errors: yes - set_fact: - kiali_vars_remove: "{{ kiali_vars_remove | combine({'istio_namespace': kiali_vars_remove.deployment.namespace}, recursive=True) }}" - when: - - kiali_vars_remove.istio_namespace == "" - -- name: Find all namespaces (this is limited to what the operator has permission to see) - ignore_errors: yes - set_fact: - all_namespaces: "{{ lookup(k8s_plugin, api_version='v1', kind='Namespace') | default({}) | json_query('[].metadata.name') }}" - -# When the Operator installed Kiali, the configmap has accessible_namespaces set. -# There are no regexes in the configmap; they are all full namespace names. -# NOTE: there is a special value of accessible_namespaces of two asterisks ("**") -# which indicates Kiali is given access to all namespaces via a single cluster role -# not individual roles in each accessible namespace. 
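For illustration only (the namespace names are hypothetical), the two shapes of deployment.accessible_namespaces that the removal tasks below have to handle look like this inside the configmap's config.yaml:
# Individual Roles were created in each listed namespace:
#   deployment:
#     accessible_namespaces: ["istio-system", "bookinfo"]
# A single cluster role was used instead ("**" is the special all-namespaces value):
#   deployment:
#     accessible_namespaces: ["**"]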
- -- name: Find current configmap, if it exists - ignore_errors: yes - set_fact: - current_configmap: "{{ lookup(k8s_plugin, resource_name=kiali_vars_remove.deployment.instance_name, namespace=kiali_vars_remove.deployment.namespace, api_version='v1', kind='ConfigMap') }}" -- name: Find currently accessible namespaces - ignore_errors: yes - set_fact: - current_accessible_namespaces: "{{ current_configmap.data['config.yaml'] | from_yaml | json_query('deployment.accessible_namespaces') }}" - when: - - current_configmap is defined - - current_configmap.data is defined - - current_configmap.data['config.yaml'] is defined - -- name: Delete all additional Kiali roles in current accessible namespaces - ignore_errors: yes - k8s: - state: absent - definition: | - {% for namespace in current_accessible_namespaces %} - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: "{{ kiali_vars_remove.deployment.instance_name }}" - namespace: "{{ namespace }}" - ... - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ kiali_vars_remove.deployment.instance_name }}" - namespace: "{{ namespace }}" - ... - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ kiali_vars_remove.deployment.instance_name }}-viewer" - namespace: "{{ namespace }}" - ... - {% endfor %} - when: - - current_accessible_namespaces is defined - - '"**" not in current_accessible_namespaces' - -- name: Find currently configured label selector - ignore_errors: yes - set_fact: - current_label_selector: "{{ current_configmap.data['config.yaml'] | from_yaml | json_query('api.namespaces.label_selector') }}" - when: - - current_configmap is defined - - current_configmap.data is defined - - current_configmap.data['config.yaml'] is defined - -- name: Remove Kiali label from namespaces found in current accessible namespaces - ignore_errors: yes - vars: - # everything to the left of the = is the name of the label we want to remove - the_namespace_label_name: "{{ current_label_selector | regex_replace('^(.*)=.*$', '\\1') }}" - # if a namespace happened to have been deleted, we do not want to (nor can we) resurrect it, hence we use state=patched - k8s: - state: patched - definition: | - {% for namespace in current_accessible_namespaces %} - --- - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ namespace }}" - labels: - {{ the_namespace_label_name }}: null - ... 
- {% endfor %} - when: - - current_accessible_namespaces is defined - - '"**" not in current_accessible_namespaces' - - current_label_selector is defined - -- name: Delete Kiali cluster roles - ignore_errors: yes - include_tasks: remove-clusterroles.yml - when: - - current_accessible_namespaces is defined - - '"**" in current_accessible_namespaces' - -- name: Delete Kiali resources - ignore_errors: yes - k8s: - state: absent - api_version: "{{ k8s_item.apiVersion }}" - kind: "{{ k8s_item.kind }}" - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - name: "{{ k8s_item.metadata.name }}" - register: delete_result - until: delete_result.result == {} or (delete_result.result.status is defined and delete_result.result.status == "Success") - retries: 6 - delay: 10 - when: - - k8s_item is defined - - k8s_item.apiVersion is defined - - k8s_item.kind is defined - - k8s_item.metadata is defined - - k8s_item.metadata.name is defined - with_items: - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='HorizontalPodAutoscaler', resource_name=kiali_vars_remove.deployment.instance_name, api_version=kiali_vars_remove.deployment.hpa.api_version) }}" - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='Ingress', resource_name=kiali_vars_remove.deployment.instance_name, api_version='networking.k8s.io/' + ('v1' if (lookup(k8s_plugin, kind='Ingress', api_version='networking.k8s.io/v1', errors='ignore') is iterable) else 'v1beta1')) }}" - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='Deployment', resource_name=kiali_vars_remove.deployment.instance_name, api_version='apps/v1') }}" - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='ReplicaSet', resource_name=kiali_vars_remove.deployment.instance_name, api_version='v1') }}" - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='Pod', resource_name=kiali_vars_remove.deployment.instance_name, api_version='v1') }}" - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='Service', resource_name=kiali_vars_remove.deployment.instance_name, api_version='v1') }}" - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='ServiceAccount', resource_name=kiali_vars_remove.deployment.instance_name + '-service-account', api_version='v1') }}" - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='RoleBinding', resource_name=kiali_vars_remove.deployment.instance_name, api_version='rbac.authorization.k8s.io/v1') }}" - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='Role', resource_name=kiali_vars_remove.deployment.instance_name, api_version='rbac.authorization.k8s.io/v1') }}" - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='Role', resource_name=kiali_vars_remove.deployment.instance_name + '-viewer', api_version='rbac.authorization.k8s.io/v1') }}" - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='ConfigMap', resource_name=kiali_vars_remove.deployment.instance_name, api_version='v1') }}" - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.istio_namespace, kind='RoleBinding', resource_name=kiali_vars_remove.deployment.instance_name + '-controlplane', api_version='rbac.authorization.k8s.io/v1') }}" - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.istio_namespace, kind='Role', resource_name=kiali_vars_remove.deployment.instance_name + '-controlplane', 
api_version='rbac.authorization.k8s.io/v1') }}" - loop_control: - loop_var: k8s_item - -- name: Unlabel the signing key secret if it exists to indicate this Kiali instance no longer uses it - ignore_errors: yes - vars: - doomed_label: "{{ 'kiali.io/' + ((kiali_vars_remove.deployment.instance_name + '.') if kiali_vars_remove.deployment.instance_name != 'kiali' else '') + 'member-of' }}" - k8s: - state: present - definition: | - apiVersion: "{{ k8s_item.apiVersion }}" - kind: "{{ k8s_item.kind }}" - metadata: - name: "{{ k8s_item.metadata.name }}" - namespace: "{{ k8s_item.metadata.namespace }}" - labels: - {{ doomed_label }}: null - with_items: - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='Secret', resource_name='kiali-signing-key', api_version='v1') }}" - loop_control: - loop_var: k8s_item - -- name: Delete the signing key secret if no other Kiali installation is using it - ignore_errors: yes - vars: - signing_key_secret_labels: "{{ lookup(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='Secret', resource_name='kiali-signing-key', api_version='v1') | default({}) | json_query('metadata.labels') }}" - k8s: - state: absent - definition: - apiVersion: v1 - kind: Secret - metadata: - name: kiali-signing-key - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - when: - - (signing_key_secret_labels is not defined) or (signing_key_secret_labels | length == 0) or (signing_key_secret_labels | dict2items | selectattr('key', 'match', 'kiali.io/.*member-of') | list | length == 0) - -- name: Delete OpenShift-specific Kiali resources - ignore_errors: yes - k8s: - state: absent - api_version: "{{ os_item.apiVersion }}" - kind: "{{ os_item.kind }}" - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - name: "{{ os_item.metadata.name }}" - register: delete_result - until: delete_result.result is defined - retries: 10 - delay: 2 - when: - - is_openshift == True - - os_item is defined - - os_item.apiVersion is defined - - os_item.kind is defined - - os_item.metadata is defined - - os_item.metadata.name is defined - with_items: - - "{{ query(k8s_plugin, kind='OAuthClient', resource_name=kiali_vars_remove.deployment.instance_name + '-' + kiali_vars_remove.deployment.namespace, api_version='oauth.openshift.io/v1') if is_openshift == True else [] }}" - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='Route', resource_name=kiali_vars_remove.deployment.instance_name, api_version='route.openshift.io/v1') if is_openshift == True else [] }}" - - "{{ query(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='ConfigMap', resource_name=kiali_vars_remove.deployment.instance_name + '-cabundle', api_version='v1') if is_openshift == True else [] }}" - loop_control: - loop_var: os_item - -- name: Delete OpenShift-specific Kiali ConsoleLinks - ignore_errors: yes - k8s: - state: absent - definition: | - {% for cl in query(k8s_plugin, kind='ConsoleLink', label_selector='kiali.io/home=' + ((kiali_vars_remove.deployment.instance_name + '.') if kiali_vars_remove.deployment.instance_name != 'kiali' else '') + kiali_vars_remove.deployment.namespace) %} - --- - apiVersion: "{{ cl.apiVersion }}" - kind: "{{ cl.kind }}" - metadata: - name: "{{ cl.metadata.name }}" - ... 
- {% endfor %} - when: - - is_openshift == True diff --git a/roles/v1.57/kiali-remove/tasks/remove-clusterroles.yml b/roles/v1.57/kiali-remove/tasks/remove-clusterroles.yml deleted file mode 100644 index 2fbb29cf3..000000000 --- a/roles/v1.57/kiali-remove/tasks/remove-clusterroles.yml +++ /dev/null @@ -1,24 +0,0 @@ -- name: "Delete Kiali cluster roles" - ignore_errors: yes - k8s: - state: absent - api_version: "{{ k8s_item.apiVersion }}" - kind: "{{ k8s_item.kind }}" - name: "{{ k8s_item.metadata.name }}" - register: delete_result - until: delete_result.result == {} or (delete_result.result.status is defined and delete_result.result.status == "Success") - retries: 6 - delay: 10 - when: - - is_openshift == True or is_k8s == True - - k8s_item is defined - - k8s_item.apiVersion is defined - - k8s_item.kind is defined - - k8s_item.metadata is defined - - k8s_item.metadata.name is defined - with_items: - - "{{ query(k8s_plugin, kind='ClusterRoleBinding', resource_name=kiali_vars_remove.deployment.instance_name, api_version='rbac.authorization.k8s.io/v1') }}" - - "{{ query(k8s_plugin, kind='ClusterRole', resource_name=kiali_vars_remove.deployment.instance_name, api_version='rbac.authorization.k8s.io/v1') }}" - - "{{ query(k8s_plugin, kind='ClusterRole', resource_name=kiali_vars_remove.deployment.instance_name + '-viewer', api_version='rbac.authorization.k8s.io/v1') }}" - loop_control: - loop_var: k8s_item diff --git a/roles/v1.57/kiali-remove/vars/main.yml b/roles/v1.57/kiali-remove/vars/main.yml deleted file mode 100644 index 4771d535d..000000000 --- a/roles/v1.57/kiali-remove/vars/main.yml +++ /dev/null @@ -1,9 +0,0 @@ -kiali_vars_remove: - istio_namespace: "{{ istio_namespace | default(kiali_defaults_remove.istio_namespace) }}" - - deployment: | - {%- if deployment is defined and deployment is iterable -%} - {{ kiali_defaults_remove.deployment | combine((deployment | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults_remove.deployment }} - {%- endif -%} diff --git a/roles/v2.1/kiali-deploy/defaults/main.yml b/roles/v2.1/kiali-deploy/defaults/main.yml deleted file mode 100644 index 72e2d9c93..000000000 --- a/roles/v2.1/kiali-deploy/defaults/main.yml +++ /dev/null @@ -1,341 +0,0 @@ -# Defaults for all user-facing Kiali settings. -# -# Note that these are under the main dictionary group "kiali_defaults". -# The actual vars used by the role are found in the vars/ directory. -# These defaults (the dictionaries under "kiali_defaults") are merged into the vars such that the values -# below (e.g. deployment, server, etc.) are merged in rather than completely replaced by user-supplied values. -# -# If new groups are added to these defaults, you must remember to add the merge code to vars/main.yml. 
- -kiali_defaults: - installation_tag: "" - istio_namespace: "" - version: "default" - - additional_display_details: - - title: "API Documentation" - annotation: "kiali.io/api-spec" - icon_annotation: "kiali.io/api-type" - - auth: - openid: - additional_request_params: {} - allowed_domains: [] - api_proxy: "" - api_proxy_ca_data: "" - api_token: "id_token" - authentication_timeout: 300 - authorization_endpoint: "" - client_id: "" - disable_rbac: false - http_proxy: "" - https_proxy: "" - insecure_skip_verify_tls: false - issuer_uri: "" - scopes: ["openid", "profile", "email"] - username_claim: "sub" - openshift: - #token_inactivity_timeout: - #token_max_age: - strategy: "" - - clustering: - autodetect_secrets: - enabled: true - label: "kiali.io/multiCluster=true" - clusters: [] - kiali_urls: [] - - custom_dashboards: [] - - deployment: - #additional_service_yaml: - affinity: - node: {} - pod: {} - pod_anti: {} - cluster_wide_access: true - configmap_annotations: {} - custom_envs: [] - custom_secrets: [] - discovery_selectors: {} - dns: - config: {} - policy: "" - host_aliases: [] - hpa: - api_version: "" - spec: {} - image_digest: "" - image_name: "" - image_pull_policy: "IfNotPresent" - image_pull_secrets: [] - image_version: "" - ingress: - additional_labels: {} - class_name: "nginx" - #enabled: - #override_yaml: - instance_name: "kiali" - logger: - log_format: "text" - log_level: "info" - sampler_rate: "1" - time_field_format: "2006-01-02T15:04:05Z07:00" - namespace: "" - node_selector: {} - pod_annotations: {} - pod_labels: {} - priority_class_name: "" - replicas: 1 - #resources: - secret_name: "kiali" - security_context: {} - service_annotations: {} - #service_type: "NodePort" - tolerations: [] - version_label: "" - view_only_mode: false - - extensions: [] - - external_services: - custom_dashboards: - discovery_auto_threshold: 10 - discovery_enabled: "auto" - enabled: true - is_core: false - namespace_label: "" - prometheus: - auth: - ca_file: "" - insecure_skip_verify: false - password: "" - token: "" - type: "none" - use_kiali_token: false - username: "" - cache_duration: 7 - cache_enabled: true - cache_expiration: 300 - custom_headers: {} - health_check_url: "" - is_core: true - query_scope: {} - thanos_proxy: - enabled: false - retention_period: "7d" - scrape_interval: "30s" - url: "" - grafana: - auth: - ca_file: "" - insecure_skip_verify: false - password: "" - token: "" - type: "none" - use_kiali_token: false - username: "" - dashboards: - - name: "Istio Service Dashboard" - variables: - namespace: "var-namespace" - service: "var-service" - - name: "Istio Workload Dashboard" - variables: - namespace: "var-namespace" - workload: "var-workload" - - name: "Istio Mesh Dashboard" - - name: "Istio Control Plane Dashboard" - - name: "Istio Performance Dashboard" - - name: "Istio Wasm Extension Dashboard" - enabled: true - external_url: "" - health_check_url: "" - #internal_url - is_core: false - istio: - component_status: - enabled: true - egress_gateway_namespace: "" - envoy_admin_local_port: 15000 - gateway_api_classes: [] - ingress_gateway_namespace: "" - istio_api_enabled: true - #istio_canary_revision: - #current: prod - #upgrade: canary - istio_identity_domain: "svc.cluster.local" - istio_injection_annotation: "sidecar.istio.io/inject" - istio_sidecar_annotation: "sidecar.istio.io/status" - istio_sidecar_injector_config_map_name: "istio-sidecar-injector" - istiod_pod_monitoring_port: 15014 - root_namespace: "" - prometheus: - auth: - ca_file: "" - insecure_skip_verify: false - 
password: "" - token: "" - type: "none" - use_kiali_token: false - username: "" - cache_duration: 7 - cache_enabled: true - cache_expiration: 300 - custom_headers: {} - health_check_url: "" - is_core: true - query_scope: {} - thanos_proxy: - enabled: false - retention_period: "7d" - scrape_interval: "30s" - url: "" - tracing: - auth: - ca_file: "" - insecure_skip_verify: false - password: "" - token: "" - type: "none" - use_kiali_token: false - username: "" - custom_headers: {} - enabled: false - external_url: "" - grpc_port: 9095 - health_check_url: "" - internal_url: "" - is_core: false - namespace_selector: true - provider: "jaeger" - query_scope: {} - query_timeout: 5 - tempo_config: - datasource_uid: "" - org_id: "" - url_format: "" - use_grpc: true - whitelist_istio_system: ["jaeger-query", "istio-ingressgateway"] - - health_config: - rate: [] - - identity: {} - #cert_file: - #private_key_file: - - istio_labels: - app_label_name: "app" - egress_gateway_label: "istio=egressgateway" - ingress_gateway_label: "istio=ingressgateway" - injection_label_name: "istio-injection" - injection_label_rev: "istio.io/rev" - version_label_name: "version" - - kiali_feature_flags: - disabled_features: [] - istio_annotation_action: true - istio_injection_action: true - istio_upgrade_action: false - ui_defaults: - graph: - find_options: - - auto_select: false - description: "Find: slow edges (> 1s)" - expression: "rt > 1000" - - auto_select: false - description: "Find: unhealthy nodes" - expression: "! healthy" - - auto_select: false - description: "Find: unknown nodes" - expression: "name = unknown" - - auto_select: false - description: "Find: nodes with the 2 top rankings" - expression: "rank <= 2" - hide_options: - - auto_select: false - description: "Hide: healthy nodes" - expression: "healthy" - - auto_select: false - description: "Hide: unknown nodes" - expression: "name = unknown" - - auto_select: false - description: "Hide: nodes ranked lower than the 2 top rankings" - expression: "rank > 2" - settings: - font_label: 13 - min_font_badge: 7 - min_font_label: 10 - traffic: - grpc: "requests" - http: "requests" - tcp: "sent" - list: - include_health: true - include_istio_resources: true - include_validations: true - show_include_toggles: false - metrics_inbound: - aggregations: [] - metrics_outbound: - aggregations: [] - metrics_per_refresh: "1m" - namespaces: [] - refresh_interval: "60s" - validations: - ignore: ["KIA1301"] - skip_wildcard_gateway_hosts: false - - kubernetes_config: - burst: 200 - cache_duration: 300 - cache_token_namespace_duration: 10 - cluster_name: "" - excluded_workloads: - - "CronJob" - - "DeploymentConfig" - - "Job" - - "ReplicationController" - qps: 175 - - login_token: - expiration_seconds: 86400 - signing_key: "" - - server: - address: "" - audit_log: true - cors_allow_all: false - gzip_enabled: true - #node_port: - observability: - metrics: - enabled: true - port: 9090 - tracing: - collector_type: "jaeger" - collector_url: http://jaeger-collector.istio-system:14268/api/traces - enabled: false - otel: - ca_name: "" - protocol: "http" - skip_verify: false - tls_enabled: false - port: 20001 - profiler: - enabled: false - web_fqdn: "" - web_history_mode: "" - web_port: "" - web_root: "" - web_schema: "" - write_timeout: 30 - -# These variables are outside of the kiali_defaults. Their values will be -# auto-detected by the role and are not meant to be set by the user. -# However, for debugging purposes you can change these. 
- -is_k8s: false -is_openshift: false diff --git a/roles/v2.1/kiali-deploy/filter_plugins/parse_selectors.py b/roles/v2.1/kiali-deploy/filter_plugins/parse_selectors.py deleted file mode 100644 index ee44f1cde..000000000 --- a/roles/v2.1/kiali-deploy/filter_plugins/parse_selectors.py +++ /dev/null @@ -1,110 +0,0 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.errors import AnsibleFilterError - -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} - -# Given a list of label selectors in the standard k8s format, convert to the format that the k8s ansible collection wants. -# For example, given this input: -# - matchLabels: -# foo: bar -# - matchLabels: -# color: blue -# matchExpressions: -# - key: region -# operator: In -# values: -# - east -# - west -# an array will be returned with two items. -# The first is a list with one item that is "foo=bar". -# The second is a list with two items. The first item being "color=blue" and the second item being "region in (east, west)" -# -# See: -# * https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors -# * https://docs.ansible.com/ansible/latest/collections/kubernetes/core/k8s_info_module.html#parameter-label_selectors -def parse_selectors(value): - # these are the selectors that should be OR'ed together - this is the final result returned back from this function - selectorOr = [] - selectorOrIndex = 0 - - # for each item in the selectors list, there can be one matchLabels and one matchExpressions (both can be there, or just one of them). - for selectors in value: - selectorOr.append([]) - - # process the matchLabels (or matchLabels) - each results in "labelName=labelValue" strings - matchLabelsString = "matchLabels" - if matchLabelsString in selectors: - if (selectors[matchLabelsString] is None) or (len(selectors[matchLabelsString]) == 0): - raise AnsibleFilterError("Selector matchLabels is empty") - for k, v in selectors[matchLabelsString].items(): - expr = k + "=" + v - selectorOr[selectorOrIndex].append(expr) - - # process the matchExpressions - each results in something like "labelName notin (labelValue, labelValue2)" - matchExpressionsString = "matchExpressions" - if matchExpressionsString in selectors: - for me in selectors[matchExpressionsString]: - if "key" not in me: - raise AnsibleFilterError("Selector matchExpression is missing 'key'") - key = me["key"] - - if "operator" not in me: - raise AnsibleFilterError("Selector matchExpression is missing 'operator'") - operator = me["operator"].lower() - - if (operator == "in" or operator == "notin") and ("values" not in me or me["values"] is None or (len(me["values"]) == 0)): - raise AnsibleFilterError("Selector matchExpression is missing a non-empty 'values'") - values = me["values"] if "values" in me else [] - valuesStr = "(" - for i, v in enumerate(values): - if i > 0: - valuesStr += "," - valuesStr += v - valuesStr += ")" - - if operator == "in": - selectorOr[selectorOrIndex].append(key + " in " + valuesStr) - elif operator == "notin": - selectorOr[selectorOrIndex].append(key + " notin " + valuesStr) - elif operator == "exists": - selectorOr[selectorOrIndex].append(key) - elif operator == "doesnotexist": - selectorOr[selectorOrIndex].append("!" 
+ key) - else: - raise AnsibleFilterError("Selector matchExpression has invalid operator: " + operator) - - selectorOrIndex = selectorOrIndex + 1 - - return selectorOr - -# ---- Ansible filters ---- -class FilterModule(object): - def filters(self): - return { - 'parse_selectors': parse_selectors - } - -# TEST -# first = { -# "matchLabels": { "sport": "football", "region": "west" }, -# "matchExpressions": [{ "key": "region", "operator": "In", "values": ["east" ]}, { "key": "sport", "operator": "Exists"}] -# } -# second = { -# "matchLabels": { "region": "east", "sport": "golf" }, -# } -# third = { -# "matchExpressions": [{ "key": "sport", "operator": "In", "values": ["baseball", "football" ]},{ "key": "region", "operator": "NotIn", "values": ["east" ]}] -# } -# fourth = { -# "matchExpressions": [{ "key": "sport", "operator": "NotIn", "values": ["baseball", "football" ]},{ "key": "region", "operator": "Exists"},{ "key": "foo", "operator": "DoesNotExist"}] -# } -# print ("\n=====The following should be successful:\n") -# print (parse_selectors([first, second, third, fourth])) -# print ("\n=====The following should result in an error:\n") -# print (parse_selectors([{"matchExpressions": [{ "key": "sport", "operator": "XIn"}]}])) diff --git a/roles/v2.1/kiali-deploy/filter_plugins/stripnone.py b/roles/v2.1/kiali-deploy/filter_plugins/stripnone.py deleted file mode 100644 index 4dbd53033..000000000 --- a/roles/v2.1/kiali-deploy/filter_plugins/stripnone.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} - -# Process recursively the given value if it is a dict and remove all keys that have a None value -def strip_none(value): - if isinstance(value, dict): - dicts = {} - for k,v in value.items(): - if isinstance(v, dict): - dicts[k] = strip_none(v) - elif v is not None: - dicts[k] = v - return dicts - else: - return value - -# ---- Ansible filters ---- -class FilterModule(object): - def filters(self): - return { - 'stripnone': strip_none - } diff --git a/roles/v2.1/kiali-deploy/meta/main.yml b/roles/v2.1/kiali-deploy/meta/main.yml deleted file mode 100644 index e9334e3c7..000000000 --- a/roles/v2.1/kiali-deploy/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -collections: -- kubernetes.core diff --git a/roles/v2.1/kiali-deploy/tasks/clusterroles-to-remove.yml b/roles/v2.1/kiali-deploy/tasks/clusterroles-to-remove.yml deleted file mode 100644 index 7d1d8d952..000000000 --- a/roles/v2.1/kiali-deploy/tasks/clusterroles-to-remove.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ kiali_vars.deployment.instance_name }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ kiali_vars.deployment.instance_name }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ kiali_vars.deployment.instance_name }}-viewer ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ kiali_vars.deployment.instance_name }}-{{ kiali_vars.deployment.namespace }}-oauth ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ kiali_vars.deployment.instance_name }}-{{ kiali_vars.deployment.namespace }}-oauth diff --git a/roles/v2.1/kiali-deploy/tasks/get-discovery-selector-namespaces.yml 
b/roles/v2.1/kiali-deploy/tasks/get-discovery-selector-namespaces.yml deleted file mode 100644 index 8db6e7b30..000000000 --- a/roles/v2.1/kiali-deploy/tasks/get-discovery-selector-namespaces.yml +++ /dev/null @@ -1,65 +0,0 @@ -# These tasks are not performed if cluster_wide_access is true - this is because the operator will -# grant Kiali permission to see all namespaces via ClusterRole, so the operator does not need to -# process discovery selectors. -# -# These tasks are performed if cluster wide access is false - this is because the operator will -# need to create Roles in all the namespaces found by the discovery selectors so Kiali can be -# granted permission to see those namespaces (but only those namespaces). -# -# These tasks will use discovery selectors found in the Kiali configuration setting -# spec.deployment.discovery_selectors["default"]. These discovery selectors will be used to discover -# namespaces that Kiali should be given access to. -# -# When these tasks finish, "discovery_selector_namespaces" will be a list of namespaces discovered by the selectors. -# -# NOTE: Regardless of what discovery selectors are defined, the Kiali Operator should always give the server -# access to the Kiali Server deployment namespace and the Istio control plane namespace. But that is -# not done here - these tasks simply scan all namespaces and match them to selectors. The operator -# will add those two namespaces later if appropriate. -# -# NOTE: These tasks specifically ignore Istio's own discovery selectors found in Istio meshConfig. - -- name: Get Kiali discovery selectors if they are defined - set_fact: - discovery_selectors: "{{ kiali_vars.deployment.discovery_selectors.default }}" - when: - - kiali_vars.deployment.cluster_wide_access == False - - kiali_vars.deployment.discovery_selectors.default is defined - -- name: If cluster wide access is disabled and no discovery selectors are found, warn the user that this is probably not what they want - debug: - msg: "Cluster wide access is disabled, but no discovery selectors were specified. You likely will want to define discovery selectors in the Kiali CR." 
- when: - - kiali_vars.deployment.cluster_wide_access == False - - kiali_vars.discovery_selectors.default is not defined - - discovery_selectors is not defined - -- name: Find namespaces selected by the discovery selectors - set_fact: - discovery_selector_namespaces_raw: "{{ (discovery_selector_namespaces_raw|default([]) + query(k8s_plugin, kind='Namespace', label_selector=(item|join(',')))) | unique }}" - loop: "{{ discovery_selectors | parse_selectors }}" - when: - - kiali_vars.deployment.cluster_wide_access == False - - discovery_selectors is defined - -- name: Get just the names of the discovered namespaces - set_fact: - discovery_selector_namespaces: "{{ discovery_selector_namespaces|default([]) + [item.metadata.name] }}" - loop: "{{ discovery_selector_namespaces_raw|default([]) }}" - when: - - kiali_vars.deployment.cluster_wide_access == False - - discovery_selector_namespaces_raw is defined - -- name: Garbage collect discovered namespaces to free up space - set_fact: - discovery_selector_namespaces_raw: [] - when: - - kiali_vars.deployment.cluster_wide_access == False - - discovery_selector_namespaces_raw is defined - -- name: If no namespaces were discovered, make sure discovery_selector_namespaces is set to an empty list - set_fact: - discovery_selector_namespaces: [] - when: - - kiali_vars.deployment.cluster_wide_access == False - - discovery_selector_namespaces is not defined \ No newline at end of file diff --git a/roles/v2.1/kiali-deploy/tasks/kubernetes/k8s-main.yml b/roles/v2.1/kiali-deploy/tasks/kubernetes/k8s-main.yml deleted file mode 100644 index a6b27375e..000000000 --- a/roles/v2.1/kiali-deploy/tasks/kubernetes/k8s-main.yml +++ /dev/null @@ -1,61 +0,0 @@ -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Creating core resources" - when: - - is_k8s == True - -- name: Remove HPA if disabled on Kubernetes - k8s: - state: absent - api_version: "{{ kiali_vars.deployment.hpa.api_version }}" - kind: "HorizontalPodAutoscaler" - namespace: "{{ kiali_vars.deployment.namespace }}" - name: "{{ kiali_vars.deployment.instance_name }}" - when: - - is_k8s == True - - kiali_vars.deployment.hpa.spec | length == 0 - -- name: Create Kiali objects on Kubernetes - include_tasks: process-resource.yml - vars: - role_namespaces: "{{ [ kiali_vars.deployment.namespace ] }}" - process_resource_templates: - - "templates/kubernetes/serviceaccount.yaml" - - "templates/kubernetes/configmap.yaml" - - "templates/kubernetes/{{ 'role-viewer' if ((kiali_vars.deployment.view_only_mode|bool == True) or (kiali_vars.auth.strategy != 'anonymous')) else 'role' }}.yaml" - - "templates/kubernetes/rolebinding.yaml" - - "templates/kubernetes/deployment.yaml" - - "templates/kubernetes/service.yaml" - - "{{ 'templates/kubernetes/hpa.yaml' if kiali_vars.deployment.hpa.spec | length > 0 else '' }}" - - "{{ 'templates/kubernetes/ingress.yaml' if kiali_vars.deployment.ingress.enabled|bool == True else '' }}" - when: - - is_k8s == True - -- name: Delete Ingress on Kubernetes if disabled - k8s: - state: absent - api_version: "networking.k8s.io/{{ 'v1' if (lookup(k8s_plugin, kind='Ingress', api_version='networking.k8s.io/v1', errors='ignore') is iterable) else 'v1beta1' }}" - kind: "Ingress" - namespace: "{{ kiali_vars.deployment.namespace }}" - name: "{{ kiali_vars.deployment.instance_name }}" - when: - - is_k8s == True - - kiali_vars.deployment.ingress.enabled|bool == False - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Creating additional roles" - when: - 
- is_k8s == True - - kiali_vars.deployment.cluster_wide_access == False - -- name: Create additional Kiali roles/bindings on all namespaces that are accessible on Kubernetes - vars: - role_namespaces: "{{ discovery_selector_namespaces }}" - k8s: - template: - - "templates/kubernetes/{{ 'role-viewer' if ((kiali_vars.deployment.view_only_mode|bool == True) or (kiali_vars.auth.strategy != 'anonymous')) else 'role' }}.yaml" - - "templates/kubernetes/rolebinding.yaml" - when: - - is_k8s == True - - kiali_vars.deployment.cluster_wide_access == False diff --git a/roles/v2.1/kiali-deploy/tasks/main.yml b/roles/v2.1/kiali-deploy/tasks/main.yml deleted file mode 100644 index f2ad964c0..000000000 --- a/roles/v2.1/kiali-deploy/tasks/main.yml +++ /dev/null @@ -1,904 +0,0 @@ -- set_fact: - k8s_plugin: kubernetes.core.k8s - -- name: Get the original CR as-is for the camelCase keys and so we can update its status field - set_fact: - current_cr: "{{ _kiali_io_kiali }}" - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Initializing" - status_vars: - specVersion: "{{ kiali_vars.version }}" - deployment: - discoverySelectorNamespaces: null - -- name: Get api group information from the cluster - set_fact: - api_groups: "{{ lookup(k8s_plugin, cluster_info='api_groups') }}" - when: - - is_openshift == False - - is_k8s == False - -- name: Get api version information from the cluster - k8s_cluster_info: - register: api_status - -- name: Determine the cluster type - set_fact: - is_openshift: "{{ True if 'operator.openshift.io' in api_groups else False }}" - is_k8s: "{{ False if 'operator.openshift.io' in api_groups else True }}" - when: - - is_openshift == False - - is_k8s == False - -# Indicate what kind of cluster we are in (OpenShift or Kubernetes). 
-- debug: - msg: "CLUSTER TYPE: is_openshift={{ is_openshift }}; is_k8s={{ is_k8s }}" -- fail: - msg: "Cannot determine what type of cluster we are in" - when: - - is_openshift == False - - is_k8s == False - -- name: Determine the Kubernetes version - set_fact: - k8s_version: "{{ lookup(k8s_plugin, cluster_info='version').kubernetes.gitVersion | regex_replace('^v', '') }}" - ignore_errors: yes - -- name: Determine the OpenShift version - vars: - kube_apiserver_cluster_op_raw: "{{ lookup(k8s_plugin, api_version='config.openshift.io/v1', kind='ClusterOperator', resource_name='kube-apiserver') | default({}) }}" - ri_query: "status.versions[?name == 'raw-internal'].version" - set_fact: - openshift_version: "{{ kube_apiserver_cluster_op_raw | json_query(ri_query) | join }}" - when: - - is_openshift == True - -- name: Get information about the operator - k8s_info: - api_version: v1 - kind: Pod - namespace: "{{ lookup('env', 'POD_NAMESPACE') }}" - name: "{{ lookup('env', 'POD_NAME') }}" - register: operator_pod_raw - ignore_errors: yes -- name: Determine the version of the operator based on the version label - set_fact: - operator_version: "{{ operator_pod_raw.resources[0].metadata.labels.version }}" - when: - - operator_pod_raw is defined - - operator_pod_raw.resources[0] is defined - - operator_pod_raw.resources[0].metadata is defined - - operator_pod_raw.resources[0].metadata.labels is defined - - operator_pod_raw.resources[0].metadata.labels.version is defined -- set_fact: - operator_version: "unknown" - when: - - operator_version is not defined -- debug: - msg: "OPERATOR VERSION: [{{ operator_version }}]" - -# To remain backward compatible with some settings that have changed in later releases, -# let's take some deprecated settings and set the current settings appropriately. - -- name: deployment.ingress_enabled is deprecated but if deployment.ingress.enabled is not set then use the old setting - set_fact: - kiali_vars: | - {% set ie=kiali_vars['deployment'].pop('ingress_enabled') %} - {{ kiali_vars | combine({'deployment': {'ingress': {'enabled': ie|bool }}}, recursive=True) }} - when: - - kiali_vars.deployment.ingress_enabled is defined - - kiali_vars.deployment.ingress is not defined or kiali_vars.deployment.ingress.enabled is not defined - -# convert snake case to camelCase where appropriate -- include_tasks: snake_camel_case.yaml - -- name: Print some debug information - vars: - msg: | - Kiali Variables: - -------------------------------- - {{ kiali_vars | to_nice_yaml }} - debug: - msg: "{{ msg.split('\n') }}" - -- name: Set default deployment namespace to the same namespace where the CR lives - set_fact: - kiali_vars: "{{ kiali_vars | combine({'deployment': {'namespace': current_cr.metadata.namespace}}, recursive=True) }}" - when: - - kiali_vars.deployment.namespace is not defined or kiali_vars.deployment.namespace == "" - -# Never allow the deployment.instance_name or deployment.namespace to change to avoid leaking resources - to uninstall resources you must delete the Kiali CR -- name: Ensure the deployment.instance_name has not changed - fail: - msg: "The deployment.instance_name cannot be changed to a different value. It was [{{ current_cr.status.deployment.instanceName }}] but is now [{{ kiali_vars.deployment.instance_name }}]. In order to install Kiali with a different deployment.instance_name, please uninstall Kiali first." 
- when: - current_cr.status is defined - current_cr.status.deployment is defined - current_cr.status.deployment.instanceName is defined - current_cr.status.deployment.instanceName != kiali_vars.deployment.instance_name - -- name: Ensure the deployment.namespace has not changed - fail: - msg: "The deployment.namespace cannot be changed to a different value. It was [{{ current_cr.status.deployment.namespace }}] but is now [{{ kiali_vars.deployment.namespace }}]. In order to install Kiali with a different deployment.namespace, please uninstall Kiali first." - when: - current_cr.status is defined - current_cr.status.deployment is defined - current_cr.status.deployment.namespace is defined - current_cr.status.deployment.namespace != kiali_vars.deployment.namespace - -- name: Only allow ad-hoc kiali namespace when appropriate - fail: - msg: "The operator is forbidden from installing Kiali in a namespace [{{ kiali_vars.deployment.namespace }}] that is different from the namespace where the CR was created [{{ current_cr.metadata.namespace }}]" - when: - kiali_vars.deployment.namespace != current_cr.metadata.namespace - lookup('env', 'ALLOW_AD_HOC_KIALI_NAMESPACE') | default('false', True) != "true" - -- name: Make sure instance_name follows the DNS label standard because it will be a Service name - fail: - msg: "The value for deployment.instance_name [{{ kiali_vars.deployment.instance_name }}] does not follow the DNS label standard as defined in RFC 1123. In short, it must only contain lowercase alphanumeric characters or '-'." - when: - # regex must follow https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names - # restrict to 40 chars, not 63, because instance_name is a prefix and we need to prepend additional chars for some resource names (like "-service-account") - kiali_vars.deployment.instance_name is not regex('^(?![0-9]+$)(?!-)[a-z0-9-]{,40}(?<!-)$') - -- name: Determine image version when last release is to be installed - shell: echo -n $(curl -s https://api.github.com/repos/kiali/kiali/releases 2> /dev/null | grep "tag_name" | sed -e 's/.*://' -e 's/ *"//' -e 's/",//' | grep -v "snapshot" | sort -t "." -k 1.2g,1 -k 2g,2 -k 3g | tail -n 1) - register: github_lastrelease - when: - kiali_vars.deployment.image_version == "lastrelease" -- set_fact: - kiali_vars: "{{ kiali_vars | combine({'deployment': {'image_version': github_lastrelease.stdout}}, recursive=True) }}" - when: - kiali_vars.deployment.image_version == "lastrelease" - -- name: Determine image version when it explicitly was configured as the operator_version - set_fact: - kiali_vars: "{{ kiali_vars | combine({'deployment': {'image_version': 'latest' if operator_version == 'master' else operator_version}}, recursive=True) }}" - when: - kiali_vars.deployment.image_version == "operator_version" - -- fail: - msg: "Could not determine what the image version should be. Set deployment.image_version to a valid value" - when: - kiali_vars.deployment.image_version == "" or kiali_vars.deployment.image_version == "unknown" - -- name: Determine version_label based on image_version - set_fact: - kiali_vars: "{{ kiali_vars | combine({'deployment': {'version_label': 'master' if kiali_vars.deployment.image_version == 'latest' else kiali_vars.deployment.image_version}}, recursive=True) }}" - when: - kiali_vars.deployment.version_label == "" - -# Kubernetes limits the length of version label strings to 63 characters or less - make sure the label is valid.
-- name: Trim version_label when appropriate - set_fact: - kiali_vars: "{{ kiali_vars | combine({'deployment': {'version_label': kiali_vars.deployment.version_label[:60] + 'XXX' }}, recursive=True) }}" - when: - - kiali_vars.deployment.version_label | length > 63 - -# Indicate which Kiali image we are going to use. -- debug: - msg: "IMAGE_NAME={{ kiali_vars.deployment.image_name }}; IMAGE VERSION={{ kiali_vars.deployment.image_version }}; VERSION LABEL={{ kiali_vars.deployment.version_label }}" - -- name: Determine what metadata labels to apply to all created resources - set_fact: - kiali_resource_metadata_labels: - app: kiali - version: "{{ kiali_vars.deployment.version_label }}" - app.kubernetes.io/name: kiali - app.kubernetes.io/version: "{{ kiali_vars.deployment.version_label }}" - app.kubernetes.io/instance: "{{ kiali_vars.deployment.instance_name }}" - app.kubernetes.io/part-of: kiali - -# Determine the namespaces Kiali is to be given access. -# If the user did not specify Kiali's own namespace in the discovery selectors, it will be added to the list automatically. -# NOTE: if deployment.cluster_wide_access is true, that means Kiali is to be given access to all namespaces via ClusterRoles -# (as opposed to individual roles in each accessible namespace). If deployment.cluster_wide_access is False then we -# create individual Roles per namespace. - -- name: Determine the Role and RoleBinding kinds that the operator will create and that the role templates will use - set_fact: - role_kind: "{{ 'ClusterRole' if kiali_vars.deployment.cluster_wide_access == True else 'Role' }}" - role_binding_kind: "{{ 'ClusterRoleBinding' if kiali_vars.deployment.cluster_wide_access == True else 'RoleBinding' }}" - -- name: Determine if the operator can support cluster wide access - can_i create clusterroles - register: can_i_create_clusterroles - ignore_errors: yes - k8s: - state: present - definition: - apiVersion: authorization.k8s.io/v1 - kind: SelfSubjectAccessReview - spec: - resourceAttributes: - group: rbac.authorization.k8s.io - resource: clusterroles - verb: create - when: - - kiali_vars.deployment.cluster_wide_access == True - -- name: Determine if the operator can support cluster wide access - can_i create clusterrolebindings - register: can_i_create_clusterrolebindings - ignore_errors: yes - k8s: - state: present - definition: - apiVersion: authorization.k8s.io/v1 - kind: SelfSubjectAccessReview - spec: - resourceAttributes: - group: rbac.authorization.k8s.io - resource: clusterrolebindings - verb: create - when: - - kiali_vars.deployment.cluster_wide_access == True - -- fail: - msg: "The operator cannot support deployment.cluster_wide_access set to 'true' because it does not have permissions to create ClusterRoles" - when: - - kiali_vars.deployment.cluster_wide_access == True - - can_i_create_clusterroles is defined - - can_i_create_clusterroles.result is defined - - can_i_create_clusterroles.result.status is defined - - can_i_create_clusterroles.result.status.allowed is defined - - can_i_create_clusterroles.result.status.allowed == False - -- fail: - msg: "The operator cannot support deployment.cluster_wide_access set to 'true' because it does not have permissions to create ClusterRoleBindings" - when: - - kiali_vars.deployment.cluster_wide_access == True - - can_i_create_clusterrolebindings is defined - - can_i_create_clusterrolebindings.result is defined - - can_i_create_clusterrolebindings.result.status is defined - - can_i_create_clusterrolebindings.result.status.allowed is defined - - 
can_i_create_clusterrolebindings.result.status.allowed == False - -- include_tasks: get-discovery-selector-namespaces.yml - -- name: Make sure the Kiali deployment namespace and the Istio control plane namespace are accessible - set_fact: - discovery_selector_namespaces: "{{ ((discovery_selector_namespaces | default([])) + [ kiali_vars.deployment.namespace, kiali_vars.istio_namespace, kiali_vars.external_services.istio.root_namespace ]) | unique | sort }}" - -- name: Listing of all namespaces that are accessible to Kiali - debug: - msg: "Cluster-wide Access=[{{ kiali_vars.deployment.cluster_wide_access }}], Discovered Namespaces={{ discovery_selector_namespaces }}" - -- name: Abort if all namespace access is not allowed - fail: - msg: "The operator is forbidden from installing Kiali with deployment.cluster_wide_access set to 'true'" - when: - - kiali_vars.deployment.cluster_wide_access == True - - lookup('env', 'ALLOW_ALL_ACCESSIBLE_NAMESPACES') | default('false', True) != "true" - -# We want to convert discovery selectors so they use only matchExpressions values on namespace names so the server can find the exact -# namespaces we found since these namespaces are the only ones that the Kiali Server will be granted permission to see. -# Note that we only do this if cluster_wide_access is False, because that is when the operator will create the Roles for each namespace. -# If the server will have cluster wide access, all namespaces can be accessed via the main ClusterRole, so it is OK if the server -# discovers different namespaces using the original selectors. -- name: Convert the discovery selectors to all matchExpressions values so they match the namespace names. - set_fact: - discovery_selectors_match_expressions: "{{ (discovery_selectors_match_expressions|default([])) + [{'matchExpressions': [{'key': 'kubernetes.io/metadata.name', 'operator': 'In', 'values': discovery_selector_namespaces }] }] }}" - when: - - kiali_vars.deployment.cluster_wide_access == False -- set_fact: - kiali_vars: "{{ kiali_vars | combine({'deployment': {'discovery_selectors': {'default': discovery_selectors_match_expressions}}}, recursive=True) }}" - when: - - discovery_selectors_match_expressions is defined - -- name: Define the namespace labels that will be used when needed - set_fact: - kiali_instance_label_name: "kiali.io/{{ kiali_vars.deployment.instance_name }}.home" - kiali_instance_label_value: "{{ kiali_vars.deployment.namespace }}" - -# If the signing key is not empty string, and is not of the special value secret:name:key, -# do some validation on it's length -- name: Validate signing key, if it is set in the CR - fail: - msg: "Signing key must be 16, 24 or 32 byte length" - when: - - kiali_vars.auth.strategy != 'anonymous' - - kiali_vars.login_token.signing_key != "" - - not(kiali_vars.login_token.signing_key | regex_search('secret:.+:.+')) - - kiali_vars.login_token.signing_key | length != 16 - - kiali_vars.login_token.signing_key | length != 24 - - kiali_vars.login_token.signing_key | length != 32 - -# If the signing key is empty string, we need to ensure a signing key secret exists. If one does not exist, we need to generate one. -# Note that to avoid granting to the operator the very powerful permission to CRUD all secrets in all namespaces, we always generate -# a signing key secret with the name "kiali-signing-key" regardless of the value of kiali_vars.deployment.instance_name. -# Thus, all Kiali instances will be using the same signing key secret name. 
If the user does not want this, they can generate their -# own secret with their own key (which is a smart thing to do anyway). The user tells the operator what the name of that secret -# signing key is via "login_token.signing_key" with value "secret:<secretName>:<secretKey>". - -- name: Get information about any existing signing key secret if we need to know if it exists or not - k8s_info: - api_version: v1 - kind: Secret - namespace: "{{ kiali_vars.deployment.namespace }}" - name: kiali-signing-key - register: signing_key_secret_raw - when: - - kiali_vars.login_token.signing_key == "" - -- name: Create kiali-signing-key secret to store a random signing key if a secret does not already exist and we need one - k8s: - state: present - definition: - apiVersion: v1 - kind: Secret - metadata: - namespace: "{{ kiali_vars.deployment.namespace }}" - name: kiali-signing-key - labels: "{{ kiali_resource_metadata_labels }}" - type: Opaque - data: - key: "{{ lookup('password', '/dev/null length=16 chars=ascii_letters,digits') | b64encode }}" - when: - - kiali_vars.login_token.signing_key == "" - - signing_key_secret_raw is defined - - signing_key_secret_raw.resources is defined - - signing_key_secret_raw.resources | length == 0 - -# Because we must use a fixed name for the secret, we need to attach a label to indicate this Kiali install will be using it. -# This allows multiple Kiali instances deployed in the same namespace to share the secret. This secret won't be removed -# as long as our label exists on the secret resource. -- name: Add label to kiali-signing-key secret to make it known this Kiali instance will be using it - k8s: - state: present - definition: | - apiVersion: v1 - kind: Secret - metadata: - namespace: "{{ kiali_vars.deployment.namespace }}" - name: kiali-signing-key - labels: - {{ kiali_instance_label_name }}: {{ kiali_instance_label_value }} - when: - - kiali_vars.login_token.signing_key == "" - -- name: Point signing key to the generated secret - set_fact: - kiali_vars: "{{ kiali_vars | combine({'login_token': {'signing_key': 'secret:kiali-signing-key:key'}}, recursive=True) }}" - when: - - kiali_vars.login_token.signing_key == "" - -# Some credentials in the config can be overridden by secrets that are to be mounted on the file system. -# Prepare these overrides that need to be defined as volumes in the deployment. 
- -- name: Prepare secret volumes for external services - vars: - kiali_deployment_secret_volumes_yaml: |- - {# Initialize #} - {% set d = {} %} - - {# Prepare the secret volume for prometheus username #} - {% if kiali_vars.external_services.prometheus.auth.username | regex_search('secret:.+:.+') %} - {% set d = d | combine({'prometheus-username': {'secret_name': kiali_vars.external_services.prometheus.auth.username | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.prometheus.auth.username | regex_replace('secret:.+:(.+)', '\\1') }}) %} - {% endif %} - - {# Prepare the secret volume for prometheus password #} - {% if kiali_vars.external_services.prometheus.auth.password | regex_search('secret:.+:.+') %} - {% set d = d | combine({'prometheus-password': {'secret_name': kiali_vars.external_services.prometheus.auth.password | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.prometheus.auth.password | regex_replace('secret:.+:(.+)', '\\1') }}) %} - {% endif %} - - {# Prepare the secret volume for prometheus token #} - {% if kiali_vars.external_services.prometheus.auth.token | regex_search('secret:.+:.+') %} - {% set d = d | combine({'prometheus-token': {'secret_name': kiali_vars.external_services.prometheus.auth.token | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.prometheus.auth.token | regex_replace('secret:.+:(.+)', '\\1') }}) %} - {% endif %} - - {# Prepare the secret volume for tracing username #} - {% if kiali_vars.external_services.tracing.enabled|bool == True and kiali_vars.external_services.tracing.auth.username | regex_search('secret:.+:.+') %} - {% set d = d | combine({'tracing-username': {'secret_name': kiali_vars.external_services.tracing.auth.username | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.tracing.auth.username | regex_replace('secret:.+:(.+)', '\\1') }}) %} - {% endif %} - - {# Prepare the secret volume for tracing password #} - {% if kiali_vars.external_services.tracing.enabled|bool == True and kiali_vars.external_services.tracing.auth.password | regex_search('secret:.+:.+') %} - {% set d = d | combine({'tracing-password': {'secret_name': kiali_vars.external_services.tracing.auth.password | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.tracing.auth.password | regex_replace('secret:.+:(.+)', '\\1') }}) %} - {% endif %} - - {# Prepare the secret volume for tracing token #} - {% if kiali_vars.external_services.tracing.enabled|bool == True and kiali_vars.external_services.tracing.auth.token | regex_search('secret:.+:.+') %} - {% set d = d | combine({'tracing-token': {'secret_name': kiali_vars.external_services.tracing.auth.token | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.tracing.auth.token | regex_replace('secret:.+:(.+)', '\\1') }}) %} - {% endif %} - - {# Prepare the secret volume for grafana username #} - {% if kiali_vars.external_services.grafana.enabled|bool == True and kiali_vars.external_services.grafana.auth.username | regex_search('secret:.+:.+') %} - {% set d = d | combine({'grafana-username': {'secret_name': kiali_vars.external_services.grafana.auth.username | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.grafana.auth.username | regex_replace('secret:.+:(.+)', '\\1') }}) %} - {% endif %} - - {# Prepare the secret volume for grafana password #} - {% if kiali_vars.external_services.grafana.enabled|bool == True 
and kiali_vars.external_services.grafana.auth.password | regex_search('secret:.+:.+') %} - {% set d = d | combine({'grafana-password': {'secret_name': kiali_vars.external_services.grafana.auth.password | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.grafana.auth.password | regex_replace('secret:.+:(.+)', '\\1') }}) %} - {% endif %} - - {# Prepare the secret volume for grafana token #} - {% if kiali_vars.external_services.grafana.enabled|bool == True and kiali_vars.external_services.grafana.auth.token | regex_search('secret:.+:.+') %} - {% set d = d | combine({'grafana-token': {'secret_name': kiali_vars.external_services.grafana.auth.token | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.grafana.auth.token | regex_replace('secret:.+:(.+)', '\\1') }}) %} - {% endif %} - - {# Prepare the secret volume for login token signing key #} - {% if kiali_vars.login_token.signing_key | regex_search('secret:.+:.+') %} - {% set d = d | combine({'login-token-signing-key': {'secret_name': kiali_vars.login_token.signing_key | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.login_token.signing_key | regex_replace('secret:.+:(.+)', '\\1') }}) %} - {% endif %} - - {# Prepare the secret volume for customdashboards prometheus username #} - {% if kiali_vars.external_services.custom_dashboards.prometheus.auth.username | regex_search('secret:.+:.+') %} - {% set d = d | combine({'customdashboards-prometheus-username': {'secret_name': kiali_vars.external_services.custom_dashboards.prometheus.auth.username | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.custom_dashboards.prometheus.auth.username | regex_replace('secret:.+:(.+)', '\\1') }}) %} - {% endif %} - - {# Prepare the secret volume for customdashboards prometheus password #} - {% if kiali_vars.external_services.custom_dashboards.prometheus.auth.password | regex_search('secret:.+:.+') %} - {% set d = d | combine({'customdashboards-prometheus-password': {'secret_name': kiali_vars.external_services.custom_dashboards.prometheus.auth.password | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.custom_dashboards.prometheus.auth.password | regex_replace('secret:.+:(.+)', '\\1') }}) %} - {% endif %} - - {# Prepare the secret volume for customdashboards prometheus token #} - {% if kiali_vars.external_services.custom_dashboards.prometheus.auth.token | regex_search('secret:.+:.+') %} - {% set d = d | combine({'customdashboards-prometheus-token': {'secret_name': kiali_vars.external_services.custom_dashboards.prometheus.auth.token | regex_replace('secret:(.+):.+', '\\1'), 'secret_key': kiali_vars.external_services.custom_dashboards.prometheus.auth.token | regex_replace('secret:.+:(.+)', '\\1') }}) %} - {% endif %} - - {# Set the yaml to the new dict #} - {{ d | to_nice_yaml }} - set_fact: - kiali_deployment_secret_volumes: "{{ kiali_deployment_secret_volumes_yaml | from_yaml }}" - -# Prepare to mount remote cluster secrets. These must exist in the Kiali deployment namespace because that is required in order to mount them to the pod. 
-- set_fact: - kiali_deployment_remote_cluster_secret_volumes: {} - -- name: Autodetect remote cluster secrets within the Kiali deployment namespace - vars: - all_remote_cluster_secrets: "{{ query(k8s_plugin, namespace=kiali_vars.deployment.namespace, api_version='v1', kind='Secret', label_selector=kiali_vars.clustering.autodetect_secrets.label) }}" - loop: "{{ all_remote_cluster_secrets }}" - set_fact: - kiali_deployment_remote_cluster_secret_volumes: "{{ kiali_deployment_remote_cluster_secret_volumes | combine({ item.metadata.annotations['kiali.io/cluster']|default(item.metadata.name): {'secret_name': item.metadata.name }}) }}" - when: - - kiali_vars.clustering.autodetect_secrets.enabled - -- name: Prepare the manually declared remote clusters - loop: "{{ kiali_vars.clustering.clusters }}" - set_fact: - kiali_deployment_remote_cluster_secret_volumes: "{{ kiali_deployment_remote_cluster_secret_volumes | combine(({ item.name: {'secret_name': item.secret_name }}) if (item.secret_name is defined and item.secret_name | length > 0) else {}) }}" - when: - - kiali_vars.clustering.clusters | length > 0 - -# The following few tasks read the current Kiali configmap (if one exists) in order to find out how Kiali is currently configured. - -- name: Find current configmap, if it exists - set_fact: - current_configmap: "{{ lookup(k8s_plugin, resource_name=kiali_vars.deployment.instance_name, namespace=kiali_vars.deployment.namespace, api_version='v1', kind='ConfigMap') }}" - -- name: Find some current configuration settings, if they exist - set_fact: - current_view_only_mode: "{{ current_configmap.data['config.yaml'] | from_yaml | json_query('deployment.view_only_mode') }}" - current_image_name: "{{ current_configmap.data['config.yaml'] | from_yaml | json_query('deployment.image_name') }}" - current_image_version: "{{ current_configmap.data['config.yaml'] | from_yaml | json_query('deployment.image_version') }}" - current_instance_name: "{{ current_configmap.data['config.yaml'] | from_yaml | json_query('deployment.instance_name') }}" - current_auth_strategy: "{{ current_configmap.data['config.yaml'] | from_yaml | json_query('auth.strategy') }}" - current_cluster_wide_access: "{{ current_configmap.data['config.yaml'] | from_yaml | json_query('deployment.cluster_wide_access') }}" - when: - - current_configmap is defined - - current_configmap.data is defined - - current_configmap.data['config.yaml'] is defined - -- name: Examine namespace labels in order to determine the namespaces that Kiali currently has access to - vars: - label: "{{ kiali_instance_label_name }}={{ kiali_instance_label_value }}" - set_fact: - namespaces_currently_accessible: "{{ query(k8s_plugin, kind='Namespace', label_selector=label) | default({}) | json_query('[].metadata.name') }}" - -- name: Determine if we are moving to cluster-wide-access in which case we need to pretend to make all namespaces inaccessible so the Roles are removed - set_fact: - namespaces_no_longer_accessible: "{{ namespaces_currently_accessible }}" - when: - - namespaces_currently_accessible is defined - - current_cluster_wide_access is defined - - current_cluster_wide_access|bool == False - - kiali_vars.deployment.cluster_wide_access == True - -- name: Determine the namespaces that were previously accessible but are now inaccessible - set_fact: - namespaces_no_longer_accessible: "{{ namespaces_currently_accessible | difference(discovery_selector_namespaces) }}" - when: - - namespaces_no_longer_accessible is not defined - - namespaces_currently_accessible is 
defined - - discovery_selector_namespaces is defined - - current_cluster_wide_access is defined - - current_cluster_wide_access|bool == False - - kiali_vars.deployment.cluster_wide_access == False - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Deleting obsolete roles" - -- name: Delete all additional Kiali roles from namespaces that Kiali no longer has access to - include_tasks: remove-roles.yml - vars: - role_namespaces: "{{ namespaces_no_longer_accessible }}" - when: - - namespaces_no_longer_accessible is defined - -- name: Delete Kiali cluster roles if no longer given special access to all namespaces - include_tasks: remove-clusterroles.yml - when: - - current_cluster_wide_access is defined - - current_cluster_wide_access|bool == True - - kiali_vars.deployment.cluster_wide_access == False - -# Role Bindings are always "view-only" unless auth.strategy is anonymous and view_only_mode is false. -# If the view_only_mode or auth.strategy changes, we'll delete the roles to make sure we create the correct ones. -# We need to see if the currently installed role binding is view-only - this is used to not break upgrades. See: https://github.com/kiali/kiali/issues/5695 -- name: Determine if the currently installed role binding in the deployment namespace is view-only - vars: - current_rolebinding: "{{ query(k8s_plugin, resource_name=kiali_vars.deployment.instance_name, namespace=kiali_vars.deployment.namespace, api_version='rbac.authorization.k8s.io/v1', kind=role_binding_kind, errors='ignore') }}" - set_fact: - current_rolebinding_view_only: "{{ (current_rolebinding | length == 1) and (current_rolebinding[0].roleRef.name is regex('^.*-viewer$')) }}" - -- name: Delete all Kiali roles from namespaces if view_only_mode or auth.strategy is changing since role bindings are immutable - include_tasks: remove-roles.yml - vars: - role_namespaces: "{{ discovery_selector_namespaces }}" - when: - - current_cluster_wide_access is defined - - current_cluster_wide_access|bool == False - - kiali_vars.deployment.cluster_wide_access == False - - current_view_only_mode is defined - - current_auth_strategy is defined - - (current_view_only_mode|bool != kiali_vars.deployment.view_only_mode|bool) or (current_auth_strategy != kiali_vars.auth.strategy) or (current_rolebinding_view_only|bool == False and kiali_vars.auth.strategy != 'anonymous') - -- name: Delete Kiali cluster roles if view_only_mode or auth.strategy is changing since cluster role bindings are immutable - include_tasks: remove-clusterroles.yml - when: - - current_cluster_wide_access is defined - - current_cluster_wide_access|bool == True - - kiali_vars.deployment.cluster_wide_access == True - - current_view_only_mode is defined - - current_auth_strategy is defined - - (current_view_only_mode|bool != kiali_vars.deployment.view_only_mode|bool) or (current_auth_strategy != kiali_vars.auth.strategy) or (current_rolebinding_view_only|bool == False and kiali_vars.auth.strategy != 'anonymous') - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Processing namespace labels" - -- name: Remove Kiali label from namespaces that Kiali no longer has access to - # if a namespace happened to have been deleted, we do not want to (nor can we) resurrect it, hence we use state=patched - k8s: - state: patched - definition: | - {% for namespace in namespaces_no_longer_accessible %} - --- - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ namespace }}" - labels: - {{ kiali_instance_label_name }}: 
null - ... - {% endfor %} - when: - - namespaces_no_longer_accessible is defined - -- name: Create additional Kiali labels on all accessible namespaces - vars: - namespaces: "{{ discovery_selector_namespaces }}" - k8s: - state: patched - definition: | - {% for namespace in namespaces %} - --- - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ namespace }}" - labels: - {{ kiali_instance_label_name }}: "{{ kiali_instance_label_value }}" - ... - {% endfor %} - when: - - kiali_vars.deployment.cluster_wide_access == False - -- name: Delete Kiali deployment if image is changing - this uninstalled any old version of Kiali that might be running - k8s: - state: absent - api_version: apps/v1 - kind: Deployment - namespace: "{{ kiali_vars.deployment.namespace }}" - name: "{{ kiali_vars.deployment.instance_name }}" - when: - - current_image_name is defined and current_image_version is defined - - (current_image_name != kiali_vars.deployment.image_name) or (current_image_version != kiali_vars.deployment.image_version) - -# Get the deployment's custom annotation we set that tells us when we last updated the Deployment. -# We need this to ensure the Deployment we update retains this same timestamp unless changes are made -# that requires a pod restart - in which case we update this timestamp. -- name: Find current deployment, if it exists - set_fact: - current_deployment: "{{ lookup(k8s_plugin, resource_name=kiali_vars.deployment.instance_name, namespace=kiali_vars.deployment.namespace, api_version='apps/v1', kind='Deployment') }}" - -- name: Get current deployment last-updated annotation timestamp from existing deployment - set_fact: - current_deployment_last_updated: "{{ current_deployment.spec.template.metadata.annotations['operator.kiali.io/last-updated'] if current_deployment.spec.template.metadata.annotations['operator.kiali.io/last-updated'] is defined else lookup('pipe','date') }}" - deployment_is_new: false - when: - - current_deployment is defined - - current_deployment.spec is defined - - current_deployment.spec.template is defined - - current_deployment.spec.template.metadata is defined - - current_deployment.spec.template.metadata.annotations is defined - -- name: Set current deployment last-updated annotation timestamp for new deployments - set_fact: - current_deployment_last_updated: "{{ lookup('pipe','date') }}" - deployment_is_new: true - when: - - current_deployment_last_updated is not defined - -# Now deploy all resources for the specific cluster environment - -- name: Execute for OpenShift environment - include_tasks: openshift/os-main.yml - vars: - deployment_last_updated: "{{ current_deployment_last_updated }}" - when: - - is_openshift == True - -- name: Execute for Kubernetes environment - include_tasks: kubernetes/k8s-main.yml - vars: - deployment_last_updated: "{{ current_deployment_last_updated }}" - when: - - is_k8s == True - -# If something changed that can only be picked up when the Kiali pod starts up, then restart the Kiali pod using a rolling restart -# We do this by checking the processed_resources_dict created by process-resources.yml task. If there is a map key -# with the kind (ConfigMap) with the name of our config map appended to it ("-" + the kiali instanance name) see if that config map changed. -# If it did, we need to restart the kiali pod so it can re-read the new config. 
-- name: Force the Kiali pod to restart if necessary - vars: - keyname: "{{ 'ConfigMap-' + kiali_vars.deployment.instance_name }}" - updated_deployment: "{{ lookup(k8s_plugin, resource_name=kiali_vars.deployment.instance_name, namespace=kiali_vars.deployment.namespace, api_version='apps/v1', kind='Deployment') | combine({'spec': {'template': {'metadata': {'annotations': {'operator.kiali.io/last-updated': lookup('pipe','date') }}}}}, recursive=True) }}" - k8s: - state: "present" - definition: "{{ updated_deployment }}" - when: - - deployment_is_new == False - - processed_resources_dict is defined - - processed_resources_dict[keyname] is defined - - processed_resources_dict[keyname].changed == True - - processed_resources_dict[keyname].method == "update" - -# If the list of namespaces is manageable, store them in a comma-separate list. -# Otherwise, we'll just log the count. The purpose of this discoverySelectorNamespaces status field is -# just to inform the user how many namespaces the operator processed. -# Note that we only populate the discoverySelectorNamespaces if we are NOT in cluster wide access mode. -# This is because we really only care about what namespaces the operator discovered when -# not in cluster wide access mode as these are the namespaces where the Roles are created (they are -# the namespaces Kiali is granted permission to see). -- include_tasks: update-status-progress.yml - vars: - noDsn: - discoverySelectorNamespaces: null - listDsn: - discoverySelectorNamespaces: "{{ ('Number of namespaces (including control plane namespace): ' + (discovery_selector_namespaces | length | string)) if (discovery_selector_namespaces | length > 20) else (discovery_selector_namespaces | join(',')) }}" - status_progress_message: "Finished all resource creation" - status_vars: - deployment: "{{ listDsn if (kiali_vars.deployment.cluster_wide_access == False and discovery_selector_namespaces is defined) else noDsn }}" diff --git a/roles/v2.1/kiali-deploy/tasks/openshift/os-get-kiali-route-url.yml b/roles/v2.1/kiali-deploy/tasks/openshift/os-get-kiali-route-url.yml deleted file mode 100644 index 4610f0d3c..000000000 --- a/roles/v2.1/kiali-deploy/tasks/openshift/os-get-kiali-route-url.yml +++ /dev/null @@ -1,48 +0,0 @@ -# All of this is ultimately to obtain the kiali_route_url - -# Give some time for the route to come up - -- name: Detect Kiali route on OpenShift - k8s_info: - api_version: route.openshift.io/v1 - kind: Route - name: "{{ kiali_vars.deployment.instance_name }}" - namespace: "{{ kiali_vars.deployment.namespace }}" - register: kiali_route_raw - until: - - kiali_route_raw['resources'] is defined - - kiali_route_raw['resources'][0] is defined - - kiali_route_raw['resources'][0]['status'] is defined - - kiali_route_raw['resources'][0]['status']['ingress'] is defined - - kiali_route_raw['resources'][0]['status']['ingress'][0] is defined - - kiali_route_raw['resources'][0]['status']['ingress'][0]['host'] is defined - retries: 30 - delay: 10 - when: - - is_openshift == True - -- name: Set Kiali TLS Termination from OpenShift route - set_fact: - kiali_route_tls_termination: "{{ kiali_route_raw['resources'][0]['spec']['tls']['termination'] }}" - when: - - is_openshift == True - -- name: Detect HTTP Kiali OpenShift route protocol - set_fact: - kiali_route_protocol: "http" - when: - - is_openshift == True - - kiali_route_tls_termination == "" - -- name: Detect HTTPS Kiali OpenShift route protocol - set_fact: - kiali_route_protocol: "https" - when: - - is_openshift == True - - 
kiali_route_tls_termination != "" - -- name: Create URL for Kiali OpenShift route - set_fact: - kiali_route_url: "{{ kiali_route_protocol }}://{{ kiali_route_raw['resources'][0]['status']['ingress'][0]['host'] }}" - when: - - is_openshift == True diff --git a/roles/v2.1/kiali-deploy/tasks/openshift/os-main.yml b/roles/v2.1/kiali-deploy/tasks/openshift/os-main.yml deleted file mode 100644 index 605a975c9..000000000 --- a/roles/v2.1/kiali-deploy/tasks/openshift/os-main.yml +++ /dev/null @@ -1,120 +0,0 @@ -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Creating core resources" - when: - - is_openshift == True - -# If it is Removed, the web console is disabled. -# See: https://docs.openshift.com/container-platform/4.13/web_console/disabling-web-console.html -- name: Determine if OpenShift Console is installed and enabled - vars: - console_res: "{{ query(k8s_plugin, resource_name='cluster', api_version='operator.openshift.io/v1', kind='Console', errors='ignore') }}" - set_fact: - has_openshift_console: "{{ console_res | length > 0 and console_res[0].spec.managementState != 'Removed' }}" - when: - - is_openshift == True - -- name: Remove HPA if disabled on OpenShift - k8s: - state: absent - api_version: "{{ kiali_vars.deployment.hpa.api_version }}" - kind: "HorizontalPodAutoscaler" - namespace: "{{ kiali_vars.deployment.namespace }}" - name: "{{ kiali_vars.deployment.instance_name }}" - when: - - is_openshift == True - - kiali_vars.deployment.hpa.spec | length == 0 - -- name: Create Kiali objects on OpenShift - include_tasks: process-resource.yml - vars: - role_namespaces: "{{ [ kiali_vars.deployment.namespace ] }}" - process_resource_templates: - - "templates/openshift/serviceaccount.yaml" - - "templates/openshift/configmap.yaml" - - "templates/openshift/cabundle.yaml" - - "templates/openshift/{{ 'role-viewer' if ((kiali_vars.deployment.view_only_mode|bool == True) or (kiali_vars.auth.strategy != 'anonymous')) else 'role' }}.yaml" - - "templates/openshift/rolebinding.yaml" - - "{{ 'templates/openshift/clusterrole-oauth.yaml' if kiali_vars.auth.strategy == 'openshift' else '' }}" - - "{{ 'templates/openshift/clusterrolebinding-oauth.yaml' if kiali_vars.auth.strategy == 'openshift' else '' }}" - - "templates/openshift/deployment.yaml" - - "templates/openshift/service.yaml" - - "{{ 'templates/openshift/hpa.yaml' if kiali_vars.deployment.hpa.spec | length > 0 else '' }}" - - "{{ 'templates/openshift/route.yaml' if kiali_vars.deployment.ingress.enabled|bool == True else '' }}" - when: - - is_openshift == True - -- name: Delete Route on OpenShift if disabled - k8s: - state: absent - api_version: "route.openshift.io/v1" - kind: "Route" - namespace: "{{ kiali_vars.deployment.namespace }}" - name: "{{ kiali_vars.deployment.instance_name }}" - when: - - is_openshift == True - - kiali_vars.deployment.ingress.enabled|bool == False - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Creating additional roles" - when: - - is_openshift == True - - kiali_vars.deployment.cluster_wide_access == False - -- name: Create additional Kiali roles/bindings on all namespaces that are accessible on OpenShift - vars: - role_namespaces: "{{ discovery_selector_namespaces }}" - k8s: - template: - - "templates/openshift/{{ 'role-viewer' if ((kiali_vars.deployment.view_only_mode|bool == True) or (kiali_vars.auth.strategy != 'anonymous')) else 'role' }}.yaml" - - "templates/openshift/rolebinding.yaml" - when: - - is_openshift == True - - 
kiali_vars.deployment.cluster_wide_access == False - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Creating OpenShift resources" - when: - - is_openshift == True - -- name: Get the Kiali Route URL - include_tasks: openshift/os-get-kiali-route-url.yml - when: - - is_openshift == True - -- name: Process OpenShift OAuth client - k8s: - definition: "{{ lookup('template', 'templates/openshift/oauth.yaml') }}" - when: - - is_openshift == True - - kiali_vars.auth.strategy == "openshift" - -- name: Delete all ConsoleLinks for namespaces that are no longer accessible - k8s: - state: absent - definition: | - {% for namespace in namespaces_no_longer_accessible %} - --- - apiVersion: console.openshift.io/v1 - kind: ConsoleLink - metadata: - name: "{{ kiali_vars.deployment.instance_name }}-namespace-{{ namespace }}" - ... - {% endfor %} - when: - - is_openshift == True - - has_openshift_console is defined - - has_openshift_console == True - - namespaces_no_longer_accessible is defined - -- name: Process OpenShift Console Links - k8s: - definition: "{{ lookup('template', 'templates/openshift/console-links.yaml') }}" - vars: - namespaces: "{{ discovery_selector_namespaces }}" - when: - - is_openshift == True - - kiali_vars.deployment.cluster_wide_access == False - - openshift_version is version('4.3', '>=') diff --git a/roles/v2.1/kiali-deploy/tasks/process-resource.yml b/roles/v2.1/kiali-deploy/tasks/process-resource.yml deleted file mode 100644 index bc3289e83..000000000 --- a/roles/v2.1/kiali-deploy/tasks/process-resource.yml +++ /dev/null @@ -1,31 +0,0 @@ -# process all template names found in process_resource_templates - any empty strings in the list are ignored. -# This will keep a running tally of all processed resources in "processed_resources_dict". -- name: "Create Kiali resources from templates" - k8s: - state: "present" - continue_on_error: false - template: "{{ process_resource_templates | select() | list }}" - register: process_resource_templates_result - retries: 6 - delay: 10 - -# Store the results of the processed resources so they can be examined later (e.g. 
to know if something changed or stayed the same) -- vars: - kinds: "{{ process_resource_templates_result.result.results | map(attribute='result.kind') | list }}" - names: "{{ process_resource_templates_result.result.results | map(attribute='result.metadata.name') | list }}" - changed: "{{ process_resource_templates_result.result.results | map(attribute='changed') | list }}" - method: "{{ process_resource_templates_result.result.results | map(attribute='method') | list }}" - thedict: "{{ processed_resources_dict | default({}) }}" - set_fact: - processed_resources_dict: | - {% for kind in kinds %} - {% set _ = thedict.update({ (kind + '-' + names[loop.index0]): {'name': names[loop.index0], 'changed': changed[loop.index0], 'method': method[loop.index0]}}) %} - {% endfor %} - {{ thedict }} - when: - - process_resource_templates_result is defined - - process_resource_templates_result | length > 0 - -- name: "Kiali resource creation results" - debug: - msg: "{{ processed_resources_dict }}" \ No newline at end of file diff --git a/roles/v2.1/kiali-deploy/tasks/remove-clusterroles.yml b/roles/v2.1/kiali-deploy/tasks/remove-clusterroles.yml deleted file mode 100644 index 1e01603ee..000000000 --- a/roles/v2.1/kiali-deploy/tasks/remove-clusterroles.yml +++ /dev/null @@ -1,9 +0,0 @@ -- name: Delete unused Kiali cluster roles - ignore_errors: yes - k8s: - state: absent - continue_on_error: false - template: - - clusterroles-to-remove.yml - retries: 6 - delay: 10 \ No newline at end of file diff --git a/roles/v2.1/kiali-deploy/tasks/remove-roles.yml b/roles/v2.1/kiali-deploy/tasks/remove-roles.yml deleted file mode 100644 index c1bf4e0c4..000000000 --- a/roles/v2.1/kiali-deploy/tasks/remove-roles.yml +++ /dev/null @@ -1,27 +0,0 @@ -- name: Delete Kiali roles from previously accessible namespaces - k8s: - state: absent - definition: | - {% for namespace in role_namespaces %} - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: "{{ kiali_vars.deployment.instance_name }}" - namespace: "{{ namespace }}" - ... - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ kiali_vars.deployment.instance_name }}" - namespace: "{{ namespace }}" - ... - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ kiali_vars.deployment.instance_name }}-viewer" - namespace: "{{ namespace }}" - ... - {% endfor %} diff --git a/roles/v2.1/kiali-deploy/tasks/snake_camel_case.yaml b/roles/v2.1/kiali-deploy/tasks/snake_camel_case.yaml deleted file mode 100644 index a6e548596..000000000 --- a/roles/v2.1/kiali-deploy/tasks/snake_camel_case.yaml +++ /dev/null @@ -1,152 +0,0 @@ -# Because we are passing through some yaml directly to Kubernetes resources, we have to retain the camelCase keys. -# All CR parameters are converted to snake_case, but the original yaml is found in the special _kiali_io_kiali param. -# We need to copy that original yaml into our vars where appropriate to keep the camelCase. 
- -- name: Replace snake_case with camelCase in all appropriate fields - set_fact: - kiali_vars: | - {# deployment.affinity.node #} - {% if kiali_vars.deployment.affinity is defined and kiali_vars.deployment.affinity.node is defined and kiali_vars.deployment.affinity.node | length > 0 %} - {% set _=kiali_vars['deployment']['affinity'].pop('node') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'affinity': {'node': current_cr.spec.deployment.affinity.node }}}, recursive=True) %} - {% endif %} - {# #} - {# deployment.affinity.pod #} - {% if kiali_vars.deployment.affinity is defined and kiali_vars.deployment.affinity.pod is defined and kiali_vars.deployment.affinity.pod | length > 0 %} - {% set _=kiali_vars['deployment']['affinity'].pop('pod') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'affinity': {'pod': current_cr.spec.deployment.affinity.pod }}}, recursive=True) %} - {% endif %} - {# #} - {# custom_dashboards #} - {% if kiali_vars.custom_dashboards is defined and kiali_vars.custom_dashboards | length > 0 %} - {% set _=kiali_vars.pop('custom_dashboards') %} - {% set kiali_vars=kiali_vars | combine({'custom_dashboards': current_cr.spec.custom_dashboards }, recursive=True) %} - {% endif %} - {# #} - {# deployment.affinity.pod_anti #} - {% if kiali_vars.deployment.affinity is defined and kiali_vars.deployment.affinity.pod_anti is defined and kiali_vars.deployment.affinity.pod_anti | length > 0 %} - {% set _=kiali_vars['deployment']['affinity'].pop('pod_anti') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'affinity': {'pod_anti': current_cr.spec.deployment.affinity.pod_anti }}}, recursive=True) %} - {% endif %} - {# #} - {# deployment.tolerations #} - {% if kiali_vars.deployment.tolerations is defined and kiali_vars.deployment.tolerations | length > 0 %} - {% set _=kiali_vars['deployment'].pop('tolerations') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'tolerations': current_cr.spec.deployment.tolerations }}, recursive=True) %} - {% endif %} - {# #} - {# deployment.additional_service_yaml #} - {% if kiali_vars.deployment.additional_service_yaml is defined and kiali_vars.deployment.additional_service_yaml | length > 0 %} - {% set _=kiali_vars['deployment'].pop('additional_service_yaml') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'additional_service_yaml': current_cr.spec.deployment.additional_service_yaml }}, recursive=True) %} - {% endif %} - {# #} - {# deployment.resources #} - {% if kiali_vars.deployment.resources is defined and kiali_vars.deployment.resources | length > 0 %} - {% set _=kiali_vars['deployment'].pop('resources') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'resources': current_cr.spec.deployment.resources }}, recursive=True) %} - {% endif %} - {# #} - {# deployment.ingress.override_yaml #} - {% if kiali_vars.deployment.ingress.override_yaml is defined and kiali_vars.deployment.ingress.override_yaml | length > 0 %} - {% set _=kiali_vars['deployment']['ingress'].pop('override_yaml') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'ingress': {'override_yaml': current_cr.spec.deployment.ingress.override_yaml }}}, recursive=True) %} - {% endif %} - {# #} - {# deployment.ingress.additional_labels #} - {% if kiali_vars.deployment.ingress.additional_labels is defined and kiali_vars.deployment.ingress.additional_labels | length > 0 %} - {% set _=kiali_vars['deployment']['ingress'].pop('additional_labels') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'ingress': 
{'additional_labels': current_cr.spec.deployment.ingress.additional_labels }}}, recursive=True) %} - {% endif %} - {# #} - {# deployment.pod_annotations #} - {% if kiali_vars.deployment.pod_annotations is defined and kiali_vars.deployment.pod_annotations | length > 0 %} - {% set _=kiali_vars['deployment'].pop('pod_annotations') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'pod_annotations': current_cr.spec.deployment.pod_annotations }}, recursive=True) %} - {% endif %} - {# #} - {# deployment.pod_labels #} - {% if kiali_vars.deployment.pod_labels is defined and kiali_vars.deployment.pod_labels | length > 0 %} - {% set _=kiali_vars['deployment'].pop('pod_labels') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'pod_labels': current_cr.spec.deployment.pod_labels }}, recursive=True) %} - {% endif %} - {# #} - {# deployment.service_annotations #} - {% if kiali_vars.deployment.service_annotations is defined and kiali_vars.deployment.service_annotations | length > 0 %} - {% set _=kiali_vars['deployment'].pop('service_annotations') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'service_annotations': current_cr.spec.deployment.service_annotations }}, recursive=True) %} - {% endif %} - {# #} - {# deployment.hpa.spec #} - {% if kiali_vars.deployment.hpa is defined and kiali_vars.deployment.hpa.spec is defined and kiali_vars.deployment.hpa.spec | length > 0 %} - {% set _=kiali_vars['deployment']['hpa'].pop('spec') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'hpa': {'spec': current_cr.spec.deployment.hpa.spec }}}, recursive=True) %} - {% endif %} - {# #} - {# deployment.node_selector #} - {% if kiali_vars.deployment.node_selector is defined and kiali_vars.deployment.node_selector | length > 0 %} - {% set _=kiali_vars['deployment'].pop('node_selector') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'node_selector': current_cr.spec.deployment.node_selector }}, recursive=True) %} - {% endif %} - {# #} - {# external_services.custom_dashboards.prometheus.custom_headers #} - {% if kiali_vars.external_services.custom_dashboards.prometheus.custom_headers is defined and kiali_vars.external_services.custom_dashboards.prometheus.custom_headers | length > 0 %} - {% set _=kiali_vars['external_services']['custom_dashboards']['prometheus'].pop('custom_headers') %} - {% set kiali_vars=kiali_vars | combine({'external_services': {'custom_dashboards': {'prometheus': {'custom_headers': current_cr.spec.external_services.custom_dashboards.prometheus.custom_headers }}}}, recursive=True) %} - {% endif %} - {# #} - {# external_services.custom_dashboards.prometheus.query_scope #} - {% if kiali_vars.external_services.custom_dashboards.prometheus.query_scope is defined and kiali_vars.external_services.custom_dashboards.prometheus.query_scope | length > 0 %} - {% set _=kiali_vars['external_services']['custom_dashboards']['prometheus'].pop('query_scope') %} - {% set kiali_vars=kiali_vars | combine({'external_services': {'custom_dashboards': {'prometheus': {'query_scope': current_cr.spec.external_services.custom_dashboards.prometheus.query_scope }}}}, recursive=True) %} - {% endif %} - {# #} - {# external_services.prometheus.custom_headers #} - {% if kiali_vars.external_services.prometheus.custom_headers is defined and kiali_vars.external_services.prometheus.custom_headers | length > 0 %} - {% set _=kiali_vars['external_services']['prometheus'].pop('custom_headers') %} - {% set kiali_vars=kiali_vars | combine({'external_services': {'prometheus': 
{'custom_headers': current_cr.spec.external_services.prometheus.custom_headers }}}, recursive=True) %} - {% endif %} - {# #} - {# external_services.prometheus.query_scope #} - {% if kiali_vars.external_services.prometheus.query_scope is defined and kiali_vars.external_services.prometheus.query_scope | length > 0 %} - {% set _=kiali_vars['external_services']['prometheus'].pop('query_scope') %} - {% set kiali_vars=kiali_vars | combine({'external_services': {'prometheus': {'query_scope': current_cr.spec.external_services.prometheus.query_scope }}}, recursive=True) %} - {% endif %} - {# #} - {# deployment.configmap_annotations #} - {% if kiali_vars.deployment.configmap_annotations is defined and kiali_vars.deployment.configmap_annotations | length > 0 %} - {% set _=kiali_vars['deployment'].pop('configmap_annotations') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'configmap_annotations': current_cr.spec.deployment.configmap_annotations }}, recursive=True) %} - {% endif %} - {# #} - {# external_services.tracing.query_scope #} - {% if kiali_vars.external_services.tracing.query_scope is defined and kiali_vars.external_services.tracing.query_scope | length > 0 %} - {% set _=kiali_vars['external_services']['tracing'].pop('query_scope') %} - {% set kiali_vars=kiali_vars | combine({'external_services': {'tracing': {'query_scope': current_cr.spec.external_services.tracing.query_scope }}}, recursive=True) %} - {% endif %} - {# #} - {# deployment.security_context #} - {% if kiali_vars.deployment.security_context is defined and kiali_vars.deployment.security_context | length > 0 %} - {% set _=kiali_vars['deployment'].pop('security_context') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'security_context': current_cr.spec.deployment.security_context}}, recursive=True) %} - {% endif %} - {# #} - {# deployment.custom_secrets[].csi #} - {% if kiali_vars.deployment.custom_secrets is defined and kiali_vars.deployment.custom_secrets | length > 0 %} - {% set _=kiali_vars['deployment'].pop('custom_secrets') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'custom_secrets': current_cr.spec.deployment.custom_secrets}}, recursive=True) %} - {% endif %} - {# #} - {# external_services.tracing.custom_headers #} - {% if kiali_vars.external_services.tracing.custom_headers is defined and kiali_vars.external_services.tracing.custom_headers | length > 0 %} - {% set _=kiali_vars['external_services']['tracing'].pop('custom_headers') %} - {% set kiali_vars=kiali_vars | combine({'external_services': {'tracing': {'custom_headers': current_cr.spec.external_services.tracing.custom_headers }}}, recursive=True) %} - {% endif %} - {# #} - {# deployment.discovery_selectors #} - {% if kiali_vars.deployment.discovery_selectors is defined and kiali_vars.deployment.discovery_selectors | length > 0 %} - {% set _=kiali_vars['deployment'].pop('discovery_selectors') %} - {% set kiali_vars=kiali_vars | combine({'deployment': {'discovery_selectors': current_cr.spec.deployment.discovery_selectors}}, recursive=True) %} - {% endif %} - {# #} - {{ kiali_vars }} diff --git a/roles/v2.1/kiali-deploy/tasks/update-status-progress.yml b/roles/v2.1/kiali-deploy/tasks/update-status-progress.yml deleted file mode 100644 index 58570bceb..000000000 --- a/roles/v2.1/kiali-deploy/tasks/update-status-progress.yml +++ /dev/null @@ -1,16 +0,0 @@ -- name: Prepare status progress facts - ignore_errors: yes - set_fact: - status_progress_step: "{{ 1 if status_progress_step is not defined else (status_progress_step|int + 1) }}" - 
status_progress_start: "{{ ('%Y-%m-%d %H:%M:%S' | strftime) if status_progress_start is not defined else (status_progress_start) }}" - -- name: Update CR status progress field with any additional status fields - ignore_errors: yes - vars: - duration: "{{ ('%Y-%m-%d %H:%M:%S' | strftime | to_datetime) - (status_progress_start | to_datetime) }}" - operator_sdk.util.k8s_status: - api_version: "{{ current_cr.apiVersion }}" - kind: "{{ current_cr.kind }}" - name: "{{ current_cr.metadata.name }}" - namespace: "{{ current_cr.metadata.namespace }}" - status: "{{ status_vars | default({}) | combine({'progress':{'message': status_progress_step + '. ' + status_progress_message, 'duration': duration }}, recursive=True) }}" diff --git a/roles/v2.1/kiali-deploy/tasks/update-status.yml b/roles/v2.1/kiali-deploy/tasks/update-status.yml deleted file mode 100644 index fa7793085..000000000 --- a/roles/v2.1/kiali-deploy/tasks/update-status.yml +++ /dev/null @@ -1,8 +0,0 @@ -- name: Update CR status field - ignore_errors: yes - operator_sdk.util.k8s_status: - api_version: "{{ current_cr.apiVersion }}" - kind: "{{ current_cr.kind }}" - name: "{{ current_cr.metadata.name }}" - namespace: "{{ current_cr.metadata.namespace }}" - status: "{{ status_vars }}" diff --git a/roles/v2.1/kiali-deploy/templates/kubernetes/configmap.yaml b/roles/v2.1/kiali-deploy/templates/kubernetes/configmap.yaml deleted file mode 100644 index 6e1ca1986..000000000 --- a/roles/v2.1/kiali-deploy/templates/kubernetes/configmap.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} -{% if kiali_vars.deployment.configmap_annotations is defined and kiali_vars.deployment.configmap_annotations|length > 0 %} - annotations: - {{ kiali_vars.deployment.configmap_annotations | to_nice_yaml(indent=0) | trim | indent(4) }} -{% endif %} -data: - config.yaml: | - {{ kiali_vars | to_nice_yaml(indent=0) | trim | indent(4) }} diff --git a/roles/v2.1/kiali-deploy/templates/kubernetes/deployment.yaml b/roles/v2.1/kiali-deploy/templates/kubernetes/deployment.yaml deleted file mode 100644 index 86387709d..000000000 --- a/roles/v2.1/kiali-deploy/templates/kubernetes/deployment.yaml +++ /dev/null @@ -1,220 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} -spec: -{% if kiali_vars.deployment.hpa.spec | length == 0 %} - replicas: {{ kiali_vars.deployment.replicas }} -{% endif %} - selector: - matchLabels: - app.kubernetes.io/name: kiali - app.kubernetes.io/instance: {{ kiali_vars.deployment.instance_name }} - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - name: {{ kiali_vars.deployment.instance_name }} - labels: {{ kiali_resource_metadata_labels | combine(kiali_vars.deployment.pod_labels) }} - annotations: -{% if kiali_vars.server.observability.metrics.enabled|bool == True %} - prometheus.io/scrape: "true" - prometheus.io/port: "{{ kiali_vars.server.observability.metrics.port }}" -{% else %} - prometheus.io/scrape: "false" - prometheus.io/port: null -{% endif %} - kiali.io/dashboards: go,kiali - operator.kiali.io/last-updated: "{{ deployment_last_updated }}" -{% if kiali_vars.deployment.pod_annotations|length > 0 %} - {{ kiali_vars.deployment.pod_annotations | 
to_nice_yaml(indent=0) | trim | indent(8) }} -{% endif %} - spec: - serviceAccount: {{ kiali_vars.deployment.instance_name }}-service-account -{% if kiali_vars.deployment.priority_class_name != "" %} - priorityClassName: "{{ kiali_vars.deployment.priority_class_name }}" -{% endif %} -{% if kiali_vars.deployment.image_pull_secrets | default([]) | length > 0 %} - imagePullSecrets: -{% for n in kiali_vars.deployment.image_pull_secrets %} - - name: {{ n }} -{% endfor %} -{% endif %} -{% if kiali_vars.deployment.host_aliases|length > 0 %} - hostAliases: - {{ kiali_vars.deployment.host_aliases | to_nice_yaml(indent=0) | trim | indent(8) }} -{% endif %} -{% if kiali_vars.deployment.dns | length > 0 %} -{% if kiali_vars.deployment.dns.policy | length > 0 %} - dnsPolicy: "{{ kiali_vars.deployment.dns.policy }}" -{% endif %} -{% if kiali_vars.deployment.dns.config | length > 0 %} - dnsConfig: - {{ kiali_vars.deployment.dns.config | to_nice_yaml(indent=0) | trim | indent(8) }} -{% endif %} -{% endif %} - containers: - - image: {{ kiali_vars.deployment.image_name }}{{ '@' + kiali_vars.deployment.image_digest if kiali_vars.deployment.image_digest != '' else '' }}:{{ kiali_vars.deployment.image_version }} - imagePullPolicy: {{ kiali_vars.deployment.image_pull_policy }} - name: kiali - command: - - "/opt/kiali/kiali" - - "-config" - - "/kiali-configuration/config.yaml" - securityContext: -{% if kiali_vars.deployment.security_context|length > 0 %} - {{ kiali_vars.deployment.security_context | to_nice_yaml(indent=0) | trim | indent(10) }} -{% endif %} -{% if kiali_vars.deployment.security_context|length == 0 or lookup('env', 'ALLOW_SECURITY_CONTEXT_OVERRIDE') != "true" %} - allowPrivilegeEscalation: false - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: true - capabilities: - drop: - - ALL -{% endif %} - ports: - - name: api-port - containerPort: {{ kiali_vars.server.port }} -{% if kiali_vars.server.observability.metrics.enabled|bool == True %} - - name: http-metrics - containerPort: {{ kiali_vars.server.observability.metrics.port }} -{% endif %} - readinessProbe: - httpGet: - path: {{ kiali_vars.server.web_root | regex_replace('\\/$', '') }}/healthz - port: api-port - scheme: {{ 'HTTP' if kiali_vars.identity.cert_file == "" else 'HTTPS' }} - initialDelaySeconds: 5 - periodSeconds: 30 - livenessProbe: - httpGet: - path: {{ kiali_vars.server.web_root | regex_replace('\\/$', '') }}/healthz - port: api-port - scheme: {{ 'HTTP' if kiali_vars.identity.cert_file == "" else 'HTTPS' }} - initialDelaySeconds: 5 - periodSeconds: 30 - env: - - name: ACTIVE_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LOG_FORMAT - value: "{{ kiali_vars.deployment.logger.log_format }}" - - name: LOG_LEVEL - value: "{{ kiali_vars.deployment.logger.log_level }}" - - name: LOG_SAMPLER_RATE - value: "{{ kiali_vars.deployment.logger.sampler_rate }}" - - name: LOG_TIME_FIELD_FORMAT - value: "{{ kiali_vars.deployment.logger.time_field_format }}" -{% for env in kiali_vars.deployment.custom_envs %} - - name: "{{ env.name }}" - value: "{{ env.value }}" -{% endfor %} - volumeMounts: - - name: kiali-configuration - mountPath: "/kiali-configuration" - - name: kiali-secret - mountPath: "/kiali-secret" - - name: kiali-cabundle - mountPath: "/kiali-cabundle" -{% for sec in kiali_deployment_secret_volumes %} - - name: {{ sec }} - mountPath: "/kiali-override-secrets/{{ sec }}" - readOnly: true -{% endfor %} -{% for secret in kiali_vars.deployment.custom_secrets %} - - name: {{ secret.name }} - mountPath: 
"{{ secret.mount }}" -{% endfor %} -{% for sec in kiali_deployment_remote_cluster_secret_volumes %} - - name: {{ sec }} - mountPath: "/kiali-remote-cluster-secrets/{{ kiali_deployment_remote_cluster_secret_volumes[sec].secret_name }}" - readOnly: true -{% endfor %} -{% if kiali_vars.deployment.resources|length > 0 %} - resources: - {{ kiali_vars.deployment.resources | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - resources: null -{% endif %} - volumes: - - name: kiali-configuration - configMap: - name: {{ kiali_vars.deployment.instance_name }} - - name: kiali-secret - secret: - secretName: {{ kiali_vars.deployment.secret_name }} - optional: true - - name: kiali-cabundle - configMap: - name: {{ kiali_vars.deployment.instance_name }}-cabundle - optional: true -{% for sec in kiali_deployment_secret_volumes %} - - name: {{ sec }} - secret: - secretName: {{ kiali_deployment_secret_volumes[sec].secret_name }} - items: - - key: {{ kiali_deployment_secret_volumes[sec].secret_key }} - path: value.txt - optional: false -{% endfor %} -{% for secret in kiali_vars.deployment.custom_secrets %} - - name: {{ secret.name }} -{% if secret.csi is defined %} - csi: - {{ secret.csi | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - secret: - secretName: {{ secret.name }} -{% if secret.optional is defined %} - optional: {{ secret.optional }} -{% endif %} -{% endif %} -{% endfor %} -{% for sec in kiali_deployment_remote_cluster_secret_volumes %} - - name: {{ sec }} - secret: - secretName: {{ kiali_deployment_remote_cluster_secret_volumes[sec].secret_name }} -{% endfor %} -{% if kiali_vars.deployment.affinity.node|length > 0 or kiali_vars.deployment.affinity.pod|length > 0 or kiali_vars.deployment.affinity.pod_anti|length > 0 %} - affinity: -{% if kiali_vars.deployment.affinity.node|length > 0 %} - nodeAffinity: - {{ kiali_vars.deployment.affinity.node | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - nodeAffinity: null -{% endif %} -{% if kiali_vars.deployment.affinity.pod|length > 0 %} - podAffinity: - {{ kiali_vars.deployment.affinity.pod | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - podAffinity: null -{% endif %} -{% if kiali_vars.deployment.affinity.pod_anti|length > 0 %} - podAntiAffinity: - {{ kiali_vars.deployment.affinity.pod_anti | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - podAntiAffinity: null -{% endif %} -{% else %} - affinity: null -{% endif %} -{% if kiali_vars.deployment.tolerations|length > 0 %} - tolerations: - {{ kiali_vars.deployment.tolerations | to_nice_yaml(indent=0) | trim | indent(6) }} -{% else %} - tolerations: null -{% endif %} -{% if kiali_vars.deployment.node_selector|length > 0 %} - nodeSelector: - {{ kiali_vars.deployment.node_selector | to_nice_yaml(indent=0) | trim | indent(8) }} -{% else %} - nodeSelector: null -{% endif %} diff --git a/roles/v2.1/kiali-deploy/templates/kubernetes/hpa.yaml b/roles/v2.1/kiali-deploy/templates/kubernetes/hpa.yaml deleted file mode 100644 index 2802d2169..000000000 --- a/roles/v2.1/kiali-deploy/templates/kubernetes/hpa.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{% if kiali_vars.deployment.hpa.spec | length > 0 %} -apiVersion: {{ kiali_vars.deployment.hpa.api_version }} -kind: HorizontalPodAutoscaler -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ kiali_vars.deployment.instance_name }} - {{ 
kiali_vars.deployment.hpa.spec | to_nice_yaml(indent=0) | trim | indent(2) }} -{% endif %} diff --git a/roles/v2.1/kiali-deploy/templates/kubernetes/ingress.yaml b/roles/v2.1/kiali-deploy/templates/kubernetes/ingress.yaml deleted file mode 100644 index 8e89d3654..000000000 --- a/roles/v2.1/kiali-deploy/templates/kubernetes/ingress.yaml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: "networking.k8s.io/{{ 'v1' if (lookup(k8s_plugin, kind='Ingress', api_version='networking.k8s.io/v1', errors='ignore') is iterable) else 'v1beta1' }}" -kind: Ingress -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_vars.deployment.ingress.additional_labels | combine(kiali_resource_metadata_labels) }} -{% if kiali_vars.deployment.ingress.override_yaml is defined and kiali_vars.deployment.ingress.override_yaml.metadata is defined and kiali_vars.deployment.ingress.override_yaml.metadata.annotations is defined %} - {{ kiali_vars.deployment.ingress.override_yaml.metadata | to_nice_yaml(indent=0) | trim | indent(2) }} -{% else %} - annotations: - # For ingress-nginx versions older than 0.20.0 - # (see: https://github.com/kubernetes/ingress-nginx/issues/3416#issuecomment-438247948) - nginx.ingress.kubernetes.io/secure-backends: "{{ 'false' if kiali_vars.identity.cert_file == "" else 'true' }}" - # For ingress-nginx versions 0.20.0 and later - nginx.ingress.kubernetes.io/backend-protocol: "{{ 'HTTP' if kiali_vars.identity.cert_file == "" else 'HTTPS' }}" -{% endif %} -spec: -{% if kiali_vars.deployment.ingress.override_yaml is defined and kiali_vars.deployment.ingress.override_yaml.spec is defined %} - {{ kiali_vars.deployment.ingress.override_yaml.spec | to_nice_yaml(indent=0) | trim | indent(2) }} -{% else %} -{% if kiali_vars.deployment.ingress.class_name != "" %} - ingressClassName: {{ kiali_vars.deployment.ingress.class_name }} -{% endif %} - rules: - - http: - paths: - - path: {{ kiali_vars.server.web_root }} -{% if lookup(k8s_plugin, kind='Ingress', api_version='networking.k8s.io/v1', errors='ignore') is iterable %} - pathType: Prefix - backend: - service: - name: {{ kiali_vars.deployment.instance_name }} - port: - number: {{ kiali_vars.server.port }} -{% else %} - backend: - serviceName: {{ kiali_vars.deployment.instance_name }} - servicePort: {{ kiali_vars.server.port }} -{% endif %} -{% if kiali_vars.server.web_fqdn|length != 0 %} - host: {{ kiali_vars.server.web_fqdn }} -{% endif %} -{% endif %} diff --git a/roles/v2.1/kiali-deploy/templates/kubernetes/role-viewer.yaml b/roles/v2.1/kiali-deploy/templates/kubernetes/role-viewer.yaml deleted file mode 100644 index d560c6ff5..000000000 --- a/roles/v2.1/kiali-deploy/templates/kubernetes/role-viewer.yaml +++ /dev/null @@ -1,78 +0,0 @@ -{% for namespace in role_namespaces %} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: {{ role_kind }} -metadata: - name: {{ kiali_vars.deployment.instance_name }}-viewer - namespace: "{{ namespace }}" - labels: {{ kiali_resource_metadata_labels }} -rules: -- apiGroups: [""] - resources: - - configmaps - - endpoints -{% if 'logs-tab' not in kiali_vars.kiali_feature_flags.disabled_features %} - - pods/log -{% endif %} - verbs: - - get - - list - - watch -- apiGroups: [""] - resources: - - namespaces - - pods - - replicationcontrollers - - services - verbs: - - get - - list - - watch -- apiGroups: [""] - resources: - - pods/portforward - verbs: - - create - - post -- apiGroups: ["extensions", "apps"] - resources: - - daemonsets - - 
deployments - - replicasets - - statefulsets - verbs: - - get - - list - - watch -- apiGroups: ["batch"] - resources: - - cronjobs - - jobs - verbs: - - get - - list - - watch -- apiGroups: - - networking.istio.io - - security.istio.io - - extensions.istio.io - - telemetry.istio.io - - gateway.networking.k8s.io - resources: ["*"] - verbs: - - get - - list - - watch -- apiGroups: ["authentication.k8s.io"] - resources: - - tokenreviews - verbs: - - create -- apiGroups: ["admissionregistration.k8s.io"] - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - watch -{% endfor %} diff --git a/roles/v2.1/kiali-deploy/templates/kubernetes/role.yaml b/roles/v2.1/kiali-deploy/templates/kubernetes/role.yaml deleted file mode 100644 index eba6f6fb8..000000000 --- a/roles/v2.1/kiali-deploy/templates/kubernetes/role.yaml +++ /dev/null @@ -1,84 +0,0 @@ -{% for namespace in role_namespaces %} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: {{ role_kind }} -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ namespace }}" - labels: {{ kiali_resource_metadata_labels }} -rules: -- apiGroups: [""] - resources: - - configmaps - - endpoints -{% if 'logs-tab' not in kiali_vars.kiali_feature_flags.disabled_features %} - - pods/log -{% endif %} - verbs: - - get - - list - - watch -- apiGroups: [""] - resources: - - namespaces - - pods - - replicationcontrollers - - services - verbs: - - get - - list - - watch - - patch -- apiGroups: [""] - resources: - - pods/portforward - verbs: - - create - - post -- apiGroups: ["extensions", "apps"] - resources: - - daemonsets - - deployments - - replicasets - - statefulsets - verbs: - - get - - list - - watch - - patch -- apiGroups: ["batch"] - resources: - - cronjobs - - jobs - verbs: - - get - - list - - watch - - patch -- apiGroups: - - networking.istio.io - - security.istio.io - - extensions.istio.io - - telemetry.istio.io - - gateway.networking.k8s.io - resources: ["*"] - verbs: - - get - - list - - watch - - create - - delete - - patch -- apiGroups: ["authentication.k8s.io"] - resources: - - tokenreviews - verbs: - - create -- apiGroups: ["admissionregistration.k8s.io"] - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - watch -{% endfor %} diff --git a/roles/v2.1/kiali-deploy/templates/kubernetes/rolebinding.yaml b/roles/v2.1/kiali-deploy/templates/kubernetes/rolebinding.yaml deleted file mode 100644 index 19ec73a59..000000000 --- a/roles/v2.1/kiali-deploy/templates/kubernetes/rolebinding.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{% for namespace in role_namespaces %} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: {{ role_binding_kind }} -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ namespace }}" - labels: {{ kiali_resource_metadata_labels }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: {{ role_kind }} - name: {{ (kiali_vars.deployment.instance_name + '-viewer') if ((kiali_vars.deployment.view_only_mode|bool == True) or (kiali_vars.auth.strategy != 'anonymous')) else kiali_vars.deployment.instance_name }} -subjects: -- kind: ServiceAccount - name: {{ kiali_vars.deployment.instance_name }}-service-account - namespace: "{{ kiali_vars.deployment.namespace }}" -{% endfor %} diff --git a/roles/v2.1/kiali-deploy/templates/kubernetes/service.yaml b/roles/v2.1/kiali-deploy/templates/kubernetes/service.yaml deleted file mode 100644 index 299f6394d..000000000 --- a/roles/v2.1/kiali-deploy/templates/kubernetes/service.yaml +++ /dev/null @@ -1,43 +0,0 @@ 
-apiVersion: v1 -kind: Service -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} - annotations: -{% if kiali_vars.server.web_fqdn|length != 0 and kiali_vars.server.web_schema|length != 0 %} - kiali.io/external-url: {{ kiali_vars.server.web_schema + '://' + kiali_vars.server.web_fqdn + ((':' + kiali_vars.server.web_port | string) if (kiali_vars.server.web_port | string | length != 0) else '') + (kiali_vars.server.web_root | default('')) }} -{% endif %} -{% if kiali_vars.deployment.service_annotations|length > 0 %} - {{ kiali_vars.deployment.service_annotations | to_nice_yaml(indent=0) | trim | indent(4) }} -{% endif %} -spec: -{% if kiali_vars.deployment.service_type is defined %} - type: {{ kiali_vars.deployment.service_type }} -{% endif %} - ports: - - name: {{ 'http' if kiali_vars.identity.cert_file == "" else 'tcp' }} - protocol: TCP -{% if k8s_version is defined and k8s_version is version('1.20', '>=') %} - appProtocol: {{ 'http' if kiali_vars.identity.cert_file == "" else 'https' }} -{% endif %} - port: {{ kiali_vars.server.port }} -{% if kiali_vars.deployment.service_type is defined and kiali_vars.deployment.service_type == "NodePort" and kiali_vars.server.node_port is defined %} - nodePort: {{ kiali_vars.server.node_port }} -{% endif %} -{% if kiali_vars.server.observability.metrics.enabled|bool == True %} - - name: http-metrics - protocol: TCP -{% if k8s_version is defined and k8s_version is version('1.20', '>=') %} - appProtocol: http -{% endif %} - port: {{ kiali_vars.server.observability.metrics.port }} -{% endif %} - selector: -{% if query(k8s_plugin, kind='Service', resource_name=kiali_vars.deployment.instance_name, namespace=kiali_vars.deployment.namespace) | length > 0 %} - app: null - version: null -{% endif %} - app.kubernetes.io/name: kiali - app.kubernetes.io/instance: {{ kiali_vars.deployment.instance_name }} - {% if kiali_vars.deployment.additional_service_yaml is defined %}{{ kiali_vars.deployment.additional_service_yaml | to_nice_yaml(indent=0) | trim | indent(2) }}{% endif %} diff --git a/roles/v2.1/kiali-deploy/templates/kubernetes/serviceaccount.yaml b/roles/v2.1/kiali-deploy/templates/kubernetes/serviceaccount.yaml deleted file mode 100644 index 5feedce14..000000000 --- a/roles/v2.1/kiali-deploy/templates/kubernetes/serviceaccount.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ kiali_vars.deployment.instance_name }}-service-account - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} diff --git a/roles/v2.1/kiali-deploy/templates/openshift/cabundle.yaml b/roles/v2.1/kiali-deploy/templates/openshift/cabundle.yaml deleted file mode 100644 index c45c504fe..000000000 --- a/roles/v2.1/kiali-deploy/templates/openshift/cabundle.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ kiali_vars.deployment.instance_name }}-cabundle - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} - annotations: - service.beta.openshift.io/inject-cabundle: "true" diff --git a/roles/v2.1/kiali-deploy/templates/openshift/clusterrole-oauth.yaml b/roles/v2.1/kiali-deploy/templates/openshift/clusterrole-oauth.yaml deleted file mode 100644 index 8e94d3297..000000000 --- a/roles/v2.1/kiali-deploy/templates/openshift/clusterrole-oauth.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 
-kind: ClusterRole -metadata: - name: {{ kiali_vars.deployment.instance_name }}-{{ kiali_vars.deployment.namespace }}-oauth - labels: {{ kiali_resource_metadata_labels }} -rules: -- apiGroups: ["oauth.openshift.io"] - resources: - - oauthclients - resourceNames: - - {{ kiali_vars.deployment.instance_name }}-{{ kiali_vars.deployment.namespace }} - verbs: - - get diff --git a/roles/v2.1/kiali-deploy/templates/openshift/clusterrolebinding-oauth.yaml b/roles/v2.1/kiali-deploy/templates/openshift/clusterrolebinding-oauth.yaml deleted file mode 100644 index dfd03ee88..000000000 --- a/roles/v2.1/kiali-deploy/templates/openshift/clusterrolebinding-oauth.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ kiali_vars.deployment.instance_name }}-{{ kiali_vars.deployment.namespace }}-oauth - labels: {{ kiali_resource_metadata_labels }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ kiali_vars.deployment.instance_name }}-{{ kiali_vars.deployment.namespace }}-oauth -subjects: -- kind: ServiceAccount - name: {{ kiali_vars.deployment.instance_name }}-service-account - namespace: "{{ kiali_vars.deployment.namespace }}" diff --git a/roles/v2.1/kiali-deploy/templates/openshift/configmap.yaml b/roles/v2.1/kiali-deploy/templates/openshift/configmap.yaml deleted file mode 100644 index 6e1ca1986..000000000 --- a/roles/v2.1/kiali-deploy/templates/openshift/configmap.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} -{% if kiali_vars.deployment.configmap_annotations is defined and kiali_vars.deployment.configmap_annotations|length > 0 %} - annotations: - {{ kiali_vars.deployment.configmap_annotations | to_nice_yaml(indent=0) | trim | indent(4) }} -{% endif %} -data: - config.yaml: | - {{ kiali_vars | to_nice_yaml(indent=0) | trim | indent(4) }} diff --git a/roles/v2.1/kiali-deploy/templates/openshift/console-links.yaml b/roles/v2.1/kiali-deploy/templates/openshift/console-links.yaml deleted file mode 100644 index c0723f77a..000000000 --- a/roles/v2.1/kiali-deploy/templates/openshift/console-links.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{% for namespace in namespaces %} ---- -apiVersion: console.openshift.io/v1 -kind: ConsoleLink -metadata: - name: {{ kiali_vars.deployment.instance_name }}-namespace-{{ namespace }} - labels: {{ kiali_resource_metadata_labels | combine({ kiali_instance_label_name: kiali_instance_label_value }) }} -spec: - href: {{ kiali_route_url }}{{ '/' if kiali_vars.server.web_root == '/' else (kiali_vars.server.web_root + '/') }}console/graph/namespaces?namespaces={{ namespace }} - location: NamespaceDashboard - text: Kiali - text: {{ ('Kiali [' + kiali_vars.deployment.instance_name + ']') if kiali_vars.deployment.instance_name != 'kiali' else 'Kiali' }} - namespaceDashboard: - namespaces: - - "{{ namespace }}" -{% endfor %} diff --git a/roles/v2.1/kiali-deploy/templates/openshift/deployment.yaml b/roles/v2.1/kiali-deploy/templates/openshift/deployment.yaml deleted file mode 100644 index de40c8ced..000000000 --- a/roles/v2.1/kiali-deploy/templates/openshift/deployment.yaml +++ /dev/null @@ -1,228 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} -spec: -{% if 
kiali_vars.deployment.hpa.spec | length == 0 %} - replicas: {{ kiali_vars.deployment.replicas }} -{% endif %} - selector: - matchLabels: - app.kubernetes.io/name: kiali - app.kubernetes.io/instance: {{ kiali_vars.deployment.instance_name }} - template: - metadata: - name: {{ kiali_vars.deployment.instance_name }} - labels: {{ kiali_resource_metadata_labels | combine(kiali_vars.deployment.pod_labels) }} - annotations: -{% if kiali_vars.server.observability.metrics.enabled|bool == True %} - prometheus.io/scrape: "true" - prometheus.io/port: "{{ kiali_vars.server.observability.metrics.port }}" -{% else %} - prometheus.io/scrape: "false" - prometheus.io/port: null -{% endif %} - kiali.io/dashboards: go,kiali - operator.kiali.io/last-updated: "{{ deployment_last_updated }}" -{% if kiali_vars.deployment.pod_annotations|length > 0 %} - {{ kiali_vars.deployment.pod_annotations | to_nice_yaml(indent=0) | trim | indent(8) }} -{% endif %} - strategy: - rollingUpdate: - maxSurge: 1 - maxAvailable: 1 - type: RollingUpdate - spec: - serviceAccount: {{ kiali_vars.deployment.instance_name }}-service-account -{% if kiali_vars.deployment.priority_class_name != "" %} - priorityClassName: "{{ kiali_vars.deployment.priority_class_name }}" -{% endif %} -{% if kiali_vars.deployment.image_pull_secrets | default([]) | length > 0 %} - imagePullSecrets: -{% for n in kiali_vars.deployment.image_pull_secrets %} - - name: {{ n }} -{% endfor %} -{% endif %} -{% if kiali_vars.deployment.host_aliases|length > 0 %} - hostAliases: - {{ kiali_vars.deployment.host_aliases | to_nice_yaml(indent=0) | trim | indent(8) }} -{% endif %} -{% if kiali_vars.deployment.dns | length > 0 %} -{% if kiali_vars.deployment.dns.policy | length > 0 %} - dnsPolicy: "{{ kiali_vars.deployment.dns.policy }}" -{% endif %} -{% if kiali_vars.deployment.dns.config | length > 0 %} - dnsConfig: - {{ kiali_vars.deployment.dns.config | to_nice_yaml(indent=0) | trim | indent(8) }} -{% endif %} -{% endif %} - containers: - - image: {{ kiali_vars.deployment.image_name }}{{ '@' + kiali_vars.deployment.image_digest if kiali_vars.deployment.image_digest != '' else '' }}:{{ kiali_vars.deployment.image_version }} - imagePullPolicy: {{ kiali_vars.deployment.image_pull_policy }} - name: kiali - command: - - "/opt/kiali/kiali" - - "-config" - - "/kiali-configuration/config.yaml" - securityContext: -{% if kiali_vars.deployment.security_context|length > 0 %} - {{ kiali_vars.deployment.security_context | to_nice_yaml(indent=0) | trim | indent(10) }} -{% endif %} -{% if kiali_vars.deployment.security_context|length == 0 or lookup('env', 'ALLOW_SECURITY_CONTEXT_OVERRIDE') != "true" %} - allowPrivilegeEscalation: false - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: true - capabilities: - drop: - - ALL -{% endif %} - ports: - - name: api-port - containerPort: {{ kiali_vars.server.port }} -{% if kiali_vars.server.observability.metrics.enabled|bool == True %} - - name: http-metrics - containerPort: {{ kiali_vars.server.observability.metrics.port }} -{% endif %} - readinessProbe: - httpGet: - path: {{ kiali_vars.server.web_root | regex_replace('\\/$', '') }}/healthz - port: api-port - scheme: {{ 'HTTP' if kiali_vars.identity.cert_file == "" else 'HTTPS' }} - initialDelaySeconds: 5 - periodSeconds: 30 - livenessProbe: - httpGet: - path: {{ kiali_vars.server.web_root | regex_replace('\\/$', '') }}/healthz - port: api-port - scheme: {{ 'HTTP' if kiali_vars.identity.cert_file == "" else 'HTTPS' }} - initialDelaySeconds: 5 - periodSeconds: 30 - env: - - name: 
ACTIVE_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: LOG_FORMAT - value: "{{ kiali_vars.deployment.logger.log_format }}" - - name: LOG_LEVEL - value: "{{ kiali_vars.deployment.logger.log_level }}" - - name: LOG_SAMPLER_RATE - value: "{{ kiali_vars.deployment.logger.sampler_rate }}" - - name: LOG_TIME_FIELD_FORMAT - value: "{{ kiali_vars.deployment.logger.time_field_format }}" -{% for env in kiali_vars.deployment.custom_envs %} - - name: "{{ env.name }}" - value: "{{ env.value }}" -{% endfor %} - volumeMounts: - - name: kiali-configuration - mountPath: "/kiali-configuration" -{% if kiali_vars.identity.cert_file == "/kiali-cert/tls.crt" %} - - name: kiali-cert - mountPath: "/kiali-cert" -{% endif %} - - name: kiali-secret - mountPath: "/kiali-secret" - - name: kiali-cabundle - mountPath: "/kiali-cabundle" -{% for sec in kiali_deployment_secret_volumes %} - - name: {{ sec }} - mountPath: "/kiali-override-secrets/{{ sec }}" - readOnly: true -{% endfor %} -{% for secret in kiali_vars.deployment.custom_secrets %} - - name: {{ secret.name }} - mountPath: "{{ secret.mount }}" -{% endfor %} -{% for sec in kiali_deployment_remote_cluster_secret_volumes %} - - name: {{ sec }} - mountPath: "/kiali-remote-cluster-secrets/{{ kiali_deployment_remote_cluster_secret_volumes[sec].secret_name }}" - readOnly: true -{% endfor %} -{% if kiali_vars.deployment.resources|length > 0 %} - resources: - {{ kiali_vars.deployment.resources | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - resources: null -{% endif %} - volumes: - - name: kiali-configuration - configMap: - name: {{ kiali_vars.deployment.instance_name }} -{% if kiali_vars.identity.cert_file == "/kiali-cert/tls.crt" %} - - name: kiali-cert - secret: - secretName: {{ kiali_vars.deployment.instance_name }}-cert-secret -{% endif %} - - name: kiali-secret - secret: - secretName: {{ kiali_vars.deployment.secret_name }} - optional: true - - name: kiali-cabundle - configMap: - name: {{ kiali_vars.deployment.instance_name }}-cabundle -{% for sec in kiali_deployment_secret_volumes %} - - name: {{ sec }} - secret: - secretName: {{ kiali_deployment_secret_volumes[sec].secret_name }} - items: - - key: {{ kiali_deployment_secret_volumes[sec].secret_key }} - path: value.txt - optional: false -{% endfor %} -{% for secret in kiali_vars.deployment.custom_secrets %} - - name: {{ secret.name }} -{% if secret.csi is defined %} - csi: - {{ secret.csi | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - secret: - secretName: {{ secret.name }} -{% if secret.optional is defined %} - optional: {{ secret.optional }} -{% endif %} -{% endif %} -{% endfor %} -{% for sec in kiali_deployment_remote_cluster_secret_volumes %} - - name: {{ sec }} - secret: - secretName: {{ kiali_deployment_remote_cluster_secret_volumes[sec].secret_name }} -{% endfor %} -{% if kiali_vars.deployment.affinity.node|length > 0 or kiali_vars.deployment.affinity.pod|length > 0 or kiali_vars.deployment.affinity.pod_anti|length > 0 %} - affinity: -{% if kiali_vars.deployment.affinity.node|length > 0 %} - nodeAffinity: - {{ kiali_vars.deployment.affinity.node | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - nodeAffinity: null -{% endif %} -{% if kiali_vars.deployment.affinity.pod|length > 0 %} - podAffinity: - {{ kiali_vars.deployment.affinity.pod | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - podAffinity: null -{% endif %} -{% if kiali_vars.deployment.affinity.pod_anti|length > 0 %} - podAntiAffinity: - {{ 
kiali_vars.deployment.affinity.pod_anti | to_nice_yaml(indent=0) | trim | indent(10) }} -{% else %} - podAntiAffinity: null -{% endif %} -{% else %} - affinity: null -{% endif %} -{% if kiali_vars.deployment.tolerations|length > 0 %} - tolerations: - {{ kiali_vars.deployment.tolerations | to_nice_yaml(indent=0) | trim | indent(6) }} -{% else %} - tolerations: null -{% endif %} -{% if kiali_vars.deployment.node_selector|length > 0 %} - nodeSelector: - {{ kiali_vars.deployment.node_selector | to_nice_yaml(indent=0) | trim | indent(8) }} -{% else %} - nodeSelector: null -{% endif %} diff --git a/roles/v2.1/kiali-deploy/templates/openshift/hpa.yaml b/roles/v2.1/kiali-deploy/templates/openshift/hpa.yaml deleted file mode 100644 index 2802d2169..000000000 --- a/roles/v2.1/kiali-deploy/templates/openshift/hpa.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{% if kiali_vars.deployment.hpa.spec | length > 0 %} -apiVersion: {{ kiali_vars.deployment.hpa.api_version }} -kind: HorizontalPodAutoscaler -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ kiali_vars.deployment.instance_name }} - {{ kiali_vars.deployment.hpa.spec | to_nice_yaml(indent=0) | trim | indent(2) }} -{% endif %} diff --git a/roles/v2.1/kiali-deploy/templates/openshift/oauth.yaml b/roles/v2.1/kiali-deploy/templates/openshift/oauth.yaml deleted file mode 100644 index d9dec0f04..000000000 --- a/roles/v2.1/kiali-deploy/templates/openshift/oauth.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: oauth.openshift.io/v1 -kind: OAuthClient -metadata: - name: {{ kiali_vars.deployment.instance_name }}-{{ kiali_vars.deployment.namespace }} - labels: {{ kiali_resource_metadata_labels }} -redirectURIs: - - {{ kiali_route_url }}/api/auth/callback -{% if kiali_vars.server.web_port | length > 0 %} - - {{ kiali_route_url }}:{{ kiali_vars.server.web_port }}/api/auth/callback -{% endif %} -grantMethod: auto -{% if kiali_vars.auth.openshift.token_inactivity_timeout is defined %} -accessTokenInactivityTimeoutSeconds: {{ kiali_vars.auth.openshift.token_inactivity_timeout }} -{% endif %} -{% if kiali_vars.auth.openshift.token_max_age is defined %} -accessTokenMaxAgeSeconds: {{ kiali_vars.auth.openshift.token_max_age }} -{% endif %} diff --git a/roles/v2.1/kiali-deploy/templates/openshift/role-viewer.yaml b/roles/v2.1/kiali-deploy/templates/openshift/role-viewer.yaml deleted file mode 100644 index af7e8c5ea..000000000 --- a/roles/v2.1/kiali-deploy/templates/openshift/role-viewer.yaml +++ /dev/null @@ -1,95 +0,0 @@ -{% for namespace in role_namespaces %} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: {{ role_kind }} -metadata: - name: {{ kiali_vars.deployment.instance_name }}-viewer - namespace: "{{ namespace }}" - labels: {{ kiali_resource_metadata_labels }} -rules: -- apiGroups: [""] - resources: - - configmaps - - endpoints -{% if 'logs-tab' not in kiali_vars.kiali_feature_flags.disabled_features %} - - pods/log -{% endif %} - verbs: - - get - - list - - watch -- apiGroups: [""] - resources: - - namespaces - - pods - - replicationcontrollers - - services - verbs: - - get - - list - - watch -- apiGroups: [""] - resources: - - pods/portforward - verbs: - - create - - post -- apiGroups: ["extensions", "apps"] - resources: - - daemonsets - - deployments - - replicasets - - statefulsets - verbs: - - get - - list - - watch -- apiGroups: ["batch"] - resources: - - cronjobs - - jobs - 
verbs: - - get - - list - - watch -- apiGroups: - - networking.istio.io - - security.istio.io - - extensions.istio.io - - telemetry.istio.io - - gateway.networking.k8s.io - resources: ["*"] - verbs: - - get - - list - - watch -- apiGroups: ["apps.openshift.io"] - resources: - - deploymentconfigs - verbs: - - get - - list - - watch -- apiGroups: ["project.openshift.io"] - resources: - - projects - verbs: - - get -- apiGroups: ["route.openshift.io"] - resources: - - routes - verbs: - - get -- apiGroups: ["authentication.k8s.io"] - resources: - - tokenreviews - verbs: - - create -- apiGroups: ["admissionregistration.k8s.io"] - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - watch -{% endfor %} diff --git a/roles/v2.1/kiali-deploy/templates/openshift/role.yaml b/roles/v2.1/kiali-deploy/templates/openshift/role.yaml deleted file mode 100644 index fb1ea6767..000000000 --- a/roles/v2.1/kiali-deploy/templates/openshift/role.yaml +++ /dev/null @@ -1,102 +0,0 @@ -{% for namespace in role_namespaces %} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: {{ role_kind }} -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ namespace }}" - labels: {{ kiali_resource_metadata_labels }} -rules: -- apiGroups: [""] - resources: - - configmaps - - endpoints -{% if 'logs-tab' not in kiali_vars.kiali_feature_flags.disabled_features %} - - pods/log -{% endif %} - verbs: - - get - - list - - watch -- apiGroups: [""] - resources: - - namespaces - - pods - - replicationcontrollers - - services - verbs: - - get - - list - - watch - - patch -- apiGroups: [""] - resources: - - pods/portforward - verbs: - - create - - post -- apiGroups: ["extensions", "apps"] - resources: - - daemonsets - - deployments - - replicasets - - statefulsets - verbs: - - get - - list - - watch - - patch -- apiGroups: ["batch"] - resources: - - cronjobs - - jobs - verbs: - - get - - list - - watch - - patch -- apiGroups: - - networking.istio.io - - security.istio.io - - extensions.istio.io - - telemetry.istio.io - - gateway.networking.k8s.io - resources: ["*"] - verbs: - - get - - list - - watch - - create - - delete - - patch -- apiGroups: ["apps.openshift.io"] - resources: - - deploymentconfigs - verbs: - - get - - list - - watch - - patch -- apiGroups: ["project.openshift.io"] - resources: - - projects - verbs: - - get -- apiGroups: ["route.openshift.io"] - resources: - - routes - verbs: - - get -- apiGroups: ["authentication.k8s.io"] - resources: - - tokenreviews - verbs: - - create -- apiGroups: ["admissionregistration.k8s.io"] - resources: - - mutatingwebhookconfigurations - verbs: - - get - - list - - watch -{% endfor %} diff --git a/roles/v2.1/kiali-deploy/templates/openshift/rolebinding.yaml b/roles/v2.1/kiali-deploy/templates/openshift/rolebinding.yaml deleted file mode 100644 index 19ec73a59..000000000 --- a/roles/v2.1/kiali-deploy/templates/openshift/rolebinding.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{% for namespace in role_namespaces %} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: {{ role_binding_kind }} -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ namespace }}" - labels: {{ kiali_resource_metadata_labels }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: {{ role_kind }} - name: {{ (kiali_vars.deployment.instance_name + '-viewer') if ((kiali_vars.deployment.view_only_mode|bool == True) or (kiali_vars.auth.strategy != 'anonymous')) else kiali_vars.deployment.instance_name }} -subjects: -- kind: ServiceAccount - name: {{ 
kiali_vars.deployment.instance_name }}-service-account - namespace: "{{ kiali_vars.deployment.namespace }}" -{% endfor %} diff --git a/roles/v2.1/kiali-deploy/templates/openshift/route.yaml b/roles/v2.1/kiali-deploy/templates/openshift/route.yaml deleted file mode 100644 index a8156d375..000000000 --- a/roles/v2.1/kiali-deploy/templates/openshift/route.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_vars.deployment.ingress.additional_labels | combine(kiali_resource_metadata_labels) }} -{% if kiali_vars.deployment.ingress.override_yaml is defined and kiali_vars.deployment.ingress.override_yaml.metadata is defined and kiali_vars.deployment.ingress.override_yaml.metadata.annotations is defined %} - {{ kiali_vars.deployment.ingress.override_yaml.metadata | to_nice_yaml(indent=0) | trim | indent(2) }} -{% endif %} -spec: -{% if kiali_vars.deployment.ingress.override_yaml is defined and kiali_vars.deployment.ingress.override_yaml.spec is defined %} - {{ kiali_vars.deployment.ingress.override_yaml.spec | to_nice_yaml(indent=0) | trim | indent(2) }} -{% else %} - tls: - termination: reencrypt - insecureEdgeTerminationPolicy: Redirect - to: - kind: Service - name: {{ kiali_vars.deployment.instance_name }} - port: - targetPort: {{ kiali_vars.server.port }} -{% endif %} diff --git a/roles/v2.1/kiali-deploy/templates/openshift/service.yaml b/roles/v2.1/kiali-deploy/templates/openshift/service.yaml deleted file mode 100644 index fdde4bfc2..000000000 --- a/roles/v2.1/kiali-deploy/templates/openshift/service.yaml +++ /dev/null @@ -1,41 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ kiali_vars.deployment.instance_name }} - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} - annotations: - service.beta.openshift.io/serving-cert-secret-name: {{ kiali_vars.deployment.instance_name }}-cert-secret -{% if kiali_vars.deployment.service_annotations|length > 0 %} - {{ kiali_vars.deployment.service_annotations | to_nice_yaml(indent=0) | trim | indent(4) }} -{% endif %} -spec: -{% if kiali_vars.deployment.service_type is defined %} - type: {{ kiali_vars.deployment.service_type }} -{% endif %} - ports: - - name: {{ 'http' if kiali_vars.identity.cert_file == "" else 'tcp' }} - protocol: TCP -{% if k8s_version is defined and k8s_version is version('1.20', '>=') %} - appProtocol: {{ 'http' if kiali_vars.identity.cert_file == "" else 'https' }} -{% endif %} - port: {{ kiali_vars.server.port }} -{% if kiali_vars.deployment.service_type is defined and kiali_vars.deployment.service_type == "NodePort" and kiali_vars.server.node_port is defined %} - nodePort: {{ kiali_vars.server.node_port }} -{% endif %} -{% if kiali_vars.server.observability.metrics.enabled|bool == True %} - - name: http-metrics - protocol: TCP -{% if k8s_version is defined and k8s_version is version('1.20', '>=') %} - appProtocol: http -{% endif %} - port: {{ kiali_vars.server.observability.metrics.port }} -{% endif %} - selector: -{% if query(k8s_plugin, kind='Service', resource_name=kiali_vars.deployment.instance_name, namespace=kiali_vars.deployment.namespace) | length > 0 %} - app: null - version: null -{% endif %} - app.kubernetes.io/name: kiali - app.kubernetes.io/instance: {{ kiali_vars.deployment.instance_name }} - {% if kiali_vars.deployment.additional_service_yaml is defined %}{{ 
kiali_vars.deployment.additional_service_yaml | to_nice_yaml(indent=0) | trim | indent(2) }}{% endif %} diff --git a/roles/v2.1/kiali-deploy/templates/openshift/serviceaccount.yaml b/roles/v2.1/kiali-deploy/templates/openshift/serviceaccount.yaml deleted file mode 100644 index 5feedce14..000000000 --- a/roles/v2.1/kiali-deploy/templates/openshift/serviceaccount.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ kiali_vars.deployment.instance_name }}-service-account - namespace: "{{ kiali_vars.deployment.namespace }}" - labels: {{ kiali_resource_metadata_labels }} diff --git a/roles/v2.1/kiali-deploy/vars/main.yml b/roles/v2.1/kiali-deploy/vars/main.yml deleted file mode 100644 index 553583618..000000000 --- a/roles/v2.1/kiali-deploy/vars/main.yml +++ /dev/null @@ -1,116 +0,0 @@ -# These are the actual variables used by the role. You will notice it is -# one big dictionary (key="kiali_vars") whose child dictionaries mimic those -# as defined in defaults/main.yml. -# The child dictionaries below will have values that are a combination of the default values -# (as found in defaults/main.yaml) and user-supplied values. -# Without this magic, a user supplying only one key/value pair in a child dictionary will -# clear out (make undefined) all the rest of the key/value pairs in that child dictionary. -# This is not what we want. We want the rest of the dictionary to keep the defaults, -# thus allowing the user to override only a subset of key/values in a dictionary. -# -# I found this trick at https://groups.google.com/forum/#!topic/Ansible-project/pGbRYZyqxZ4 -# I tweeked that solution a little bit because I did not want to require the user to supply -# everything under a main "kiali_vars" dictionary. - -kiali_vars: - installation_tag: "{{ installation_tag | default(kiali_defaults.installation_tag) }}" - istio_namespace: "{{ istio_namespace | default(kiali_defaults.istio_namespace) }}" - version: "{{ version | default(kiali_defaults.version) }}" - - additional_display_details: | - {%- if additional_display_details is defined and additional_display_details is iterable -%} - {{ additional_display_details }} - {%- else -%} - {{ kiali_defaults.additional_display_details }} - {%- endif -%} - - auth: | - {%- if auth is defined and auth is iterable -%} - {{ kiali_defaults.auth | combine((auth | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.auth }} - {%- endif -%} - - clustering: | - {%- if clustering is defined and clustering is iterable -%} - {{ kiali_defaults.clustering | combine((clustering | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.clustering }} - {%- endif -%} - - custom_dashboards: | - {%- if custom_dashboards is defined and custom_dashboards is iterable -%} - {{ custom_dashboards }} - {%- else -%} - {{ kiali_defaults.custom_dashboards }} - {%- endif -%} - - deployment: | - {%- if deployment is defined and deployment is iterable -%} - {{ kiali_defaults.deployment | combine((deployment | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.deployment }} - {%- endif -%} - - extensions: | - {%- if extensions is defined and extensions is iterable -%} - {{ extensions }} - {%- else -%} - {{ kiali_defaults.extensions }} - {%- endif -%} - - external_services: | - {%- if external_services is defined and external_services is iterable -%} - {{ kiali_defaults.external_services | combine((external_services | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.external_services }} - {%- endif 
-%} - - health_config: | - {%- if health_config is defined and health_config is iterable -%} - {{ kiali_defaults.health_config | combine((health_config | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.health_config }} - {%- endif -%} - - identity: | - {%- if identity is defined and identity is iterable -%} - {{ kiali_defaults.identity | combine((identity | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.identity }} - {%- endif -%} - - istio_labels: | - {%- if istio_labels is defined and istio_labels is iterable -%} - {{ kiali_defaults.istio_labels | combine((istio_labels | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.istio_labels }} - {%- endif -%} - - kiali_feature_flags: | - {%- if kiali_feature_flags is defined and kiali_feature_flags is iterable -%} - {{ kiali_defaults.kiali_feature_flags | combine((kiali_feature_flags | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.kiali_feature_flags }} - {%- endif -%} - - kubernetes_config: | - {%- if kubernetes_config is defined and kubernetes_config is iterable -%} - {{ kiali_defaults.kubernetes_config | combine((kubernetes_config | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.kubernetes_config }} - {%- endif -%} - - login_token: | - {%- if login_token is defined and login_token is iterable -%} - {{ kiali_defaults.login_token | combine((login_token | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.login_token }} - {%- endif -%} - - server: | - {%- if server is defined and server is iterable -%} - {{ kiali_defaults.server | combine((server | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults.server }} - {%- endif -%} diff --git a/roles/v2.1/kiali-remove/defaults/main.yml b/roles/v2.1/kiali-remove/defaults/main.yml deleted file mode 100644 index e3698ca4f..000000000 --- a/roles/v2.1/kiali-remove/defaults/main.yml +++ /dev/null @@ -1,11 +0,0 @@ -kiali_defaults_remove: - istio_namespace: "" - - deployment: - hpa: - api_version: "" - instance_name: "kiali" - -# Will be auto-detected, but for debugging purposes you can force one of these to true -is_k8s: false -is_openshift: false diff --git a/roles/v2.1/kiali-remove/filter_plugins/stripnone.py b/roles/v2.1/kiali-remove/filter_plugins/stripnone.py deleted file mode 100644 index 4dbd53033..000000000 --- a/roles/v2.1/kiali-remove/filter_plugins/stripnone.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} - -# Process recursively the given value if it is a dict and remove all keys that have a None value -def strip_none(value): - if isinstance(value, dict): - dicts = {} - for k,v in value.items(): - if isinstance(v, dict): - dicts[k] = strip_none(v) - elif v is not None: - dicts[k] = v - return dicts - else: - return value - -# ---- Ansible filters ---- -class FilterModule(object): - def filters(self): - return { - 'stripnone': strip_none - } diff --git a/roles/v2.1/kiali-remove/meta/main.yml b/roles/v2.1/kiali-remove/meta/main.yml deleted file mode 100644 index e9334e3c7..000000000 --- a/roles/v2.1/kiali-remove/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -collections: -- kubernetes.core diff --git a/roles/v2.1/kiali-remove/tasks/clusterroles-to-remove.yml b/roles/v2.1/kiali-remove/tasks/clusterroles-to-remove.yml deleted file mode 100644 index 7fc61dc49..000000000 --- 
a/roles/v2.1/kiali-remove/tasks/clusterroles-to-remove.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ kiali_vars_remove.deployment.instance_name }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ kiali_vars_remove.deployment.instance_name }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ kiali_vars_remove.deployment.instance_name }}-viewer ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ kiali_vars_remove.deployment.instance_name }}-{{ kiali_vars_remove.deployment.namespace }}-oauth ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ kiali_vars_remove.deployment.instance_name }}-{{ kiali_vars_remove.deployment.namespace }}-oauth diff --git a/roles/v2.1/kiali-remove/tasks/main.yml b/roles/v2.1/kiali-remove/tasks/main.yml deleted file mode 100644 index d589ade49..000000000 --- a/roles/v2.1/kiali-remove/tasks/main.yml +++ /dev/null @@ -1,240 +0,0 @@ -# These tasks remove all Kiali resources such that no remnants of Kiali will remain. -# -# Note that we ignore_errors everywhere - we do not want these tasks to ever abort with a failure. -# This is because these are run within a finalizer and if a failure aborts any task here -# the user will never be able to delete the Kiali CR - in fact, the delete will hang indefinitely -# and the user will need to do an ugly hack to fix it. - -- ignore_errors: yes - set_fact: - k8s_plugin: kubernetes.core.k8s - -- name: Get the original CR that was deleted - ignore_errors: yes - set_fact: - current_cr: "{{ _kiali_io_kiali }}" - -- name: Get api group information from the cluster - ignore_errors: yes - set_fact: - api_groups: "{{ lookup(k8s_plugin, cluster_info='api_groups') }}" - when: - - is_openshift == False - - is_k8s == False - -- name: Get api version information from the cluster - ignore_errors: yes - k8s_cluster_info: - register: api_status - -- name: Determine the cluster type - ignore_errors: yes - set_fact: - is_openshift: "{{ True if 'route.openshift.io' in api_groups else False }}" - is_k8s: "{{ False if 'route.openshift.io' in api_groups else True }}" - when: - - is_openshift == False - - is_k8s == False - -# Indicate what kind of cluster we are in (OpenShift or Kubernetes). -- ignore_errors: yes - debug: - msg: "CLUSTER TYPE: is_openshift={{ is_openshift }}; is_k8s={{ is_k8s }}" - -- name: Print some debug information - ignore_errors: yes - vars: - msg: | - Kiali Variables: - -------------------------------- - {{ kiali_vars_remove | to_nice_yaml }} - debug: - msg: "{{ msg.split('\n') }}" - -- name: Set default HPA api_version - ignore_errors: yes - set_fact: - kiali_vars_remove: "{{ kiali_vars_remove | combine({'deployment': {'hpa': {'api_version': 'autoscaling/v2' if (api_status.apis['autoscaling/v2'] is defined) else 'autoscaling/v2beta2' }}}, recursive=True) }}" - when: - - kiali_vars_remove.deployment.hpa.api_version == "" - -# There is an edge case where a user installed Kiali with one instance name, then changed the instance name in the CR. -# This is not allowed. When this happens, the operator will abort with an error message telling the user to uninstall Kiali. -# The user will do this by deleting the Kiali CR, at which time this ansible role is executed. 
-# In this case we must use the instance name stored in the status not the spec because the spec will have the bad name -# and the status will have the correct name that was used to initially install Kiali. -- name: Ensure the correct instance_name is used - ignore_errors: yes - set_fact: - kiali_vars_remove: "{{ kiali_vars_remove | combine({'deployment': {'instance_name': current_cr.status.deployment.instanceName}}, recursive=True) }}" - when: - - current_cr.status is defined - - current_cr.status.deployment is defined - - current_cr.status.deployment.instanceName is defined - - current_cr.status.deployment.instanceName != kiali_vars_remove.deployment.instance_name - -- name: Set default deployment namespace to the same namespace where the CR lives - ignore_errors: yes - set_fact: - kiali_vars_remove: "{{ kiali_vars_remove | combine({'deployment': {'namespace': current_cr.metadata.namespace}}, recursive=True) }}" - when: - - kiali_vars_remove.deployment.namespace is not defined or kiali_vars_remove.deployment.namespace == "" - -- name: Set default istio namespace - ignore_errors: yes - set_fact: - kiali_vars_remove: "{{ kiali_vars_remove | combine({'istio_namespace': kiali_vars_remove.deployment.namespace}, recursive=True) }}" - when: - - kiali_vars_remove.istio_namespace == "" - -- name: Define the expected label for the namespaces and signing key secret - set_fact: - kiali_instance_label_name: "{{ 'kiali.io/' + kiali_vars_remove.deployment.instance_name + '.home' }}" - kiali_instance_label_value: "{{ kiali_vars_remove.deployment.namespace }}" - -- name: Get namespaces that have Kiali roles in them - ignore_errors: yes - set_fact: - namespaces_with_kiali_roles: "{{ query(k8s_plugin, label_selector=(kiali_instance_label_name + '=' + kiali_instance_label_value), kind='Namespace', errors='ignore') | default ([]) | map(attribute='metadata.name') | list }}" - when: - - kiali_vars_remove.deployment.cluster_wide_access == False - -- name: Delete Kiali roles - ignore_errors: yes - k8s: - state: absent - definition: | - {% for namespace in namespaces_with_kiali_roles %} - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: RoleBinding - metadata: - name: "{{ kiali_vars_remove.deployment.instance_name }}" - namespace: "{{ namespace }}" - ... - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ kiali_vars_remove.deployment.instance_name }}" - namespace: "{{ namespace }}" - ... - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: Role - metadata: - name: "{{ kiali_vars_remove.deployment.instance_name }}-viewer" - namespace: "{{ namespace }}" - ... - {% endfor %} - when: - - namespaces_with_kiali_roles is defined - - namespaces_with_kiali_roles | length > 0 - -- name: Remote Kiali label from namespaces that are currently accessible - ignore_errors: yes - k8s: - state: patched - definition: | - {% for namespace in namespaces_with_kiali_roles %} - --- - apiVersion: v1 - kind: Namespace - metadata: - name: "{{ namespace }}" - labels: - {{ kiali_instance_label_name }}: null - ... 
- {% endfor %} - when: - - namespaces_with_kiali_roles is defined - - namespaces_with_kiali_roles | length > 0 - -- name: Delete Kiali cluster roles - ignore_errors: yes - k8s: - state: absent - continue_on_error: false - template: - - clusterroles-to-remove.yml - retries: 6 - delay: 10 - -- name: Delete Kiali resources - ignore_errors: yes - k8s: - state: absent - continue_on_error: false - template: - - resources-to-remove.yml - retries: 6 - delay: 10 - -- name: Unlabel the signing key secret if it exists to indicate this Kiali instance no longer uses it - ignore_errors: yes - k8s: - state: present - definition: | - apiVersion: v1 - kind: Secret - metadata: - name: "kiali-signing-key" - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - labels: - {{ kiali_instance_label_name }}: null - -- name: Delete the signing key secret if no other Kiali installation is using it - ignore_errors: yes - vars: - signing_key_secret_labels: "{{ lookup(k8s_plugin, namespace=kiali_vars_remove.deployment.namespace, kind='Secret', resource_name='kiali-signing-key', api_version='v1') | default({}) | json_query('metadata.labels') }}" - k8s: - state: absent - definition: - apiVersion: v1 - kind: Secret - metadata: - name: kiali-signing-key - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - when: - - (signing_key_secret_labels is not defined) or (signing_key_secret_labels | length == 0) or (signing_key_secret_labels | dict2items | selectattr('key', 'match', 'kiali.io/.*\.home') | list | length == 0) - -- name: Delete OpenShift-specific Kiali resources - ignore_errors: yes - k8s: - state: absent - continue_on_error: false - template: - - os-resources-to-remove.yml - retries: 6 - delay: 10 - when: - - is_openshift == True - -# If it is Removed, the web console is disabled. -# See: https://docs.openshift.com/container-platform/4.13/web_console/disabling-web-console.html -- name: Determine if OpenShift Console is installed and enabled - ignore_errors: yes - vars: - console_res: "{{ query(k8s_plugin, resource_name='cluster', api_version='operator.openshift.io/v1', kind='Console', errors='ignore') }}" - set_fact: - has_openshift_console: "{{ console_res | length > 0 and console_res[0].spec.managementState != 'Removed' }}" - when: - - is_openshift == True - -- name: Delete OpenShift-specific Kiali ConsoleLinks - ignore_errors: yes - k8s: - state: absent - definition: | - {% for cl in query(k8s_plugin, kind='ConsoleLink', label_selector=(kiali_instance_label_name + '=' + kiali_instance_label_value)) %} - --- - apiVersion: "{{ cl.apiVersion }}" - kind: "{{ cl.kind }}" - metadata: - name: "{{ cl.metadata.name }}" - ... 
- {% endfor %} - when: - - is_openshift == True - - has_openshift_console is defined - - has_openshift_console == True diff --git a/roles/v2.1/kiali-remove/tasks/os-resources-to-remove.yml b/roles/v2.1/kiali-remove/tasks/os-resources-to-remove.yml deleted file mode 100644 index 81d0ce07f..000000000 --- a/roles/v2.1/kiali-remove/tasks/os-resources-to-remove.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -apiVersion: oauth.openshift.io/v1 -kind: OAuthClient -metadata: - name: {{ kiali_vars_remove.deployment.instance_name + '-' + kiali_vars_remove.deployment.namespace }} ---- -apiVersion: route.openshift.io/v1 -kind: Route -metadata: - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - name: {{ kiali_vars_remove.deployment.instance_name }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - name: {{ kiali_vars_remove.deployment.instance_name }}-cabundle diff --git a/roles/v2.1/kiali-remove/tasks/resources-to-remove.yml b/roles/v2.1/kiali-remove/tasks/resources-to-remove.yml deleted file mode 100644 index f9ea3e646..000000000 --- a/roles/v2.1/kiali-remove/tasks/resources-to-remove.yml +++ /dev/null @@ -1,67 +0,0 @@ ---- -apiVersion: {{ kiali_vars_remove.deployment.hpa.api_version }} -kind: HorizontalPodAutoscaler -metadata: - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - name: {{ kiali_vars_remove.deployment.instance_name }} ---- -apiVersion: networking.k8s.io/{{ 'v1' if (lookup(k8s_plugin, kind='Ingress', api_version='networking.k8s.io/v1', errors='ignore') is iterable) else 'v1beta1' }} -kind: Ingress -metadata: - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - name: {{ kiali_vars_remove.deployment.instance_name }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - name: {{ kiali_vars_remove.deployment.instance_name }} ---- -apiVersion: v1 -kind: ReplicaSet -metadata: - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - name: {{ kiali_vars_remove.deployment.instance_name }} ---- -apiVersion: v1 -kind: Pod -metadata: - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - name: {{ kiali_vars_remove.deployment.instance_name }} ---- -apiVersion: v1 -kind: Service -metadata: - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - name: {{ kiali_vars_remove.deployment.instance_name }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - name: {{ kiali_vars_remove.deployment.instance_name }}-service-account ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - name: {{ kiali_vars_remove.deployment.instance_name }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - name: {{ kiali_vars_remove.deployment.instance_name }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - name: {{ kiali_vars_remove.deployment.instance_name }}-viewer ---- -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: "{{ kiali_vars_remove.deployment.namespace }}" - name: {{ kiali_vars_remove.deployment.instance_name }} ---- diff --git a/roles/v2.1/kiali-remove/vars/main.yml b/roles/v2.1/kiali-remove/vars/main.yml deleted file mode 100644 index 4771d535d..000000000 --- a/roles/v2.1/kiali-remove/vars/main.yml +++ /dev/null @@ -1,9 +0,0 @@ 
-kiali_vars_remove: - istio_namespace: "{{ istio_namespace | default(kiali_defaults_remove.istio_namespace) }}" - - deployment: | - {%- if deployment is defined and deployment is iterable -%} - {{ kiali_defaults_remove.deployment | combine((deployment | stripnone), recursive=True) }} - {%- else -%} - {{ kiali_defaults_remove.deployment }} - {%- endif -%} diff --git a/roles/v2.1/ossmconsole-deploy/defaults/main.yml b/roles/v2.1/ossmconsole-deploy/defaults/main.yml deleted file mode 100644 index 8dc928ce4..000000000 --- a/roles/v2.1/ossmconsole-deploy/defaults/main.yml +++ /dev/null @@ -1,26 +0,0 @@ -# Defaults for all user-facing OSSM Console settings. -# -# Note that these are under the main dictionary group "ossmconsole_defaults". -# The actual vars used by the role are found in the vars/ directory. -# These defaults (the dictionaries under "ossmconsole_defaults") are merged into the vars such that the values -# below (e.g. deployment) are merged in rather than completely replaced by user-supplied values. -# -# If new groups are added to these defaults, you must remember to add the merge code to vars/main.yml. - -ossmconsole_defaults: - version: "default" - - deployment: - imageDigest: "" - imageName: "" - imagePullPolicy: "IfNotPresent" - imagePullSecrets: [] - imageVersion: "" - namespace: "" - - kiali: - graph: - impl: "pf" - serviceName: "" - serviceNamespace: "" - servicePort: 0 diff --git a/roles/v2.1/ossmconsole-deploy/filter_plugins/stripnone.py b/roles/v2.1/ossmconsole-deploy/filter_plugins/stripnone.py deleted file mode 100644 index 4dbd53033..000000000 --- a/roles/v2.1/ossmconsole-deploy/filter_plugins/stripnone.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} - -# Process recursively the given value if it is a dict and remove all keys that have a None value -def strip_none(value): - if isinstance(value, dict): - dicts = {} - for k,v in value.items(): - if isinstance(v, dict): - dicts[k] = strip_none(v) - elif v is not None: - dicts[k] = v - return dicts - else: - return value - -# ---- Ansible filters ---- -class FilterModule(object): - def filters(self): - return { - 'stripnone': strip_none - } diff --git a/roles/v2.1/ossmconsole-deploy/meta/main.yml b/roles/v2.1/ossmconsole-deploy/meta/main.yml deleted file mode 100644 index e9334e3c7..000000000 --- a/roles/v2.1/ossmconsole-deploy/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -collections: -- kubernetes.core diff --git a/roles/v2.1/ossmconsole-deploy/tasks/main.yml b/roles/v2.1/ossmconsole-deploy/tasks/main.yml deleted file mode 100644 index edc486efb..000000000 --- a/roles/v2.1/ossmconsole-deploy/tasks/main.yml +++ /dev/null @@ -1,413 +0,0 @@ -- set_fact: - k8s_plugin: kubernetes.core.k8s - -- name: Get the original CR as-is - set_fact: - current_cr: "{{ _kiali_io_ossmconsole }}" - -- name: Find oldest CR - vars: - crs: "{{ query(k8s_plugin, kind=current_cr.kind, api_version=current_cr.apiVersion) | sort(attribute='metadata.creationTimestamp') }}" - set_fact: - oldest_ossmconsole_cr: "{{ crs[0] }}" - when: - - crs | length > 0 - -- block: - - debug: - msg: "Ignoring this CR [{{ current_cr.metadata.namespace }}/{{ current_cr.metadata.name }}]. The CR in control of OSSMC is [{{ oldest_ossmconsole_cr.metadata.namespace }}/{{ oldest_ossmconsole_cr.metadata.name }}]." 
- - include_tasks: update-status-progress.yml - vars: - status_progress_message: "Ignoring this CR. The CR in control of OSSMC is [{{ oldest_ossmconsole_cr.metadata.namespace }}/{{ oldest_ossmconsole_cr.metadata.name }}]." - - meta: end_play - when: - - oldest_ossmconsole_cr is defined - - oldest_ossmconsole_cr.metadata.name != current_cr.metadata.name or oldest_ossmconsole_cr.metadata.namespace != current_cr.metadata.namespace - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Initializing" - status_vars: - specVersion: "{{ ossmconsole_vars.version }}" - deployment: - namespace: null - kiali: - serviceName: null - serviceNamespace: null - servicePort: null - -# There are three possible values for the console managementState: Managed, Unmanaged, and Removed. -# If it is Removed, the web console is disabled and thus we cannot install OSSMC so abort immediately. -# See: https://docs.openshift.com/container-platform/4.13/web_console/disabling-web-console.html -- name: Ensure OpenShift Console is installed and enabled - vars: - console_res: "{{ query(k8s_plugin, resource_name='cluster', api_version='operator.openshift.io/v1', kind='Console', errors='ignore') }}" - fail: - msg: "The OpenShift Console is not installed and enabled. Cannot install OSSMC." - when: - - console_res | length == 0 or console_res[0].spec.managementState == "Removed" - -- name: Get information about the cluster - set_fact: - api_groups: "{{ lookup(k8s_plugin, cluster_info='api_groups') }}" - -- name: Determine the Kubernetes version - set_fact: - k8s_version: "{{ lookup(k8s_plugin, cluster_info='version').kubernetes.gitVersion | regex_replace('^v', '') }}" - ignore_errors: yes - -- name: Determine the OpenShift version - vars: - kube_apiserver_cluster_op_raw: "{{ lookup(k8s_plugin, api_version='config.openshift.io/v1', kind='ClusterOperator', resource_name='kube-apiserver') | default({}) }}" - ri_query: "status.versions[?name == 'raw-internal'].version" - set_fact: - openshift_version: "{{ kube_apiserver_cluster_op_raw | json_query(ri_query) | join }}" - -- name: Get information about the operator - k8s_info: - api_version: v1 - kind: Pod - namespace: "{{ lookup('env', 'POD_NAMESPACE') }}" - name: "{{ lookup('env', 'POD_NAME') }}" - register: operator_pod_raw - ignore_errors: yes -- name: Determine the version of the operator based on the version label - set_fact: - operator_version: "{{ operator_pod_raw.resources[0].metadata.labels.version }}" - when: - - operator_pod_raw is defined - - operator_pod_raw.resources[0] is defined - - operator_pod_raw.resources[0].metadata is defined - - operator_pod_raw.resources[0].metadata.labels is defined - - operator_pod_raw.resources[0].metadata.labels.version is defined -- set_fact: - operator_version: "unknown" - when: - - operator_version is not defined -- debug: - msg: "OPERATOR VERSION: [{{ operator_version }}]" - -- name: Print some debug information - vars: - msg: | - OSSM Console Variables: - -------------------------------- - {{ ossmconsole_vars | to_nice_yaml }} - debug: - msg: "{{ msg.split('\n') }}" - -- name: Set default deployment namespace to the same namespace where the CR lives - set_fact: - ossmconsole_vars: "{{ ossmconsole_vars | combine({'deployment': {'namespace': current_cr.metadata.namespace}}, recursive=True) }}" - when: - - ossmconsole_vars.deployment.namespace is not defined or ossmconsole_vars.deployment.namespace == "" - -- name: Do not support installing in any namespace other than where the CR lives - fail: - msg: "The 
operator currently does not support installing the plugin in any namespace other than the namespace where the CR was created." - when: - - ossmconsole_vars.deployment.namespace != current_cr.metadata.namespace - -# Never allow deployment.namespace to change to avoid leaking resources - to uninstall resources you must delete the OSSMConsole CR -- name: Ensure the deployment.namespace has not changed - fail: - msg: "The deployment.namespace cannot be changed to a different value. It was [{{ current_cr.status.deployment.namespace }}] but is now [{{ ossmconsole_vars.deployment.namespace }}]. In order to install OSSM Console with a different deployment.namespace, please uninstall OSSM Console first." - when: - - current_cr.status is defined - - current_cr.status.deployment is defined - - current_cr.status.deployment.namespace is defined - - current_cr.status.deployment.namespace != ossmconsole_vars.deployment.namespace - -# If we need to auto-discover some things that require the Kiali Route, get the route now - first look in the CR's namespace then anywhere else. If Kiali is not found, abort. -- name: Auto-discover the Kiali Route - preference goes to a Kiali installed in the same namespace as the CR - vars: - kiali_in_namespace: "{{ query(k8s_plugin, label_selector='app.kubernetes.io/name=kiali', api_version='route.openshift.io/v1', kind='Route', namespace=current_cr.metadata.namespace) }}" - kiali_anywhere: "{{ query(k8s_plugin, label_selector='app.kubernetes.io/name=kiali', api_version='route.openshift.io/v1', kind='Route') }}" - set_fact: - kiali_route: "{{ kiali_in_namespace[0] if kiali_in_namespace | length > 0 else (kiali_anywhere[0] if kiali_anywhere | length > 0 else '') }}" - when: - - ossmconsole_vars.kiali.serviceName == "" or ossmconsole_vars.kiali.serviceNamespace == "" or ossmconsole_vars.kiali.servicePort == 0 - ignore_errors: yes - -- fail: - msg: "Failed to auto-discover the Kiali Route. Make sure Kiali is installed. You can specify the full 'kiali' section in the CR if there is a Kiali installed but cannot be auto-discovered by this operator." - when: - - kiali_route is defined - - kiali_route == "" - -- name: Auto-discover the Kiali Service Name - set_fact: - kiali_service_name: "{{ kiali_route.spec.to.name }}" - when: - - ossmconsole_vars.kiali.serviceName == "" - ignore_errors: yes - -- name: Auto-discover the Kiali Service Namespace - set_fact: - kiali_service_namespace: "{{ kiali_route.metadata.namespace }}" - when: - - ossmconsole_vars.kiali.serviceNamespace == "" - ignore_errors: yes - -- name: Auto-discover the Kiali Service Port - set_fact: - kiali_service_port: "{{ kiali_route.spec.port.targetPort }}" - when: - - ossmconsole_vars.kiali.servicePort == 0 - ignore_errors: yes - -- fail: - msg: "Failed to auto-discover the Kiali Service Name. Make sure Kiali is installed. You can specify 'kiali.serviceName' in the CR if there is a Kiali Service the plugin can use but cannot be auto-discovered by this operator." - when: - - ossmconsole_vars.kiali.serviceName == "" - - kiali_service_name is not defined or kiali_service_name == "" - -- fail: - msg: "Failed to auto-discover the Kiali Service Namespace. Make sure Kiali is installed. You can specify 'kiali.serviceNamespace' in the CR if there is a Kiali Service the plugin can use but cannot be auto-discovered by this operator." - when: - - ossmconsole_vars.kiali.serviceNamespace == "" - - kiali_service_namespace is not defined or kiali_service_namespace == "" - -- fail: - msg: "Failed to auto-discover the Kiali Service Port. 
Make sure Kiali is installed. You can specify 'kiali.servicePort' in the CR if there is a Kiali Service the plugin can use but cannot be auto-discovered by this operator." - when: - - ossmconsole_vars.kiali.servicePort == 0 - - kiali_service_port is not defined or kiali_service_port == "" - -# Set the auto-discovered values that we found -- set_fact: - ossmconsole_vars: "{{ ossmconsole_vars | combine({'kiali': {'serviceName': kiali_service_name}}, recursive=True) }}" - when: - - ossmconsole_vars.kiali.serviceName == "" -- set_fact: - ossmconsole_vars: "{{ ossmconsole_vars | combine({'kiali': {'serviceNamespace': kiali_service_namespace}}, recursive=True) }}" - when: - - ossmconsole_vars.kiali.serviceNamespace == "" -- set_fact: - ossmconsole_vars: "{{ ossmconsole_vars | combine({'kiali': {'servicePort': kiali_service_port}}, recursive=True) }}" - when: - - ossmconsole_vars.kiali.servicePort == 0 - -- name: Ask Kiali for information about itself - vars: - version_url: "{{ 'https://' + ossmconsole_vars.kiali.serviceName + '.' + ossmconsole_vars.kiali.serviceNamespace + '.svc.cluster.local:' + ossmconsole_vars.kiali.servicePort + '/api' }}" - uri: - url: "{{ version_url }}" - status_code: 200 - validate_certs: false - return_content: true - register: kiali_info_results - until: - - kiali_info_results is defined - - kiali_info_results.status is defined - - kiali_info_results.status == 200 - retries: 60 - delay: 5 - ignore_errors: yes - -- name: Determine Kiali version - vars: - q: status."Kiali version" - set_fact: - kiali_version: "{{ kiali_info_results.json | json_query(q) }}" - when: - - kiali_info_results is defined - - kiali_info_results.json is defined - ignore_errors: yes - -- name: "Determine environment to store in status" - set_fact: - status_environment: "{{ status_environment | default({}) | combine({item.0: item.1}) }}" - loop: "{{ data[0] | zip(data[1]) | list }}" - vars: - data: - - ['kialiVersion', 'kubernetesVersion', 'openshiftVersion', 'operatorVersion'] - - ["{{kiali_version|default('unknown')}}", "{{k8s_version|default('')}}", "{{openshift_version|default('')}}", "{{operator_version}}"] - when: - - item.1 != "" - - item.1 != "false" - - item.1 != False - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Setting up configuration" - status_vars: - environment: "{{ status_environment | default({}) }}" - deployment: - namespace: "{{ ossmconsole_vars.deployment.namespace }}" - kiali: - serviceName: "{{ ossmconsole_vars.kiali.serviceName }}" - serviceNamespace: "{{ ossmconsole_vars.kiali.serviceNamespace }}" - servicePort: "{{ ossmconsole_vars.kiali.servicePort }}" - -# The OSSMC plugin must talk to a Kiali of the same version. -# We asked the Kiali Server what version it is, so we already have that. -# To know the OSSMC version, we assume even if the user overrides the imageName/imageVersion, that that image must -# support the same major.minor version of the playbook (i.e. spec.version). We look up the version of the playbook -# in original_supported_ossmconsole_images (which contains the actual imageVersion strings, even if they were overridden -# by the RELATED_IMAGES environment variables - we assume all RELATED_IMAGES are fixed hashs of the original imageVersion -# numbers) and obtain the imageVersion that that playbook supports and assume that is the version -# of OSSMC that is being installed - so that is the version we compare with the Kiali version. 
Note that if imageVersion -# is the string "operator_version", the supported OSSMC version is the same version as the operator itself. -# Note that we ignore any "v" prefix that may be in front of the version numbers; always just compare major.minor (v or no-v). -# If, for some reason, the user runs into an edge case where OSSMC needs to be installed even if the versions don't match, -# set the env var OSSMC_SKIP_VERSION_CHECK=true in the operator. -- name: Ensure Kiali major.minor version is supported by the version of OSSMC being installed - vars: - kiali_version_to_match: "{{ kiali_version | default('') | regex_replace('^[v]?([0-9]+.[0-9]+).*', '\\1') }}" - ossmc_version_expected: "{{ original_supported_ossmconsole_images[ossmconsole_vars.version].imageVersion | default ('') }}" - ossmc_version_to_match: "{{ ((operator_version | default('')) if ossmc_version_expected == 'operator_version' else ossmc_version_expected) | regex_replace('^[v]?([0-9]+.[0-9]+).*', '\\1') }}" - fail: - msg: "The version of the Kiali Server [{{ kiali_version_to_match }}] does not match the OSSMC version [{{ ossmc_version_to_match }}]. OSSMC will not be installed. To skip this check and allow for the install to continue, set the operator environment variable OSSMC_SKIP_VERSION_CHECK to 'true'." - when: - - kiali_version_to_match != ossmc_version_to_match - - lookup('env', 'OSSMC_SKIP_VERSION_CHECK') | default('false', True) != "true" - -- name: Only allow ad-hoc OSSM Console image when appropriate - fail: - msg: "The operator is forbidden from accepting an OSSMConsole CR that defines an ad hoc OSSM Console image [{{ ossmconsole_vars.deployment.imageName }}{{ '@' + ossmconsole_vars.deployment.imageDigest if ossmconsole_vars.deployment.imageDigest != '' else '' }}:{{ ossmconsole_vars.deployment.imageVersion }}]. Remove spec.deployment.imageName, spec.deployment.imageVersion, and spec.deployment.imageDigest from the OSSMConsole CR." - when: - - ossmconsole_vars.deployment.imageName != "" or ossmconsole_vars.deployment.imageVersion != "" or ossmconsole_vars.deployment.imageDigest != "" - - lookup('env', 'ALLOW_AD_HOC_OSSMCONSOLE_IMAGE') | default('false', True) != "true" - -- name: Default the image name to a known supported image. - set_fact: - ossmconsole_vars: "{{ ossmconsole_vars | combine({'deployment': {'imageName': supported_ossmconsole_images[ossmconsole_vars.version].imageName}}, recursive=True) }}" - when: - - ossmconsole_vars.deployment.imageName == "" -- name: Default the image version to a known supported image. 
- set_fact: - ossmconsole_vars: "{{ ossmconsole_vars | combine({'deployment': {'imageVersion': ('latest' if operator_version == 'master' else operator_version) if supported_ossmconsole_images[ossmconsole_vars.version].imageVersion == 'operator_version' else supported_ossmconsole_images[ossmconsole_vars.version].imageVersion}}, recursive=True) }}" - when: - - ossmconsole_vars.deployment.imageVersion == "" - -- name: If image version is latest then we will want to always pull - set_fact: - ossmconsole_vars: "{{ ossmconsole_vars | combine({'deployment': {'imagePullPolicy': 'Always'}}, recursive=True) }}" - when: - - ossmconsole_vars.deployment.imageVersion == "latest" - -- name: Confirm the cluster can access github.com when it needs to determine the last release of Kiali - uri: - url: https://api.github.com/repos/kiali/openshift-servicemesh-plugin/releases - when: - - ossmconsole_vars.deployment.imageVersion == "lastrelease" -- name: Determine image version when last release is to be installed - shell: echo -n $(curl -s https://api.github.com/repos/kiali/openshift-servicemesh-plugin/releases 2> /dev/null | grep "tag_name" | sed -e 's/.*://' -e 's/ *"//' -e 's/",//' | grep -v "snapshot" | sort -t "." -k 1.2g,1 -k 2g,2 -k 3g | tail -n 1) - register: github_lastrelease - when: - - ossmconsole_vars.deployment.imageVersion == "lastrelease" -- set_fact: - ossmconsole_vars: "{{ ossmconsole_vars | combine({'deployment': {'imageVersion': github_lastrelease.stdout}}, recursive=True) }}" - when: - - ossmconsole_vars.deployment.imageVersion == "lastrelease" - -- name: Determine image version when it explicitly was configured as the operator_version - set_fact: - ossmconsole_vars: "{{ ossmconsole_vars | combine({'deployment': {'imageVersion': 'latest' if operator_version == 'master' else operator_version}}, recursive=True) }}" - when: - - ossmconsole_vars.deployment.imageVersion == "operator_version" - -- fail: - msg: "Could not determine what the image version should be. Set deployment.imageVersion to a valid value" - when: - - ossmconsole_vars.deployment.imageVersion == "" or ossmconsole_vars.deployment.imageVersion == "unknown" - -# Indicate which image we are going to use. -- debug: - msg: "IMAGE_NAME={{ ossmconsole_vars.deployment.imageName }}; IMAGE VERSION={{ ossmconsole_vars.deployment.imageVersion }}" - -- name: Determine what metadata labels to apply to all created resources - vars: - version_label: "{{ (ossmconsole_vars.deployment.imageVersion) if (ossmconsole_vars.deployment.imageVersion | length <= 63) else (ossmconsole_vars.deployment.imageVersion[:60] + 'XXX') }}" - set_fact: - ossmconsole_resource_metadata_labels: - app: ossmconsole - version: "{{ version_label }}" - app.kubernetes.io/name: ossmconsole - app.kubernetes.io/version: "{{ version_label }}" - app.kubernetes.io/instance: ossmconsole - app.kubernetes.io/part-of: ossmconsole - -- name: Delete OSSM Console deployment if image is changing - this uninstalls any old version of OSSM Console that might be running - k8s: - state: absent - api_version: apps/v1 - kind: Deployment - namespace: "{{ ossmconsole_vars.deployment.namespace }}" - name: ossmconsole - when: - - current_image_name is defined and current_image_version is defined - - (current_image_name != ossmconsole_vars.deployment.imageName) or (current_image_version != ossmconsole_vars.deployment.imageVersion) - -# Get the deployment's custom annotation we set that tells us when we last updated the Deployment. 
-# We need this to ensure the Deployment we update retains this same timestamp unless changes are made -# that require a pod restart - in which case we update this timestamp. -- name: Find current deployment, if it exists - set_fact: - current_deployment: "{{ lookup(k8s_plugin, resource_name='ossmconsole', namespace=ossmconsole_vars.deployment.namespace, api_version='apps/v1', kind='Deployment') }}" - -- name: Get current deployment last-updated annotation timestamp from existing deployment - set_fact: - current_deployment_last_updated: "{{ current_deployment.spec.template.metadata.annotations['ossmconsole.kiali.io/last-updated'] if current_deployment.spec.template.metadata.annotations['ossmconsole.kiali.io/last-updated'] is defined else lookup('pipe','date') }}" - deployment_is_new: false - when: - - current_deployment is defined - - current_deployment.spec is defined - - current_deployment.spec.template is defined - - current_deployment.spec.template.metadata is defined - - current_deployment.spec.template.metadata.annotations is defined - -- name: Set current deployment last-updated annotation timestamp for new deployments - set_fact: - current_deployment_last_updated: "{{ lookup('pipe','date') }}" - deployment_is_new: true - when: - - current_deployment_last_updated is not defined - -# Now deploy all resources for the specific cluster environment - -- name: Execute for OpenShift environment - include_tasks: openshift/os-main.yml - vars: - deployment_last_updated: "{{ current_deployment_last_updated }}" - -# If something changed that can only be picked up when the OSSM Console pod starts up, then restart the pod using a rolling restart. -# We do this by checking the processed_resources_dict created by the process-resource.yml task. If there is a map key -# made of the kind (ConfigMap) with the name of our config map appended to it ("-plugin-conf"), see if that config map changed. -# If it did, we need to restart the pod so it can re-read the new config. 
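# The restart trick described in the comment above boils down to changing an annotation on the
# Deployment's pod template: because the pod template changes, Kubernetes performs a rolling
# restart of the pods. A minimal sketch of that core step, reusing the names from the surrounding
# tasks (the real task that follows first re-reads the existing Deployment and merges the new
# annotation into it, instead of sending a partial definition like this):
- name: Roll the OSSM Console pods by bumping the last-updated annotation
  kubernetes.core.k8s:
    state: present
    definition:
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: ossmconsole
        namespace: "{{ ossmconsole_vars.deployment.namespace }}"
      spec:
        template:
          metadata:
            annotations:
              ossmconsole.kiali.io/last-updated: "{{ lookup('pipe','date') }}"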
-- name: Force the OSSM Console pod to restart if necessary - vars: - keyname: "{{ 'ConfigMap-plugin-conf' }}" - updated_deployment: "{{ lookup(k8s_plugin, resource_name='ossmconsole', namespace=ossmconsole_vars.deployment.namespace, api_version='apps/v1', kind='Deployment') | combine({'spec': {'template': {'metadata': {'annotations': {'ossmconsole.kiali.io/last-updated': lookup('pipe','date') }}}}}, recursive=True) }}" - k8s: - state: "present" - definition: "{{ updated_deployment }}" - when: - - deployment_is_new == False - - processed_resources_dict is defined - - processed_resources_dict[keyname] is defined - - processed_resources_dict[keyname].changed == True - - processed_resources_dict[keyname].method == "patch" - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Enabling plugin" - status_vars: {} - -- name: Enable plugin by ensuring the OSSM Console is in the Console list of plugins - vars: - existing_plugins: "{{ lookup(k8s_plugin, resource_name='cluster', api_version='operator.openshift.io/v1', kind='Console').spec.plugins | default([]) }}" - k8s: - state: patched - api_version: operator.openshift.io/v1 - kind: Console - name: cluster - definition: - spec: - plugins: "{{ (existing_plugins | difference(['ossmconsole'])) + ['ossmconsole'] }}" - -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Finished" - status_vars: {} diff --git a/roles/v2.1/ossmconsole-deploy/tasks/openshift/os-main.yml b/roles/v2.1/ossmconsole-deploy/tasks/openshift/os-main.yml deleted file mode 100644 index 98aebb694..000000000 --- a/roles/v2.1/ossmconsole-deploy/tasks/openshift/os-main.yml +++ /dev/null @@ -1,13 +0,0 @@ -- include_tasks: update-status-progress.yml - vars: - status_progress_message: "Creating core resources" - -- name: Create OSSM Console objects on OpenShift - include_tasks: process-resource.yml - vars: - process_resource_templates: - - "templates/openshift/configmap-nginx.yaml" - - "templates/openshift/configmap-plugin.yaml" - - "templates/openshift/deployment.yaml" - - "templates/openshift/service.yaml" - - "templates/openshift/consoleplugin.yaml" \ No newline at end of file diff --git a/roles/v2.1/ossmconsole-deploy/tasks/process-resource.yml b/roles/v2.1/ossmconsole-deploy/tasks/process-resource.yml deleted file mode 100644 index 5696ca251..000000000 --- a/roles/v2.1/ossmconsole-deploy/tasks/process-resource.yml +++ /dev/null @@ -1,31 +0,0 @@ -# process all template names found in process_resource_templates - any empty strings in the list are ignored. -# This will keep a running tally of all processed resources in "processed_resources_dict". -- name: "Create OSSMConsole resources from templates" - k8s: - state: "present" - continue_on_error: false - template: "{{ process_resource_templates | select() | list }}" - register: process_resource_templates_result - retries: 6 - delay: 10 - -# Store the results of the processed resource so they can be examined later (e.g. 
to know if something changed or stayed the same) -- vars: - kinds: "{{ process_resource_templates_result.result.results | map(attribute='result.kind') | list }}" - names: "{{ process_resource_templates_result.result.results | map(attribute='result.metadata.name') | list }}" - changed: "{{ process_resource_templates_result.result.results | map(attribute='changed') | list }}" - method: "{{ process_resource_templates_result.result.results | map(attribute='method') | list }}" - thedict: "{{ processed_resources_dict | default({}) }}" - set_fact: - processed_resources_dict: | - {% for kind in kinds %} - {% set _ = thedict.update({ (kind + '-' + names[loop.index0]): {'name': names[loop.index0], 'changed': changed[loop.index0], 'method': method[loop.index0]}}) %} - {% endfor %} - {{ thedict }} - when: - - process_resource_templates_result is defined - - process_resource_templates_result | length > 0 - -- name: "Resource creation results" - debug: - msg: "{{ processed_resources_dict }}" diff --git a/roles/v2.1/ossmconsole-deploy/tasks/update-status-progress.yml b/roles/v2.1/ossmconsole-deploy/tasks/update-status-progress.yml deleted file mode 100644 index 58570bceb..000000000 --- a/roles/v2.1/ossmconsole-deploy/tasks/update-status-progress.yml +++ /dev/null @@ -1,16 +0,0 @@ -- name: Prepare status progress facts - ignore_errors: yes - set_fact: - status_progress_step: "{{ 1 if status_progress_step is not defined else (status_progress_step|int + 1) }}" - status_progress_start: "{{ ('%Y-%m-%d %H:%M:%S' | strftime) if status_progress_start is not defined else (status_progress_start) }}" - -- name: Update CR status progress field with any additional status fields - ignore_errors: yes - vars: - duration: "{{ ('%Y-%m-%d %H:%M:%S' | strftime | to_datetime) - (status_progress_start | to_datetime) }}" - operator_sdk.util.k8s_status: - api_version: "{{ current_cr.apiVersion }}" - kind: "{{ current_cr.kind }}" - name: "{{ current_cr.metadata.name }}" - namespace: "{{ current_cr.metadata.namespace }}" - status: "{{ status_vars | default({}) | combine({'progress':{'message': status_progress_step + '. 
' + status_progress_message, 'duration': duration }}, recursive=True) }}" diff --git a/roles/v2.1/ossmconsole-deploy/tasks/update-status.yml b/roles/v2.1/ossmconsole-deploy/tasks/update-status.yml deleted file mode 100644 index fa7793085..000000000 --- a/roles/v2.1/ossmconsole-deploy/tasks/update-status.yml +++ /dev/null @@ -1,8 +0,0 @@ -- name: Update CR status field - ignore_errors: yes - operator_sdk.util.k8s_status: - api_version: "{{ current_cr.apiVersion }}" - kind: "{{ current_cr.kind }}" - name: "{{ current_cr.metadata.name }}" - namespace: "{{ current_cr.metadata.namespace }}" - status: "{{ status_vars }}" diff --git a/roles/v2.1/ossmconsole-deploy/templates/openshift/configmap-nginx.yaml b/roles/v2.1/ossmconsole-deploy/templates/openshift/configmap-nginx.yaml deleted file mode 100644 index 41bbcd63a..000000000 --- a/roles/v2.1/ossmconsole-deploy/templates/openshift/configmap-nginx.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: nginx-conf - namespace: "{{ ossmconsole_vars.deployment.namespace }}" - labels: {{ ossmconsole_resource_metadata_labels }} -data: - nginx.conf: | - error_log /dev/stdout; - events {} - http { - access_log /dev/stdout; - include /etc/nginx/mime.types; - default_type application/octet-stream; - keepalive_timeout 65; - server { - listen 9443 ssl; - listen [::]:9443 ssl; - ssl_certificate /var/serving-cert/tls.crt; - ssl_certificate_key /var/serving-cert/tls.key; - - add_header oauth_token "$http_Authorization"; - - location / { - root /usr/share/nginx/html; - } - } - } diff --git a/roles/v2.1/ossmconsole-deploy/templates/openshift/configmap-plugin.yaml b/roles/v2.1/ossmconsole-deploy/templates/openshift/configmap-plugin.yaml deleted file mode 100644 index 1798e1bb4..000000000 --- a/roles/v2.1/ossmconsole-deploy/templates/openshift/configmap-plugin.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: plugin-conf - namespace: "{{ ossmconsole_vars.deployment.namespace }}" - labels: {{ ossmconsole_resource_metadata_labels }} -data: - plugin-config.json: | - { - "graph": { - "impl": "{{ ossmconsole_vars.kiali.graph.impl }}" - } - } diff --git a/roles/v2.1/ossmconsole-deploy/templates/openshift/consoleplugin.yaml b/roles/v2.1/ossmconsole-deploy/templates/openshift/consoleplugin.yaml deleted file mode 100644 index 7fd7b6246..000000000 --- a/roles/v2.1/ossmconsole-deploy/templates/openshift/consoleplugin.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: console.openshift.io/v1 -kind: ConsolePlugin -metadata: - name: ossmconsole - labels: {{ ossmconsole_resource_metadata_labels }} -spec: - displayName: "OpenShift Service Mesh Console" - backend: - service: - name: ossmconsole - namespace: "{{ ossmconsole_vars.deployment.namespace }}" - port: 9443 - basePath: "/" - type: Service - proxy: - - alias: kiali - authorization: UserToken - endpoint: - service: - name: {{ ossmconsole_vars.kiali.serviceName }} - namespace: "{{ ossmconsole_vars.kiali.serviceNamespace }}" - port: {{ ossmconsole_vars.kiali.servicePort }} - type: Service diff --git a/roles/v2.1/ossmconsole-deploy/templates/openshift/deployment.yaml b/roles/v2.1/ossmconsole-deploy/templates/openshift/deployment.yaml deleted file mode 100644 index 59c709f3b..000000000 --- a/roles/v2.1/ossmconsole-deploy/templates/openshift/deployment.yaml +++ /dev/null @@ -1,71 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ossmconsole - namespace: "{{ ossmconsole_vars.deployment.namespace }}" - labels: {{ ossmconsole_resource_metadata_labels 
}} -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: ossmconsole - app.kubernetes.io/instance: ossmconsole - template: - metadata: - name: ossmconsole - labels: {{ ossmconsole_resource_metadata_labels }} - annotations: - ossmconsole.kiali.io/last-updated: "{{ deployment_last_updated }}" - spec: -{% if ossmconsole_vars.deployment.imagePullSecrets | default([]) | length > 0 %} - imagePullSecrets: -{% for n in ossmconsole_vars.deployment.imagePullSecrets %} - - name: {{ n }} -{% endfor %} -{% endif %} - containers: - - name: ossmconsole - image: {{ ossmconsole_vars.deployment.imageName }}{{ '@' + ossmconsole_vars.deployment.imageDigest if ossmconsole_vars.deployment.imageDigest != '' else '' }}:{{ ossmconsole_vars.deployment.imageVersion }} - imagePullPolicy: {{ ossmconsole_vars.deployment.imagePullPolicy }} - ports: - - containerPort: 9443 - protocol: TCP - securityContext: - allowPrivilegeEscalation: false - privileged: false - runAsNonRoot: true - capabilities: - drop: - - ALL - volumeMounts: - - name: ossmconsole-cert-secret - readOnly: true - mountPath: /var/serving-cert - - name: nginx-conf - readOnly: true - mountPath: /etc/nginx/nginx.conf - subPath: nginx.conf - - name: plugin-conf - readOnly: true - mountPath: /usr/share/nginx/html/plugin-config.json - subPath: plugin-config.json - volumes: - - name: ossmconsole-cert-secret - secret: - secretName: ossmconsole-cert-secret - defaultMode: 420 - - name: nginx-conf - configMap: - name: nginx-conf - defaultMode: 420 - - name: plugin-conf - configMap: - name: plugin-conf - defaultMode: 420 - restartPolicy: Always - dnsPolicy: ClusterFirst - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: "25%" - maxSurge: "25%" diff --git a/roles/v2.1/ossmconsole-deploy/templates/openshift/service.yaml b/roles/v2.1/ossmconsole-deploy/templates/openshift/service.yaml deleted file mode 100644 index 7db4897e1..000000000 --- a/roles/v2.1/ossmconsole-deploy/templates/openshift/service.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: ossmconsole - namespace: "{{ ossmconsole_vars.deployment.namespace }}" - labels: {{ ossmconsole_resource_metadata_labels }} - annotations: - service.beta.openshift.io/serving-cert-secret-name: ossmconsole-cert-secret -spec: - ports: - - name: 9443-tcp - protocol: TCP - port: 9443 - targetPort: 9443 - selector: - app.kubernetes.io/name: ossmconsole - app.kubernetes.io/instance: ossmconsole - type: ClusterIP - sessionAffinity: None diff --git a/roles/v2.1/ossmconsole-deploy/vars/main.yml b/roles/v2.1/ossmconsole-deploy/vars/main.yml deleted file mode 100644 index c67b88055..000000000 --- a/roles/v2.1/ossmconsole-deploy/vars/main.yml +++ /dev/null @@ -1,30 +0,0 @@ -# These are the actual variables used by the role. You will notice it is -# one big dictionary (key="ossmconsole_vars") whose child dictionaries mimic those -# as defined in defaults/main.yml. -# The child dictionaries below will have values that are a combination of the default values -# (as found in defaults/main.yaml) and user-supplied values. -# Without this magic, a user supplying only one key/value pair in a child dictionary will -# clear out (make undefined) all the rest of the key/value pairs in that child dictionary. -# This is not what we want. We want the rest of the dictionary to keep the defaults, -# thus allowing the user to override only a subset of key/values in a dictionary. 
-# -# I found this trick at https://groups.google.com/forum/#!topic/Ansible-project/pGbRYZyqxZ4 -# I tweeked that solution a little bit because I did not want to require the user to supply -# everything under a main "ossmconsole_vars" dictionary. - -ossmconsole_vars: - version: "{{ version | default(ossmconsole_defaults.version) }}" - - deployment: | - {%- if deployment is defined and deployment is iterable -%} - {{ ossmconsole_defaults.deployment | combine((deployment | stripnone), recursive=True) }} - {%- else -%} - {{ ossmconsole_defaults.deployment }} - {%- endif -%} - - kiali: | - {%- if kiali is defined and kiali is iterable -%} - {{ ossmconsole_defaults.kiali | combine((kiali | stripnone), recursive=True) }} - {%- else -%} - {{ ossmconsole_defaults.kiali }} - {%- endif -%} diff --git a/roles/v2.1/ossmconsole-remove/defaults/main.yml b/roles/v2.1/ossmconsole-remove/defaults/main.yml deleted file mode 100644 index 71bbb33aa..000000000 --- a/roles/v2.1/ossmconsole-remove/defaults/main.yml +++ /dev/null @@ -1,3 +0,0 @@ -ossmconsole_defaults: - deployment: - namespace: "" diff --git a/roles/v2.1/ossmconsole-remove/filter_plugins/stripnone.py b/roles/v2.1/ossmconsole-remove/filter_plugins/stripnone.py deleted file mode 100644 index 4dbd53033..000000000 --- a/roles/v2.1/ossmconsole-remove/filter_plugins/stripnone.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -ANSIBLE_METADATA = { - 'metadata_version': '1.1', - 'status': ['preview'], - 'supported_by': 'community' -} - -# Process recursively the given value if it is a dict and remove all keys that have a None value -def strip_none(value): - if isinstance(value, dict): - dicts = {} - for k,v in value.items(): - if isinstance(v, dict): - dicts[k] = strip_none(v) - elif v is not None: - dicts[k] = v - return dicts - else: - return value - -# ---- Ansible filters ---- -class FilterModule(object): - def filters(self): - return { - 'stripnone': strip_none - } diff --git a/roles/v2.1/ossmconsole-remove/meta/main.yml b/roles/v2.1/ossmconsole-remove/meta/main.yml deleted file mode 100644 index e9334e3c7..000000000 --- a/roles/v2.1/ossmconsole-remove/meta/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -collections: -- kubernetes.core diff --git a/roles/v2.1/ossmconsole-remove/tasks/main.yml b/roles/v2.1/ossmconsole-remove/tasks/main.yml deleted file mode 100644 index 3eea9043f..000000000 --- a/roles/v2.1/ossmconsole-remove/tasks/main.yml +++ /dev/null @@ -1,72 +0,0 @@ -# These tasks remove all resources such that no remnants of OSSM Console will remain. -# -# Note that we ignore_errors everywhere - we do not want these tasks to ever abort with a failure. -# This is because these are run within a finalizer and if a failure aborts any task here -# the user will never be able to delete the OSSMConsole CR - in fact, the delete will hang indefinitely -# and the user will need to do an ugly hack to fix it. - -- ignore_errors: yes - set_fact: - k8s_plugin: kubernetes.core.k8s - -- name: Get the original CR that was deleted - ignore_errors: yes - set_fact: - current_cr: "{{ _kiali_io_ossmconsole }}" - -- name: Find oldest CR - vars: - crs: "{{ query(k8s_plugin, kind=current_cr.kind, api_version=current_cr.apiVersion) | sort(attribute='metadata.creationTimestamp') }}" - set_fact: - oldest_ossmconsole_cr: "{{ crs[0] }}" - when: - - crs | length > 0 - -- block: - - debug: - msg: "Ignoring this CR [{{ current_cr.metadata.namespace }}/{{ current_cr.metadata.name }}]. 
The CR in control of OSSMC is [{{ oldest_ossmconsole_cr.metadata.namespace }}/{{ oldest_ossmconsole_cr.metadata.name }}]." - - meta: end_play - when: - - oldest_ossmconsole_cr is defined - - oldest_ossmconsole_cr.metadata.name != current_cr.metadata.name or oldest_ossmconsole_cr.metadata.namespace != current_cr.metadata.namespace - -- name: Print some debug information - ignore_errors: yes - vars: - msg: | - OSSM Console Variables: - -------------------------------- - {{ ossmconsole_vars_remove | to_nice_yaml }} - debug: - msg: "{{ msg.split('\n') }}" - -- name: Set default deployment namespace to the same namespace where the CR lives - ignore_errors: yes - set_fact: - ossmconsole_vars_remove: "{{ ossmconsole_vars_remove | combine({'deployment': {'namespace': current_cr.metadata.namespace}}, recursive=True) }}" - when: - - ossmconsole_vars_remove.deployment.namespace is not defined or ossmconsole_vars_remove.deployment.namespace == "" - -- name: Disable plugin by ensuring the OSSM Console is removed from the Console list of plugins - ignore_errors: yes - vars: - existing_plugins: "{{ lookup(k8s_plugin, resource_name='cluster', api_version='operator.openshift.io/v1', kind='Console').spec.plugins | default([]) }}" - k8s: - state: patched - api_version: operator.openshift.io/v1 - kind: Console - name: cluster - definition: - spec: - plugins: "{{ existing_plugins | difference(['ossmconsole']) }}" - -- name: Delete OSSM Console resources - ignore_errors: yes - k8s: - state: absent - namespace: "{{ ossmconsole_vars_remove.deployment.namespace }}" - continue_on_error: false - template: - - resources-to-remove.yml - retries: 6 - delay: 10 diff --git a/roles/v2.1/ossmconsole-remove/tasks/resources-to-remove.yml b/roles/v2.1/ossmconsole-remove/tasks/resources-to-remove.yml deleted file mode 100644 index 5990ebbe5..000000000 --- a/roles/v2.1/ossmconsole-remove/tasks/resources-to-remove.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: "{{ ossmconsole_vars_remove.deployment.namespace }}" - name: nginx-config ---- -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: "{{ ossmconsole_vars_remove.deployment.namespace }}" - name: plugin-config ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - namespace: "{{ ossmconsole_vars_remove.deployment.namespace }}" - name: ossmconsole ---- -apiVersion: v1 -kind: Service -metadata: - namespace: "{{ ossmconsole_vars_remove.deployment.namespace }}" - name: ossmconsole ---- -apiVersion: console.openshift.io/v1alpha1 -kind: ConsolePlugin -metadata: - name: ossmconsole diff --git a/roles/v2.1/ossmconsole-remove/vars/main.yml b/roles/v2.1/ossmconsole-remove/vars/main.yml deleted file mode 100644 index f2bbfbc29..000000000 --- a/roles/v2.1/ossmconsole-remove/vars/main.yml +++ /dev/null @@ -1,7 +0,0 @@ -ossmconsole_vars_remove: - deployment: | - {%- if deployment is defined and deployment is iterable -%} - {{ ossmconsole_defaults.deployment | combine((deployment | stripnone), recursive=True) }} - {%- else -%} - {{ ossmconsole_defaults.deployment }} - {%- endif -%}
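The vars files for both the deploy and remove roles rely on the same defaults-merge pattern: a user-supplied section is passed through the stripnone filter and then combined recursively into the role defaults, so supplying a single key does not wipe out the rest of that section. A minimal sketch of that behavior, assuming the stripnone filter plugin shown above is on the filter path; the default values here are illustrative only (imagePullPolicy "IfNotPresent" is not taken from the real defaults/main.yml), and only namespace is overridden while the other default survives the merge:

- name: Show how a partial user override merges into the role defaults
  vars:
    ossmconsole_defaults:
      deployment:
        namespace: ""                      # illustrative defaults, not the real defaults/main.yml
        imagePullPolicy: "IfNotPresent"
    deployment:
      namespace: "ossmconsole"             # the only key the user supplies
  debug:
    msg: "{{ ossmconsole_defaults.deployment | combine((deployment | stripnone), recursive=True) }}"
    # prints something like: {'namespace': 'ossmconsole', 'imagePullPolicy': 'IfNotPresent'}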