diff --git a/byoh-reference/admin-tools/kustomization.yaml b/byoh-reference/admin-tools/kustomization.yaml
new file mode 100644
index 0000000..7c415e6
--- /dev/null
+++ b/byoh-reference/admin-tools/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+  - ../base
+
+transformers:
+  - site-values.yaml
diff --git a/byoh-reference/admin-tools/site-values.yaml b/byoh-reference/admin-tools/site-values.yaml
new file mode 100644
index 0000000..45c150a
--- /dev/null
+++ b/byoh-reference/admin-tools/site-values.yaml
@@ -0,0 +1,14 @@
+apiVersion: openinfradev.github.com/v1
+kind: HelmValuesTransformer
+metadata:
+  name: site
+
+global:
+  clusterName: cluster.local
+
+charts:
+- name: keycloak-operator
+  override:
+- name: keycloak-resources
+  override:
+    keycloak.externalDatabase.address: postgresql.decapod-db.svc.$(clusterName)
diff --git a/byoh-reference/decapod-controller/kustomization.yaml b/byoh-reference/decapod-controller/kustomization.yaml
new file mode 100644
index 0000000..7c415e6
--- /dev/null
+++ b/byoh-reference/decapod-controller/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+  - ../base
+
+transformers:
+  - site-values.yaml
diff --git a/byoh-reference/decapod-controller/site-values.yaml b/byoh-reference/decapod-controller/site-values.yaml
new file mode 100644
index 0000000..6fb83a4
--- /dev/null
+++ b/byoh-reference/decapod-controller/site-values.yaml
@@ -0,0 +1,6 @@
+apiVersion: openinfradev.github.com/v1
+kind: HelmValuesTransformer
+metadata:
+  name: site
+
+charts: []
diff --git a/byoh-reference/lma/kustomization.yaml b/byoh-reference/lma/kustomization.yaml
new file mode 100644
index 0000000..7c415e6
--- /dev/null
+++ b/byoh-reference/lma/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+  - ../base
+
+transformers:
+  - site-values.yaml
diff --git a/byoh-reference/lma/site-values.yaml b/byoh-reference/lma/site-values.yaml
new file mode 100644
index 0000000..369a696
--- /dev/null
+++ b/byoh-reference/lma/site-values.yaml
@@ -0,0 +1,235 @@
+apiVersion: openinfradev.github.com/v1
+kind: HelmValuesTransformer
+metadata:
+  name: site
+
+global:
+  nodeSelector:
+    taco-lma: enabled
+  clusterName: cluster.local
+  storageClassName: taco-storage
+  repository: https://openinfradev.github.io/helm-repo/
+  serviceScrapeInterval: 30s
+  defaultPassword: password
+  defaultUser: taco
+  thanosObjstoreSecret: taco-objstore-secret
+  thanosPrimaryCluster: false
+  # servicemesh dashboard and grafana
+  realms: 04a70f29
+  serviceDomain: taco-cat.xyz
+  keycloakDomain: keycloak-eom.taco-cat.xyz
+  grafanaClientSecret: JLtsanYtrCg21RGxrcVmQP0GeuDFUhpA
+
+charts:
+- name: prometheus-operator
+  override:
+    prometheusOperator.nodeSelector: $(nodeSelector)
+
+- name: eck-operator
+
+- name: prometheus
+  override:
+    prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.storageClassName: $(storageClassName)
+    prometheus.prometheusSpec.storageSpec.volumeClaimTemplate.spec.resources.requests.storage: 20Gi
+    prometheus.prometheusSpec.retention: 2d
+    prometheus.prometheusSpec.externalLabels.taco_cluster: $(clusterName)
+    prometheus.prometheusSpec.nodeSelector: $(nodeSelector)
+
+    alertmanager.service.type: NodePort
+    alertmanager.service.nodePort: 30111
+    alertmanager.alertmanagerSpec.alertmanagerConfigSelector.matchLabels.alertmanagerConfig: example
+    alertmanager.alertmanagerSpec.nodeSelector: $(nodeSelector)
+    alertmanager.alertmanagerSpec.retention: 2h
+    alertmanager.config.global.slack_api_url: https://hooks.slack.com/services/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
+
+- name: prometheus-node-exporter
+  override:
+    hostNetwork: false
+
+- name: kube-state-metrics
+  override:
+    nodeSelector: $(nodeSelector)
+
+- name: prometheus-pushgateway
+  override:
+    nodeSelector: $(nodeSelector)
+
+- name: prometheus-process-exporter
+  override:
+    conf.processes: dockerd,kubelet,kube-proxy,ntpd,node
+    pod.hostNetwork: false
+
+- name: eck-resource
+  override:
+    kibana.nodeSelector: $(nodeSelector)
+    kibana.server.basePath: /kibana
+    kibana.readinessPath: /kibana/login
+
+    elasticsearch.nodeSets.master.nodeSelector: $(nodeSelector)
+    elasticsearch.nodeSets.master.count: 1
+    elasticsearch.nodeSets.master.javaOpts: "-Xms1g -Xmx1g"
+    elasticsearch.nodeSets.master.limitCpu: 2
+    elasticsearch.nodeSets.master.limitMem: 4Gi
+    elasticsearch.nodeSets.master.pvc.storageClassName: $(storageClassName)
+    elasticsearch.nodeSets.master.pvc.size: 1Gi
+
+    elasticsearch.nodeSets.hotdata.nodeSelector: $(nodeSelector)
+    elasticsearch.nodeSets.hotdata.count: 1
+    elasticsearch.nodeSets.hotdata.javaOpts: "-Xms1g -Xmx1g"
+    elasticsearch.nodeSets.hotdata.limitCpu: 2
+    elasticsearch.nodeSets.hotdata.limitMem: 2Gi
+    elasticsearch.nodeSets.hotdata.pvc.storageClassName: $(storageClassName)
+    elasticsearch.nodeSets.hotdata.pvc.size: 10Gi
+
+    elasticsearch.nodeSets.client.enabled: false
+
+
+- name: grafana
+  override:
+    adminPassword: password
+    persistence.storageClassName: $(storageClassName)
+    sidecar.dashboards.searchNamespace: ALL
+    # grafana oidc
+    service.type: ClusterIP
+    grafana.ini.server:
+      domain: dashboard-$(realms).$(serviceDomain)
+      root_url: https://dashboard-$(realms).$(serviceDomain)/grafana
+      serve_from_sub_path: true
+    grafana.ini.auth.generic_oauth:
+      enabled: true
+      name: keycloak
+      allow_sign_up: true
+      client_id: grafana
+      client_secret: $(grafanaClientSecret)
+      scopes: openid profile email
+      auth_url: https://$(keycloakDomain)/auth/realms/$(realms)/protocol/openid-connect/auth
+      token_url: https://$(keycloakDomain)/auth/realms/$(realms)/protocol/openid-connect/token
+      api_url: https://$(keycloakDomain)/auth/realms/$(realms)/protocol/openid-connect/userinfo
+    grafana.ini.auth:
+      disable_login_form: false
+      oauth_auto_login: true
+      disable_signout_menu: true
+    grafana.ini.security:
+      allow_embedding: true
+      cookie_secure: true
+      cookie_samesite: none
+    grafana.ini.user:
+      auto_assign_org: true
+      auto_assign_org_role: Admin
+
+- name: fluentbit-operator
+  override:
+    global.base_cluster_url: $(clusterName)
+    fluentbitOperator.nodeSelector: $(nodeSelector)
+    logExporter.nodeSelector: $(nodeSelector)
+
+- name: fluentbit
+  override:
+    global.base_cluster_url: $(clusterName)
+    global.nodeSelector: $(nodeSelector)
+    fluentbit.clusterName: $(clusterName)
+    fluentbit.outputs.es.host: eck-elasticsearch-es-http.lma.svc.$(clusterName)
+    fluentbit.outputs.kafka:
+      enabled: false
+    fluentbit.nodeSelector: $(nodeSelector)
+    fluentbit.targetLogs:
+    - bufferChunkSize: 2M
+      bufferMaxSize: 5M
+      do_not_store_as_default: true
+      index: container
+      memBufLimit: 20MB
+      multi_index:
+      - index: platform
+        key: $kubernetes['namespace_name']
+        value: kube-system|lma|fed|argo|openstack|istio-system|istio-services|trident|registry
+      name: dockerlog
+      parser: docker
+      path: /var/log/containers/*.log
+      tag: kube.*
+      type: fluent
+    - index: syslog
+      name: syslog
+      parser: syslog-rfc5424
+      path: /var/log/syslog
+      tag: syslog.*
+      type: syslog
+
+- name: addons
+  override:
+    SPECIAL_VALUE: SPECIAL
+    serviceMonitor.trident:
+      enabled: false
+      interval: $(serviceScrapeInterval)
+    serviceMonitor.kubelet.interval: 30s
+    serviceMonitor.additionalScrapeConfigs:
+    metricbeat.enabled: false
+    kibanaInit.url: http://eck-kibana-dashboard-kb-http.lma.svc.$(clusterName):5601
+    grafanaDashboard.istio.enabled: false
+    grafanaDashboard.jaeger.enabled: false
+    serviceMonitor.istio.enabled: false
+    serviceMonitor.jaeger.enabled: false
+    prometheusRules.istio.aggregation.enabled: false
+    prometheusRules.istio.optimization.enabled: false
+
+- name: prometheus-adapter
+  override:
+    nodeSelector: $(nodeSelector)
+
+- name: kubernetes-event-exporter
+  override:
+    conf.default.hosts:
+    - "https://eck-elasticsearch-es-http.lma.svc.$(clusterName):9200"
+
+- name: thanos
+  override:
+    global.storageClass: $(storageClassName)
+    clusterDomain: $(clusterName)
+    existingObjstoreSecret: $(thanosObjstoreSecret)
+    query.nodeSelector: $(nodeSelector)
+    queryFrontend.nodeSelector: $(nodeSelector)
+    queryFrontend.service.type: NodePort
+    queryFrontend.service.http.nodePort: 30007
+    querier.stores:
+    - prometheus-operated.lma.svc.$(clusterName):10901
+    bucketweb.enabled: $(thanosPrimaryCluster)
+    bucketweb.nodeSelector: $(nodeSelector)
+    compactor.enabled: $(thanosPrimaryCluster)
+    compactor.nodeSelector: $(nodeSelector)
+    storegateway.nodeSelector: $(nodeSelector)
+    compactor.persistence.size: 8Gi
+    # compactor.extraFlags:
+    # - --compact.enable-vertical-compaction
+    # - --deduplication.replica-label="replica"
+    storegateway.persistence.size: 8Gi
+    ruler.enabled: $(thanosPrimaryCluster)
+    ruler.nodeSelector: $(nodeSelector)
+    ruler.alertmanagers:
+    - http://fed-master-alertmanager.lma.svc.$(clusterName):9093
+    ruler.persistence.size: 8Gi
+    minio.accessKey.password: $(defaultUser)
+    minio.secretKey.password: $(defaultPassword)
+    minio.defaultBuckets: thanos
+    minio.persistence.storageClass: $(storageClassName)
+    minio.persistence.accessMode: ReadWriteOnce
+    minio.persistence.size: 10Gi
+
+- name: thanos-config
+  override:
+    objectStorage:
+      bucketName: thanos
+      endpoint: thanos-minio.lma.svc.$(clusterName):9000
+      access_key: $(defaultUser)
+      secret_key: $(defaultPassword)
+      secretName: $(thanosObjstoreSecret)
+    sidecarsService.name: thanos-sidecars
+    sidecarsService.endpoints:
+    - 192.168.97.102 # should not be in the loopback range (127.0.0.0/8)
+
+- name: prepare-etcd-secret
+  override:
+    nodeSelector:
+      "node-role.kubernetes.io/master": ""
+    tolerations:
+    - key: "node-role.kubernetes.io/master"
+      effect: "NoSchedule"
+      operator: "Exists"
diff --git a/byoh-reference/openstack/image-values.yaml b/byoh-reference/openstack/image-values.yaml
new file mode 100644
index 0000000..7943b85
--- /dev/null
+++ b/byoh-reference/openstack/image-values.yaml
@@ -0,0 +1,281 @@
+apiVersion: openinfradev.github.com/v1
+kind: HelmValuesTransformer
+metadata:
+  name: image
+global:
+  registry: deploy001:5000
+
+charts:
+- name: glance
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      bootstrap: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      db_drop: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      db_init: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      glance_api: $(registry)/stein/ubuntu-source-glance-api:taco-0.1.0
+      glance_db_sync: $(registry)/stein/ubuntu-source-glance-api:taco-0.1.0
+      glance_metadefs_load: $(registry)/stein/ubuntu-source-glance-api:taco-0.1.0
+      glance_registry: $(registry)/stein/ubuntu-source-glance-registry:taco-0.1.0
+      glance_storage_init: $(registry)/port/ceph-config-helper:v1.14.3
+      image_repo_sync: $(registry)/docker:17.07.0
+      ks_endpoints: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      ks_service: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      ks_user: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      rabbit_init: $(registry)/rabbitmq:3.7-management
+      test: $(registry)/stein/ubuntu-source-rally:taco-0.1.0
+
+- name: heat
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      bootstrap: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      db_drop: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      db_init: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      heat_api: $(registry)/stein/ubuntu-source-heat-api:taco-0.1.0
+      heat_cfn: $(registry)/stein/ubuntu-source-heat-api:taco-0.1.0
+      heat_cloudwatch: $(registry)/stein/ubuntu-source-heat-api:taco-0.1.0
+      heat_db_sync: $(registry)/stein/ubuntu-source-heat-api:taco-0.1.0
+      heat_engine: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      heat_engine_cleaner: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      heat_purge_deleted: $(registry)/openstackhelm/heat:ocata-ubuntu_xenial
+      image_repo_sync: $(registry)/docker:17.07.0
+      ks_endpoints: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      ks_service: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      ks_user: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      rabbit_init: $(registry)/rabbitmq:3.7-management
+      test: $(registry)/stein/ubuntu-source-rally:taco-0.1.0
+
+- name: horizon
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      db_drop: $(registry)/stein/ubuntu-source-horizon:taco-0.1.0
+      db_init: $(registry)/stein/ubuntu-source-horizon:taco-0.1.0
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      horizon: $(registry)/stein/ubuntu-source-horizon:taco-0.1.0
+      horizon_db_sync: $(registry)/stein/ubuntu-source-horizon:taco-0.1.0
+      image_repo_sync: $(registry)/docker:17.07.0
+      test: $(registry)/openstackhelm/osh-selenium:latest-ubuntu_bionic
+
+- name: ingress
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      entrypoint: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      error_pages: $(registry)/google_containers/defaultbackend:1.0
+      image_repo_sync: $(registry)/docker:17.07.0
+      ingress: $(registry)/kubernetes-ingress-controller/nginx-ingress-controller:0.23.0
+      ingress_module_init: $(registry)/stein/ubuntu-source-neutron-server:taco-0.1.0
+      ingress_routed_vip: $(registry)/stein/ubuntu-source-neutron-server:taco-0.1.0
+      keepalived: $(registry)/osixia/keepalived:1.4.5
+
+- name: keystone
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      bootstrap: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      db_drop: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      db_init: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      image_repo_sync: $(registry)/docker:17.07.0
+      keystone_api: $(registry)/stein/ubuntu-source-keystone:taco-0.1.0
+      keystone_credential_cleanup: $(registry)/openstackhelm/heat:ocata-ubuntu_xenial
+      keystone_credential_rotate: $(registry)/stein/ubuntu-source-keystone:taco-0.1.0
+      keystone_credential_setup: $(registry)/stein/ubuntu-source-keystone:taco-0.1.0
+      keystone_db_sync: $(registry)/stein/ubuntu-source-keystone:taco-0.1.0
+      keystone_domain_manage: $(registry)/stein/ubuntu-source-keystone:taco-0.1.0
+      keystone_fernet_rotate: $(registry)/stein/ubuntu-source-keystone:taco-0.1.0
+      keystone_fernet_setup: $(registry)/stein/ubuntu-source-keystone:taco-0.1.0
+      ks_user: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      rabbit_init: $(registry)/rabbitmq:3.7-management
+      test: $(registry)/stein/ubuntu-source-rally:taco-0.1.0
+
+- name: libvirt
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      image_repo_sync: $(registry)/docker:17.07.0
+      libvirt: $(registry)/stein/ubuntu-source-nova-libvirt:taco-0.1.0
+
+- name: mariadb
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      error_pages: $(registry)/google_containers/defaultbackend:1.0
+      image_repo_sync: $(registry)/docker:17.07.0
+      ingress: $(registry)/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0
+      mariadb: $(registry)/openstackhelm/mariadb:10.2.18
+      mariadb_backup: $(registry)/openstackhelm/mariadb:latest-ubuntu_xenial
+      prometheus_create_mysql_user: $(registry)/mariadb:10.2.13
+      prometheus_mysql_exporter: $(registry)/prom/mysqld-exporter:v0.10.0
+      prometheus_mysql_exporter_helm_tests: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      scripted_test: $(registry)/openstackhelm/mariadb:latest-ubuntu_xenial
+
+- name: memcached
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      image_repo_sync: $(registry)/docker:17.07.0
+      memcached: $(registry)/memcached:1.5.5
+      prometheus_memcached_exporter: $(registry)/prom/memcached-exporter:v0.4.1
+
+- name: neutron
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      bootstrap: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      db_drop: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      db_init: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      image_repo_sync: $(registry)/docker:17.07.0
+      ks_endpoints: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      ks_service: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      ks_user: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      neutron_bagpipe_bgp: $(registry)/stein/ubuntu-source-neutron-server:taco-0.1.0
+      neutron_db_sync: $(registry)/stein/ubuntu-source-neutron-server:taco-0.1.0
+      neutron_dhcp: $(registry)/stein/ubuntu-source-neutron-dhcp-agent:taco-0.1.0
+      neutron_ironic_agent: $(registry)/stein/ubuntu-source-neutron-server:taco-0.1.0
+      neutron_l2gw: $(registry)/stein/ubuntu-source-neutron-server:taco-0.1.0
+      neutron_l3: $(registry)/stein/ubuntu-source-neutron-l3-agent:taco-0.1.0
+      neutron_linuxbridge_agent: $(registry)/stein/ubuntu-source-neutron-linuxbridge-agent:taco-0.1.0
+      neutron_metadata: $(registry)/stein/ubuntu-source-neutron-metadata-agent:taco-0.1.0
+      neutron_openvswitch_agent: $(registry)/stein/ubuntu-source-neutron-openvswitch-agent:taco-0.1.0
+      neutron_server: $(registry)/stein/ubuntu-source-neutron-server:taco-0.1.0.onos
+      neutron_sriov_agent: $(registry)/stein/ubuntu-source-neutron-sriov-agent:taco-0.1.0
+      neutron_sriov_agent_init: $(registry)/stein/ubuntu-source-neutron-sriov-agent:taco-0.1.0
+      purge_test: $(registry)/openstackhelm/ospurge:latest
+      rabbit_init: $(registry)/rabbitmq:3.7-management
+      test: $(registry)/xrally/xrally-openstack:1.5.0
+
+- name: nova
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      bootstrap: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      db_drop: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      db_init: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      image_repo_sync: $(registry)/docker:17.07.0
+      ks_endpoints: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      ks_service: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      ks_user: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      nova_api: $(registry)/stein/ubuntu-source-nova-api:taco-0.1.0
+      nova_cell_setup: $(registry)/stein/ubuntu-source-nova-api:taco-0.1.0
+      nova_cell_setup_init: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      nova_compute: $(registry)/stein/ubuntu-source-nova-compute:taco-0.1.0
+      nova_compute_ironic: $(registry)/stein/ubuntu-source-nova-compute-ironic:taco-0.1.0
+      nova_compute_ssh: $(registry)/stein/ubuntu-source-nova-ssh:taco-0.1.0
+      nova_conductor: $(registry)/stein/ubuntu-source-nova-conductor:taco-0.1.0
+      nova_consoleauth: $(registry)/stein/ubuntu-source-nova-consoleauth:taco-0.1.0
+      nova_db_sync: $(registry)/stein/ubuntu-source-nova-api:taco-0.1.0
+      nova_novncproxy: $(registry)/stein/ubuntu-source-nova-novncproxy:taco-0.1.0
+      nova_novncproxy_assets: $(registry)/stein/ubuntu-source-nova-novncproxy:taco-0.1.0
+      nova_placement: $(registry)/stein/ubuntu-source-nova-placement-api:taco-0.1.0
+      nova_scheduler: $(registry)/stein/ubuntu-source-nova-scheduler:taco-0.1.0
+      nova_service_cleaner: $(registry)/port/ceph-config-helper:v1.14.3
+      nova_spiceproxy: $(registry)/stein/ubuntu-source-nova-spicehtml5proxy:taco-0.1.0
+      nova_spiceproxy_assets: $(registry)/stein/ubuntu-source-nova-spicehtml5proxy:taco-0.1.0
+      rabbit_init: $(registry)/rabbitmq:3.7-management
+      test: $(registry)/stein/ubuntu-source-rally:taco-0.1.0
+
+- name: prometheus-openstack-exporter
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      image_repo_sync: $(registry)/docker:17.07.0
+      ks_user: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      prometheus_openstack_exporter: $(registry)/openstackhelm/prometheus-openstack-exporter:ubuntu_bionic-20191017
+
+- name: rabbitmq
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      image_repo_sync: $(registry)/docker:17.07.0
+      prometheus_rabbitmq_exporter: $(registry)/kbudde/rabbitmq-exporter:v0.21.0
+      prometheus_rabbitmq_exporter_helm_tests: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      rabbitmq: $(registry)/rabbitmq:3.7.13
+      rabbitmq_init: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      scripted_test: $(registry)/rabbitmq:3.7.13-management
+
+- name: ceph-provisioners
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      ceph_bootstrap: $(registry)/openstackhelm/ceph-daemon:latest-ubuntu_xenial
+      ceph_cephfs_provisioner: $(registry)/openstackhelm/ceph-cephfs-provisioner:latest-ubuntu_xenial
+      ceph_config_helper: $(registry)/port/ceph-config-helper:v1.14.3
+      ceph_rbd_provisioner: $(registry)/openstackhelm/ceph-rbd-provisioner:latest-ubuntu_xenial
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      image_repo_sync: $(registry)/docker:17.07.0
+
+- name: cinder
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      bootstrap: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      cinder_api: $(registry)/stein/ubuntu-source-cinder-api:taco-0.1.0
+      cinder_backup: $(registry)/stein/ubuntu-source-cinder-backup:taco-0.1.0
+      cinder_backup_storage_init: $(registry)/port/ceph-config-helper:v1.14.3
+      cinder_db_sync: $(registry)/stein/ubuntu-source-cinder-api:taco-0.1.0
+      cinder_scheduler: $(registry)/stein/ubuntu-source-cinder-scheduler:taco-0.1.0
+      cinder_storage_init: $(registry)/port/ceph-config-helper:v1.14.3
+      cinder_volume: $(registry)/stein/ubuntu-source-cinder-volume:taco-0.1.0
+      cinder_volume_usage_audit: $(registry)/stein/ubuntu-source-cinder-volume:taco-0.1.0
+      db_drop: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      db_init: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      image_repo_sync: $(registry)/docker:17.07.0
+      ks_endpoints: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      ks_service: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      ks_user: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      rabbit_init: $(registry)/rabbitmq:3.7-management
+      test: $(registry)/xrally/xrally-openstack:1.5.0
+
+- name: ironic
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      bootstrap: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      db_drop: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      db_init: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      image_repo_sync: $(registry)/docker:17.07.0
+      ironic_api: $(registry)/stein/ubuntu-source-ironic-api:taco-0.1.0
+      ironic_conductor: $(registry)/stein/ubuntu-source-ironic-conductor:taco-0.1.0
+      ironic_db_sync: $(registry)/stein/ubuntu-source-ironic-api:taco-0.1.0
+      ironic_manage_cleaning_network: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      ironic_pxe: $(registry)/stein/ubuntu-source-ironic-pxe:taco-0.1.0
+      ironic_pxe_http: $(registry)/nginx:1.13.3
+      ironic_pxe_init: $(registry)/stein/ubuntu-source-ironic-pxe:taco-0.1.0
+      ironic_retrive_cleaning_network: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      ironic_retrive_swift_config: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      ks_endpoints: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      ks_service: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      ks_user: $(registry)/stein/ubuntu-source-heat-engine:taco-0.1.0
+      rabbit_init: $(registry)/rabbitmq:3.7-management
+
+- name: sona
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      curl: $(registry)/os-client-docker:latest
+      onos: $(registry)/onos-sona-nightly-docker:stable
+      python: $(registry)/python:2-alpine
+
+- name: openvswitch
+  override:
+    images.pull_policy: IfNotPresent
+    images.tags:
+      openvswitch_db_server: $(registry)/kolla/ubuntu-source-openvswitch-db-server:8.0.3
+      openvswitch_vswitchd: $(registry)/kolla/ubuntu-source-openvswitch-vswitchd:8.0.3
+      dep_check: $(registry)/airshipit/kubernetes-entrypoint:v1.0.0
+      image_repo_sync: $(registry)/docker:17.07.0
diff --git a/byoh-reference/openstack/kustomization.yaml b/byoh-reference/openstack/kustomization.yaml
new file mode 100644
index 0000000..ecb90a3
--- /dev/null
+++ b/byoh-reference/openstack/kustomization.yaml
@@ -0,0 +1,6 @@
+resources:
+  - ../base
+
+transformers:
+  - site-values.yaml
+  - image-values.yaml
diff --git a/byoh-reference/openstack/site-values.yaml b/byoh-reference/openstack/site-values.yaml
new file mode 100644
index 0000000..b406e2a
--- /dev/null
+++ b/byoh-reference/openstack/site-values.yaml
@@ -0,0 +1,427 @@
+apiVersion: openinfradev.github.com/v1
+kind: HelmValuesTransformer
+metadata:
+  name: site
+
+global:
+  clusterName: cluster.local
+  storageClassName: taco-storage
+  repository: https://openinfradev.github.io/helm-repo/
+  externalIP: 172.16.53.26
+  ceph_keyring: AQBAHAdfHQ5RLhAAzynxAYUokA55ku4ZwqVJjQ==
+  cinder_keyring: AQAin8tU0CFgEhAATb7sYgtWsh+S5HEbg6MrGg==
+  rbd_secret_uuid: 582393ff-9a5c-4a2e-ae0d-86ec18c36afc
+  hypervisorInterface: eth0
+  libvirtInterface: eth0
+  adminPassword: Pa$$w0rd!
+
+charts:
+- name: ceph-provisioners
+  source:
+    repository: $(repository)
+  override:
+    deployment.ceph: false
+    deployment.client_secrets: true
+    deployment.rbd_provisioner: true
+    deployment.cephfs_provisioner: false
+    storageclass.rbd.provision_storage_class: true
+    storageclass.cephfs.provision_storage_class: false
+    conf.ceph.global.mon_host: TACO_MON_HOST
+    manifests.configmap_bin: false
+    manifests.configmap_bin_common: false
+    manifests.configmap_etc: true
+    manifests.deployment_rbd_provisioner: false
+    manifests.deployment_cephfs_provisioner: false
+    manifests.job_bootstrap: false
+    manifests.job_cephfs_client_key: false
+    manifests.job_image_repo_sync: false
+    manifests.job_namespace_client_key_cleaner: false
+    manifests.job_namespace_client_key: false
+    manifests.storageclass_cephfs: false
+    manifests.storageclass_rbd: false
+    manifests.helm_tests: false
+    endpoints.cluster_domain_suffix: $(clusterName)
+
+
+- name: cinder
+  source:
+    repository: $(repository)
+  override:
+    pod.security_context.cinder_api.pod.runAsUser: 42407
+    pod.security_context.cinder_backup.pod.runAsUser: 42407
+    pod.security_context.cinder_scheduler.pod.runAsUser: 42407
+    pod.security_context.cinder_volume.pod.runAsUser: 42407
+    pod.replicas.api: 3
+    pod.replicas.backup: 1
+    pod.replicas.scheduler: 3
+    pod.replicas.volume: 1
+    # conf.logging.loggers.keys: root
+    # conf.logging.loggers.keys: cinder
+    # conf.logging.loggers.keys: oslo_service
+    conf.logging.logger_oslo_service.level: DEBUG
+    conf.logging.logger_oslo_service.handlers: stderr
+    conf.logging.logger_oslo_service.qualname: oslo_service
+    conf.logging.logger_cinder.level: DEBUG
+    conf.ceph.admin_keyring: $(ceph_keyring)
+    conf.ceph.enabled: true
+    conf.backends.rbd.rbd_pool: cinder-volumes
+    conf.backends.rbd.rbd_secret_uuid: $(rbd_secret_uuid)
+    conf.cinder.DEFAULT.enabled_backends: rbd
+    conf.cinder.DEFAULT.default_volume_type: rbd
+    conf.cinder.DEFAULT.backup_driver: cinder.backup.drivers.ceph.CephBackupDriver
+    conf.cinder.DEFAULT.backup_ceph_user: cinder-backup
+    conf.cinder.DEFAULT.backup_ceph_pool: backups
+    conf.cinder.DEFAULT.debug: true
+    endpoints.identity.auth.admin.username: admin
+    endpoints.identity.auth.admin.password: $(adminPassword)
+    endpoints.volume.path.default: /v2/%(tenant_id)s
+    endpoints.cluster_domain_suffix: $(clusterName)
+
+- name: glance
+  source:
+    repository: $(repository)
+  override:
+    pod.security_context.glance.pod.runAsUser: 42415
+    pod.replicas.api: 1
+    pod.replicas.registry: 1
+    storage: $(storageClassName)
+    conf.ceph.enabled: true
+    conf.ceph.admin_keyring: $(ceph_keyring)
+    conf.glance.glance_store.rbd_store_replication: 3
+    conf.glance.glance_store.rbd_store_user: glance
+    conf.glance.glance_store.rbd_store_pool: images
+    conf.glance.DEFAULT.hw_scsi_model: virtio-scsi
+    conf.glance.DEFAULT.hw_disk_bus: scsi
+    conf.glance.DEFAULT.hw_qemu_guest_agent: yes
+    conf.glance.DEFAULT.os_require_quiesce: yes
+    conf.glance.DEFAULT.show_image_direct_url: true
+    conf.glance.DEFAULT.show_multiple_locations: true
+    bootstrap.enabled: true
+    bootstrap.structured.images.cirros.name: Cirros 0.5.1 64-bit
+    bootstrap.structured.images.cirros.id: 201084fc-c276-4744-8504-cb974dbb3610
+    bootstrap.structured.images.cirros.source_url: https://download.cirros-cloud.net/0.5.1/
+    bootstrap.structured.images.cirros.image_file: cirros-0.5.1-x86_64-disk.img
+    bootstrap.structured.images.cirros.private: false
+    endpoints.identity.auth.admin.username: admin
+    endpoints.identity.auth.admin.password: $(adminPassword)
+    endpoints.cluster_domain_suffix: $(clusterName)
+
+- name: heat
+  source:
+    repository: $(repository)
+  override:
+    pod.security_context.heat.pod.runAsUser: 42418
+    pod.replicas.api: 3
+    pod.replicas.cfn: 3
+    pod.replicas.cloudwatch: 3
+    pod.replicas.engine: 3
+    endpoints.identity.auth.admin.username: admin
+    endpoints.identity.auth.admin.password: $(adminPassword)
+    endpoints.cluster_domain_suffix: $(clusterName)
+
+- name: horizon
+  source:
+    repository: $(repository)
+  override:
+    endpoints.cluster_domain_suffix: $(clusterName)
+    pod.security_context.horizon.pod.runAsUser: 42420
+    pod.replicas.server: 3
+    network.node_port.enabled: true
+    network.node_port.port: 31000
+    conf.software.apache2.site_dir: /etc/apache2/sites-enabled
+    conf.horizon.apache: |
+      Listen 0.0.0.0:{{ tuple "dashboard" "internal" "web" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+      LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
+      LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" proxy
+      SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
+      CustomLog /dev/stdout combined env=!forwarded
+      CustomLog /dev/stdout proxy env=forwarded
+      <VirtualHost *:{{ tuple "dashboard" "internal" "web" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}>
+        WSGIScriptReloading On
+        WSGIDaemonProcess horizon-http processes=5 threads=1 user=horizon group=horizon display-name=%{GROUP} python-path=/var/lib/kolla/venv/lib/python2.7/site-packages
+        WSGIProcessGroup horizon-http
+        WSGIScriptAlias / /var/www/cgi-bin/horizon/django.wsgi
+        WSGIPassAuthorization On
+        RewriteEngine on
+        RewriteCond %{REQUEST_METHOD} !^(POST|PUT|GET|DELETE)
+        RewriteRule .* - [F]
+        <Location "/">
+          Require all granted
+        </Location>
+        Alias /static /var/www/html/horizon
+        <Location "/static">
+          SetHandler None
+        </Location>
+        <IfVersion >= 2.4>
+          ErrorLogFormat "%{cu}t %M"
+        </IfVersion>
+        ErrorLog /dev/stdout
+        TransferLog /dev/stdout
+        SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded
+        CustomLog /dev/stdout combined env=!forwarded
+        CustomLog /dev/stdout proxy env=forwarded
+      </VirtualHost>
+    conf.horizon.local_settings.config.openstack_neutron_network.enable_router: "True"
+    conf.horizon.local_settings.config.openstack_neutron_network.enable_quotas: "True"
+    conf.horizon.local_settings.config.openstack_neutron_network.enable_ipv6: "False"
+    conf.horizon.local_settings.config.openstack_neutron_network.enable_distributed_router: "False"
+    conf.horizon.local_settings.config.openstack_neutron_network.enable_ha_router: "True"
+    conf.horizon.local_settings.config.openstack_neutron_network.enable_lb: "True"
+    conf.horizon.local_settings.config.openstack_neutron_network.enable_firewall: "False"
+    conf.horizon.local_settings.config.openstack_neutron_network.enable_vpn: "False"
+    conf.horizon.local_settings.config.openstack_neutron_network.enable_fip_topology_check: "True"
+
+- name: ingress
+  source:
+    repository: $(repository)
+  override:
+    network.host_namespace: true
+    network.vip.manage: false
+    network.vip.mode: keepalived
+    network.vip.interface: eth0
+    network.vip.addr: 10.10.10.122/32
+    network.vip.keepalived_router_id: 49
+    monitoring.prometheus.enabled: true
+    monitoring.prometheus.ingress_exporter.scrape: true
+    monitoring.prometheus.config.worker-processes: 8
+    config.worker-processes: 8
+    pod.security_context.server.container.ingress_vip.capabilities.add: NET_ADMIN
+    pod.security_context.server.container.ingress_vip.readOnlyRootFilesystem: false
+    pod.security_context.server.container.ingress_vip.runAsUser: 0
+    pod.replicas.ingress: 3
+    pod.replicas.error_page: 1
+    conf.ingress.bind-address: 0.0.0.0
+    endpoints.ingress.port.server.default: 28080
+    endpoints.cluster_domain_suffix: $(clusterName)
+
+- name: keystone
+  source:
+    repository: $(repository)
+  override:
+    conf.keystone.DEFAULT.debug: true
+    pod.security_context.keystone.pod.runAsUser: 42425
+    pod.replicas.api: 1
+    endpoints.identity.auth.admin.username: admin
+    endpoints.identity.auth.admin.password: $(adminPassword)
+    endpoints.cluster_domain_suffix: $(clusterName)
+    pod.probes.api.api.readiness.enabled: false
+    pod.probes.api.api.liveness.enabled: false
+
+- name: libvirt
+  source:
+    repository: $(repository)
+  override:
+    release_group: null
+    network.backend:
+    - openvswitch
+    conf.ceph.enabled: true
+    conf.ceph.admin_keyring: $(ceph_keyring)
+    conf.ceph.cinder.keyring: $(cinder_keyring)
+    conf.ceph.cinder.secret_uuid: $(rbd_secret_uuid)
+    conf.libvirt.listen_addr: 0.0.0.0
+    conf.libvirt.log_level: 3
+    manifests.configmap_bin: true
+    manifests.configmap_etc: true
+    manifests.daemonset_libvirt: true
+    endpoints.cluster_domain_suffix: $(clusterName)
+
+- name: openvswitch
+  source:
+    repository: $(repository)
+  override:
+    release_group: null
+    pod.security_context.openvswitch_db_server.pod.runAsUser: 0
+    pod.security_context.openvswitch_db_server.container.server.runAsUser: 0
+    pod.user.nova.uid: 42436
+    endpoints.cluster_domain_suffix: $(clusterName)
+
+- name: mariadb
+  source:
+    repository: $(repository)
+  override:
+    pod.replicas.server: 1
+    volume.size: 40Gi
+    volume.enabled: true
+    volume.backup.class_name: $(storageClassName)
+    volume.backup.size: 10Gi
+    volume.backup.enabled: false
+    volume.class_name: $(storageClassName)
+    endpoints.cluster_domain_suffix: $(clusterName)
+
+- name: memcached
+  source:
+    repository: $(repository)
+  override:
+    pod.replicas.server: 3
+    monitoring.prometheus.enabled: true
+    monitoring.prometheus.memcached_exporter.scrape: true
+    endpoints.cluster_domain_suffix: $(clusterName)
+- name: neutron
+  source:
+    repository: $(repository)
+  override:
+    pod.replicas.server: 3
+    pod.user.neutron.uid: 42435
+    pod.security_context.neutron.pod.runAsUser: 42435
+    pod.security_context.neutron_dhcp_agent.pod.runAsUser: 42435
+    pod.security_context.neutron_dhcp_agent.container.neutron_dhcp_agent.readOnlyRootFilesystem: false
+    pod.security_context.neutron_l2gw_agent.pod.runAsUser: 42435
+    pod.security_context.neutron_bagpipe_bgp.pod.runAsUser: 42435
+    pod.security_context.neutron_l3_agent.pod.runAsUser: 42435
+    pod.security_context.neutron_l3_agent.container.neutron_l3_agent.readOnlyRootFilesystem: false
+    pod.security_context.neutron_lb_agent.pod.runAsUser: 42435
+    pod.security_context.neutron_lb_agent.container.neutron_lb_agent.readOnlyRootFilesystem: false
+    pod.security_context.neutron_metadata_agent.pod.runAsUser: 42435
+    pod.security_context.neutron_ovs_agent.pod.runAsUser: 42435
+    pod.security_context.neutron_server.pod.runAsUser: 42435
+    pod.security_context.neutron_server.container.neutron_server.readOnlyRootFilesystem: false
+    pod.security_context.neutron_sriov_agent.pod.runAsUser: 42435
+    pod.probes.dhcp_agent.dhcp_agent.readiness.enabled: false
+    pod.probes.dhcp_agent.dhcp_agent.liveness.enabled: false
+    pod.probes.l3_agent.l3_agent.readiness.enabled: false
+    pod.probes.l3_agent.l3_agent.liveness.enabled: false
+    pod.probes.metadata_agent.metadata_agent.readiness.enabled: false
+    pod.probes.metadata_agent.metadata_agent.liveness.enabled: false
+    pod.probes.ovs_agent.ovs_agent.liveness.enabled: false
+    pod.probes.sriov_agent.sriov_agent.readiness.enabled: false
+    dependencies.static.ovs_agent.pod: null
+    network.backend:
+    - openvswitch
+    network.share_namespaces: true
+    network.interface.tunnel: $(hypervisorInterface)
+    conf.auto_bridge_add.br-ex: $(libvirtInterface)
+    conf.paste.composite:neutronapi_v2_0.keystone: cors http_proxy_to_wsgi request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0
+    conf.paste.app:neutronversions.paste.app_factory: neutron.pecan_wsgi.app:versions_factory
+    conf.neutron_sudoers: |
+      # This sudoers file supports rootwrap-daemon for both Kolla and LOCI Images.
+      Defaults !requiretty
+      Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
+      neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *, /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *, /var/lib/kolla/venv/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf, /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf, /var/lib/kolla/venv/bin/privsep-helper /etc/neutron/rootwrap.conf *
+    conf.neutron.DEFAULT.core_plugin: ml2
+    conf.neutron.DEFAULT.l3_ha: false
+    conf.neutron.DEFAULT.global_physnet_mtu: 9000
+    conf.neutron.DEFAULT.service_plugins: router
+    conf.neutron.agent.root_helper: sudo /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
+    conf.neutron.placement.auth_version: v3
+    conf.neutron.placement.auth_type: password
+    conf.neutron.placement.auth_uri: http://keystone-api.openstack.svc.cluster.local:5000/v3
+    conf.neutron.placement.auth_url: http://keystone-api.openstack.svc.cluster.local:5000/v3
+    conf.neutron.placement.endpoint_type: internal
+    conf.neutron.placement.project_domain_name: service
+    conf.neutron.placement.project_name: service
+    conf.neutron.placement.user_domain_name: service
+    conf.neutron.placement.region_name: RegionOne
+    conf.neutron.placement.password: t0pNw8pN4Qs93p1InK7XWDLz8CH7a08KZ6ClafHC
+    conf.neutron.placement.username: nova
+# Using a flat network instead of DVR (jacob).
+# conf.neutron.l3_agent.DEFAULT.agent_mode: dvr_snat
+    conf.plugins.ml2_conf.ml2.mechanism_drivers: openvswitch,l2population
+    conf.plugins.ml2_conf.ml2.type_drivers: vlan,flat,vxlan
+    conf.plugins.ml2_conf.ml2.tenant_network_types: vxlan
+# Using a VLAN network instead of DVR (jacob).
+# conf.plugins.ml2_conf.ml2_type_vlan.network_vlan_ranges: "provider:110:110"
+    conf.plugins.ml2_conf.ml2_type_flat.flat_networks: provider
+    conf.plugins.openvswitch_agent.agent.tunnel_types: vxlan
+    conf.plugins.openvswitch_agent.agent.l2_population: True
+    conf.plugins.openvswitch_agent.agent.arp_responder: True
+# Using a VLAN network instead of DVR (jacob).
+# conf.plugins.openvswitch_agent.agent.enable_distributed_routing: True
+    conf.plugins.openvswitch_agent.agent.enable_distributed_routing: False
+    conf.plugins.openvswitch_agent.ovs.bridge_mappings: provider:br-ex
+    conf.plugins.openvswitch_agent.securitygroup.firewall_driver: openvswitch
+    endpoints.cluster_domain_suffix: $(clusterName)
+
+- name: nova
+  source:
+    repository: $(repository)
+  override:
+    bootstrap.structured.flavors.enabled: true
+    bootstrap.structured.flavors.options.m1_tiny.id: 0c84e220-a258-439f-a6ff-f8e9fd980025
+    network.backend:
+    - openvswitch
+    network.novncproxy.name: nova-novncproxy
+    network.novncproxy.node_port.enabled: true
+    network.novncproxy.node_port.port: 30608
+    console.novnc.compute.vncserver_proxyclient_interface: $(hypervisorInterface)
+    console.novnc.vncproxy.vncserver_proxyclient_interface: $(hypervisorInterface)
+    conf.hypervisor.host_interface: $(hypervisorInterface)
+    conf.libvirt.live_migration_interface: $(hypervisorInterface)
+    conf.ceph.enabled: true
+    conf.ceph.admin_keyring: $(ceph_keyring)
+    conf.ceph.cinder.user: cinder
+    conf.ceph.cinder.keyring: $(cinder_keyring)
+    conf.nova.DEFAULT.scheduler_default_filters: RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
+    conf.nova.DEFAULT.debug: true
+    conf.nova.DEFAULT.dhcp_domain: ""
+    conf.nova.DEFAULT.config_drive_cdrom: true
+    conf.nova.DEFAULT.config_drive_format: iso9660
+    conf.nova.DEFAULT.force_config_drive: true
+    conf.nova.DEFAULT.ram_allocation_ratio: 0.8
+    conf.nova.DEFAULT.disk_allocation_ratio: 9999.0
+    conf.nova.DEFAULT.cpu_allocation_ratio: 8.0
+    conf.nova.DEFAULT.osapi_compute_workers: 8
+    conf.nova.vnc.novncproxy_base_url: http://$(externalIP):30608/vnc_auto.html
+    conf.nova.libvirt.images_type: rbd
+    conf.nova.libvirt.rbd_user: cinder
+    conf.nova.libvirt.rbd_secret_uuid: $(rbd_secret_uuid)
+    conf.nova.libvirt.virt_type: kvm
+    conf.nova.scheduler.discover_hosts_in_cells_interval: 60
+    conf.nova.placement_database.max_retries: -1
+    conf.nova.placement_database.sync_on_startup: true
+    conf.nova.placement.region_name: RegionOne
+    conf.rootwrap_filters: null
+    endpoints.identity.auth.admin.username: admin
+    endpoints.identity.auth.admin.password: $(adminPassword)
+    pod.mandatory_access_control.type: null
+    pod.user.nova.uid: 42436
+    pod.security_context.nova.pod.runAsUser: 42436
+    pod.replicas.api_metadata: 3
+    pod.replicas.osapi: 3
+    pod.replicas.conductor: 3
+    pod.replicas.consoleauth: 3
+    pod.replicas.scheduler: 3
+    pod.replicas.novncproxy: 3
+    endpoints.cluster_domain_suffix: $(clusterName)
+
+- name: rabbitmq
+  source:
+    repository: $(repository)
+  override:
+    pod.replicas.server: 3
+# volume.class_name: rbd
+    volume.use_local_path.enabled: true
+    volume.use_local_path.host_path: /ssd-data/rabbitmq-data
+    volume.chown_on_start: true
+    volume.enabled: false
+    volume.size: 512Mi
+    monitoring.prometheus.enabled: true
+    monitoring.prometheus.rabbitmq_exporter.scrape: true
+    endpoints.cluster_domain_suffix: $(clusterName)
+
+#- name: prometheus-kube-state-metrics
+#  source:
+#    repository: $(repository)
+
+#- name: grafana
+#  source:
+#    repository: $(repository)
+
+#- name: prometheus
+#  source:
+#    repository: $(repository)
+#  override:
+#    endpoints.monitoring.auth.admin.username: admin
+#    endpoints.monitoring.auth.admin.password: $(adminPassword)
+#    storage.storage_class: $(storageClassName)
+#    storage.requests.storage: 22Gi
+
+#- name: prometheus-openstack-exporter
+#  source:
+#    repository: $(repository)
+#  override:
+#    endpoints.identity.name: keystone
+#    endpoints.identity.auth.admin.region_name: RegionOne
+#    endpoints.identity.auth.admin.username: admin
+#    endpoints.identity.auth.admin.password: $(adminPassword)
+#    dummy: dummy
+#    pod.mandatory_access_control.type: null
diff --git a/byoh-reference/sealed-secrets/kustomization.yaml b/byoh-reference/sealed-secrets/kustomization.yaml
new file mode 100644
index 0000000..7c415e6
--- /dev/null
+++ b/byoh-reference/sealed-secrets/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+  - ../base
+
+transformers:
+  - site-values.yaml
diff --git a/byoh-reference/sealed-secrets/site-values.yaml b/byoh-reference/sealed-secrets/site-values.yaml
new file mode 100644
index 0000000..6fb83a4
--- /dev/null
+++ b/byoh-reference/sealed-secrets/site-values.yaml
@@ -0,0 +1,6 @@
+apiVersion: openinfradev.github.com/v1
+kind: HelmValuesTransformer
+metadata:
+  name: site
+
+charts: []
diff --git a/byoh-reference/service-mesh/kustomization.yaml b/byoh-reference/service-mesh/kustomization.yaml
new file mode 100644
index 0000000..7c415e6
--- /dev/null
+++ b/byoh-reference/service-mesh/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+  - ../base
+
+transformers:
+  - site-values.yaml
diff --git a/byoh-reference/service-mesh/site-values.yaml b/byoh-reference/service-mesh/site-values.yaml
new file mode 100644
index 0000000..e6060eb
--- /dev/null
+++ b/byoh-reference/service-mesh/site-values.yaml
@@ -0,0 +1,62 @@
+apiVersion: openinfradev.github.com/v1
+kind: HelmValuesTransformer
+metadata:
+  name: site
+
+global:
+  clusterName: cluster.local
+  serviceMeshControlNodeSelector:
+    servicemesh: enabled
+  serviceMeshIngressNodeSelector:
+    taco-ingress-gateway: enabled
+  serviceMeshEgressNodeSelector:
+    taco-egress-gateway: enabled
+  ingressGatewayLabel: istio-ingress-gateway
+  egressGatewayLabel: istio-egress-gateway
+
+charts:
+- name: istiod
+  override:
+    pilot.traceSampling: 1.0
+
+- name: istio-ingress-gateway
+  override:
+    replicaCount: 1
+    service.type: LoadBalancer
+    resources.requests.cpu: 1000m
+    resources.requests.memory: 1024Mi
+    resources.limits.cpu: 2000m
+    resources.limits.memory: 2048Mi
+
+- name: jaeger-operator
+  override: {}
+
+- name: servicemesh-jaeger-resource
+  override:
+    sampling.param: 100
+    storage.options.es:
+      serverUrls: https://eck-elasticsearch-es-http.lma.svc:9200
+      username: taco-fluentbit
+      password: tacoword
+    elasticsearch:
+      host: eck-elasticsearch-es-http.lma.svc.$(clusterName)
+      user:
+        enabled: false
+        username: taco-jaeger
+        password: tacoword
+    query.basePath: /jaeger
+
+- name: kiali-operator
+  override: {}
+
+- name: servicemesh-kiali-resource
+  override:
+    auth.strategy: anonymous
+    externalServices.prometheus.url: http://lma-prometheus.lma.svc:9090
+    externalServices.tracing.inClusterUrl: http://jaeger-operator-jaeger-query.istio-system:16686
+    externalServices.grafana.auth.type: basic
+    externalServices.grafana.auth.username: admin
+    externalServices.grafana.auth.password: password
+    externalServices.grafana.inClusterUrl: http://grafana.lma.svc:80
+    externalServices.grafana.url: https://grafana-v2.taco-cat.xyz
+    server.webRoot: /kiali
diff --git a/byoh-reference/tks-cluster-byoh/kustomization.yaml b/byoh-reference/tks-cluster-byoh/kustomization.yaml
new file mode 100644
index 0000000..4d8d8ae
--- /dev/null
+++ b/byoh-reference/tks-cluster-byoh/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+  - ../base
+
+transformers:
+  - site-values.yaml
diff --git a/byoh-reference/tks-cluster-byoh/site-values.yaml b/byoh-reference/tks-cluster-byoh/site-values.yaml
new file mode 100644
index 0000000..6140560
--- /dev/null
+++ b/byoh-reference/tks-cluster-byoh/site-values.yaml
@@ -0,0 +1,31 @@
+apiVersion: openinfradev.github.com/v1
+kind: HelmValuesTransformer
+metadata:
+  name: site
+
+global:
+  # These values are replaced on cluster creation by the workflow.
+  clusterName: cluster.local
+charts:
+- name: cluster-api-byoh
+  override:
+    cluster.name: $(clusterName)
+    machineDeployment:
+    - name: taco
+      replicas: 1
+      selector:
+        matchLabels:
+          role: tks
+      labels:
+        servicemesh: enabled
+        taco-egress-gateway: enabled
+        taco-ingress-gateway: enabled
+        taco-lma: enabled
+    - name: normal
+      replicas: 1
+      autoscaling:
+        minSize: 1
+        maxSize: 5
+      selector:
+        matchLabels:
+          role: worker
diff --git a/byoh-reference/tks-cluster-common/kustomization.yaml b/byoh-reference/tks-cluster-common/kustomization.yaml
new file mode 100644
index 0000000..4d8d8ae
--- /dev/null
+++ b/byoh-reference/tks-cluster-common/kustomization.yaml
@@ -0,0 +1,5 @@
+resources:
+  - ../base
+
+transformers:
+  - site-values.yaml
diff --git a/byoh-reference/tks-cluster-common/site-values.yaml b/byoh-reference/tks-cluster-common/site-values.yaml
new file mode 100644
index 0000000..c0fbe02
--- /dev/null
+++ b/byoh-reference/tks-cluster-common/site-values.yaml
@@ -0,0 +1,42 @@
+apiVersion: openinfradev.github.com/v1
+kind: HelmValuesTransformer
+metadata:
+  name: site
+
+global:
+  # These values are replaced on cluster creation by the workflow.
+  clusterName: cluster.local
+charts:
+- name: kubernetes-addons
+  override:
+    cni.calico.enabled: true
+
+- name: ingress-nginx
+  override:
+    controller:
+      nodeSelector:
+        taco-lma: enabled
+      resources:
+        requests:
+          cpu: 2000m
+          memory: 4Gi
+      service:
+        externalTrafficPolicy: Local
+        annotations:
+          service.beta.kubernetes.io/aws-load-balancer-name: "taco-ingress-nlb"
+          service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
+          service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*"
+        type: LoadBalancer
+      config:
+        enable-underscores-in-headers: "true"
+        proxy-body-size: "10m"
+
+- name: cluster-autoscaler
+  override:
+    discoveryNamespace: $(clusterName)
+    discoveryClusterName: $(clusterName)
+
+- name: cluster-autoscaler-rbac
+  override:
+    deployMgmtRbacOnly:
+      targetNamespace: $(clusterName)
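
For reference, a minimal sketch of how an override above is expected to render, assuming the HelmValuesTransformer substitutes $(name) references from the overlay's global block and expands dotted keys into nested Helm values (illustrative only, not part of the patch):

# Hypothetical rendered values for the keycloak-resources override in
# byoh-reference/admin-tools/site-values.yaml, with $(clusterName)
# resolved to cluster.local and the dotted key expanded into a tree:
keycloak:
  externalDatabase:
    address: postgresql.decapod-db.svc.cluster.local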