Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

v2 Refactor fix #186

Merged
merged 9 commits into from
Oct 25, 2024
32 changes: 17 additions & 15 deletions helmcharts/global-cloud-values-aws.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,43 +3,45 @@ global:
cloud_store_provider: &cloud_store_provider "s3"
cloud_storage_region: &cloud_storage_region "ap-south-1"
dataset_api_container: ""
postgresql_backup_cloud_bucket: &backups_bucket "backups-obsrv-test-905418313089"
redis_backup_cloud_bucket: &redis_backup_cloud_bucket "backups-obsrv-test-905418313089"
velero_backup_cloud_bucket: &velero_backup_cloud_bucket "velero-obsrv-test-905418313089"
cloud_storage_bucket: &cloud_storage_bucket "obsrv-test-905418313089"
postgresql_backup_cloud_bucket: &backups_bucket "backups-obsrv-v2-dev-905418313089"
redis_backup_cloud_bucket: &redis_backup_cloud_bucket "backups-obsrv-v2-dev-905418313089"
velero_backup_cloud_bucket: &velero_backup_cloud_bucket "velero-obsrv-v2-dev-905418313089"
cloud_storage_bucket: &cloud_storage_bucket "obsrv-v2-dev-905418313089"
cloud_storage_config: |+
'{"identity":"name","credential":"key","region":"region-name"}'

spark_cloud_bucket: ""
storage_class_name: &storage_class_name "gp2"
spark_service_account_arn: ""
secor_backup_bucket: &secor_backup_bucket "telemetry_data_store"
checkpoint_bucket: &checkpoint_bucket "s3://checkpoint-obsrv-test-905418313089"
checkpoint_bucket: &checkpoint_bucket "s3://checkpoint-obsrv-v2-dev-905418313089"
deep_store_type: &deep_store_type "s3"
s3_access_key: &s3-access-key ""
s3_secret_key: &s3-secret-access-key ""
kong_annotations: &kong_annotations
service.beta.kubernetes.io/aws-load-balancer-type: nlb
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
service.beta.kubernetes.io/aws-load-balancer-eip-allocations: "eipalloc-0aa23e2d7c6e7d1f0"
service.beta.kubernetes.io/aws-load-balancer-subnets: "subnet-07f8d7579cb8fe885"
service.beta.kubernetes.io/aws-load-balancer-eip-allocations: "eipalloc-048c5d808e0cbfe90"
service.beta.kubernetes.io/aws-load-balancer-subnets: "subnet-058fb1b800efeff66"

service_accounts:
enabled: &create_sa true
secor: &secor_sa_annotation
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/test-obsrv-secor-sa-iam-role"
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/dev-obsrv-v2-secor-sa-iam-role"
dataset_api: &dataset_api_sa_annotation
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/test-obsrv-dataset-api-sa-iam-role"
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/dev-obsrv-v2-dataset-api-sa-iam-role"
druid_raw: &druid_raw_sa_annotation
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/test-obsrv-druid-raw-sa-iam-role"
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/dev-obsrv-v2-druid-raw-sa-iam-role"
flink: &flink_sa_annotation
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/test-obsrv-flink-sa-iam-role"
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/dev-obsrv-v2-flink-sa-iam-role"
postgresql_backup: &postgresql_backup_sa_annotation
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/test-obsrv-postgresql-backup-sa-iam-role"
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/dev-obsrv-v2-postgresql-backup-sa-iam-role"
redis_backup: &redis_backup_sa_annotation
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/test-obsrv-redis-backup-sa-iam-role"
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/dev-obsrv-v2-redis-backup-sa-iam-role"
s3_exporter: &s3_exporter_sa_annotation
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/test-obsrv-s3-exporter-sa-iam-role"
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/dev-obsrv-v2-s3-exporter-sa-iam-role"
spark: &spark_sa_annotation
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/test-obsrv-spark-sa-iam-role"
eks.amazonaws.com/role-arn: "arn:aws:iam::905418313089:role/dev-obsrv-v2-spark-sa-iam-role"

redis-service-account: &redis-service-account
serviceAccount:
Expand Down
16 changes: 8 additions & 8 deletions helmcharts/global-resource-values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -291,7 +291,7 @@ loki:

#druid-raw-cluster need changes in yaml files
druid-raw-cluster:
druid_brokers_pod:
druid_brokers:
resources:
limits:
cpu: 500m
Expand All @@ -300,7 +300,7 @@ druid-raw-cluster:
cpu: 250m
memory: 128Mi

druid_coordinator_pod:
druid_coordinator:
resources:
limits:
cpu: 500m
Expand All @@ -309,7 +309,7 @@ druid-raw-cluster:
cpu: 250m
memory: 500Mi

druid_overlord_pod:
druid_overlord:
resources:
limits:
cpu: 500m
Expand All @@ -318,7 +318,7 @@ druid-raw-cluster:
cpu: 250m
memory: 256Mi

druid_historical_pod:
druid_historical:
resources:
limits:
cpu: 1
Expand All @@ -331,13 +331,13 @@ druid-raw-cluster:
- ReadWriteOnce
size: 50Gi

druid_middlemanager_pod:
druid_middlemanager:
persistence:
accessModes:
- ReadWriteOnce
size: 1Gi

druid_indexer_pod:
druid_indexer:
resources:
limits:
cpu: 1
Expand All @@ -350,7 +350,7 @@ druid-raw-cluster:
- ReadWriteOnce
size: 20Gi

druid_router_pod:
druid_router:
resources:
limits:
cpu: 512m
Expand Down Expand Up @@ -500,7 +500,7 @@ secor:
requests:
cpu: 128m
memory: 512Mi
#issue running helm template


spark:
master:
Expand Down
13 changes: 13 additions & 0 deletions helmcharts/global-values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,19 @@ kube-prometheus-stack:
env:
GF_SECURITY_ADMIN_PASSWORD: *grafana-admin-password
GF_SECURITY_ADMIN_USER: *grafana-admin-user
grafana.ini:
smtp:
enabled: true
host: ""
user: ""
password: ""
from_address: ""
cert_file: ""
key_file: ""
ehlo_identity: ""
startTLS_policy: ""
skip_verify: true
from_name: "Grafana"



Expand Down
225 changes: 100 additions & 125 deletions helmcharts/obsrv/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -619,136 +619,111 @@ kube-prometheus-stack:
- target_label: __address__
replacement: s3-exporter.s3-exporter.svc.cluster.local:9340

kube-state-metrics:
namespaceOverride: "monitoring"
customLabels:
system.monitoring: "true"
metricLabelsAllowlist:
- pods=[*]
- deployments=[*]
- statefulsets=[*]
- persistentvolumeclaims=[*]
- persistentvolumes=[*]
resources:
limits:
cpu: "0.1"
memory: 256Mi
requests:
cpu: "0.1"
memory: 128Mi
releaseLabel: true
prometheus:
monitor:
enabled: true
honorLabels: true
selfMonitor:
enabled: false
kube-state-metrics:
namespaceOverride: "monitoring"
customLabels:
system.monitoring: "true"
metricLabelsAllowlist:
- pods=[*]
- deployments=[*]
- statefulsets=[*]
- persistentvolumeclaims=[*]
- persistentvolumes=[*]
resources:
limits:
cpu: "0.1"
memory: 256Mi
requests:
cpu: "0.1"
memory: 128Mi
releaseLabel: true
prometheus:
monitor:
enabled: true
honorLabels: true
selfMonitor:
enabled: false

grafana:
namespaceOverride: "monitoring"
adminPassword: prom-operator
sidecar:
dashboards:
grafana:
namespaceOverride: "monitoring"
# adminPassword: prom-operator
service:
enabled: true
label: grafana_dashboard
labelValue: "1"
searchNamespace: ALL
annotations: {}
multicluster:
global:
enabled: false
etcd:
enabled: false
provider:
allowUiUpdates: false
datasources:
portName: http-web
type: ClusterIP
serviceMonitor:
enabled: true
defaultDatasourceEnabled: true
isDefaultDatasource: true
uid: prometheus
annotations: {}
httpMethod: POST
createPrometheusReplicasDatasources: false
label: grafana_datasource
labelValue: "1"
exemplarTraceIdDestinations: {}
service:
enabled: true
portName: http-web
type: ClusterIP
serviceMonitor:
enabled: true
interval: ""
extraLabels:
system.monitoring: "true"
podLabels:
system.monitoring: "true"
resources:
limits:
cpu: 0.2
memory: 256Mi
requests:
cpu: "0.1"
memory: 128Mi
env:
GF_AUTH_GENERIC_OAUTH_ENABLED: "true"
GF_AUTH_GENERIC_OAUTH_NAME: "obsrv"
GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP: "true"
GF_AUTH_GENERIC_OAUTH_CLIENT_ID: "528806583-dev.oauth.obsrv.ai"
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET: "el642dcXd1P3v6i+hODnGrUKx9ZSWAlmXWZaEoZQI7/R3NUGQlLTnNCV"
GF_AUTH_GENERIC_OAUTH_SCOPES: "read"
GF_AUTH_GENERIC_OAUTH_AUTH_HTTP_METHOD: "POST"
GF_AUTH_GENERIC_OAUTH_USERNAME_FIELD: "email"
GF_AUTH_OAUTH_ALLOW_INSECURE_EMAIL_LOOKUP: true
GF_AUTH_SKIP_ORG_ROLE_SYNC: true
GF_SECURITY_ALLOW_EMBEDDING: "true"
GF_SERVER_ROOT_URL: "https://{{ .Values.global.domain }}/grafana"
GF_SERVER_DOMAIN: "{{ .Values.global.domain }}"
GF_SERVER_SERVE_FROM_SUBPATH: true
persistence:
enabled: true
size: 1Gi
alerting:
infra.yaml:
file: alerting/infra.yaml
api.yaml:
file: alerting/api.yaml
ingestion.yaml:
file: alerting/ingestion.yaml
node.yaml:
file: alerting/node.yaml
processing.yaml:
file: alerting/processing.yaml
storage.yaml:
file: alerting/storage.yaml

prometheus-node-exporter:
namespaceOverride: "monitoring"
fullnameOverride: node-exporter
podLabels:
system.monitoring: "true"
jobLabel: node-exporter
resources:
limits:
cpu: "0.1"
memory: 256Mi
requests:
cpu: "0.1"
memory: 128Mi
releaseLabel: true
extraArgs:
- --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
- --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
service:
portName: http-metrics
prometheus:
monitor:
interval: ""
extraLabels:
system.monitoring: "true"
podLabels:
system.monitoring: "true"
resources:
limits:
cpu: 0.2
memory: 256Mi
requests:
cpu: "0.1"
memory: 128Mi
env:
GF_AUTH_GENERIC_OAUTH_ENABLED: "true"
GF_AUTH_GENERIC_OAUTH_NAME: "obsrv"
GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP: "true"
GF_AUTH_GENERIC_OAUTH_CLIENT_ID: "528806583-dev.oauth.obsrv.ai"
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET: "el642dcXd1P3v6i+hODnGrUKx9ZSWAlmXWZaEoZQI7/R3NUGQlLTnNCV"
GF_AUTH_GENERIC_OAUTH_SCOPES: "read"
GF_AUTH_GENERIC_OAUTH_AUTH_HTTP_METHOD: "POST"
GF_AUTH_GENERIC_OAUTH_USERNAME_FIELD: "email"
GF_AUTH_OAUTH_ALLOW_INSECURE_EMAIL_LOOKUP: true
GF_AUTH_SKIP_ORG_ROLE_SYNC: true
GF_SECURITY_ALLOW_EMBEDDING: "true"
GF_SERVER_ROOT_URL: "https://{{ .Values.global.domain }}/grafana"
GF_SERVER_DOMAIN: "{{ .Values.global.domain }}"
GF_SERVER_SERVE_FROM_SUBPATH: true
persistence:
enabled: true
size: 1Gi
alerting:
infra.yaml:
file: alerting/infra.yaml
api.yaml:
file: alerting/api.yaml
ingestion.yaml:
file: alerting/ingestion.yaml
node.yaml:
file: alerting/node.yaml
processing.yaml:
file: alerting/processing.yaml
storage.yaml:
file: alerting/storage.yaml

prometheus-node-exporter:
namespaceOverride: "monitoring"
fullnameOverride: node-exporter
podLabels:
system.monitoring: "true"
jobLabel: node-exporter
scrapeTimeout: ""
proxyUrl: ""
rbac:
pspEnabled: false
resources:
limits:
cpu: "0.1"
memory: 256Mi
requests:
cpu: "0.1"
memory: 128Mi
releaseLabel: true
extraArgs:
- --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
- --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
service:
portName: http-metrics
prometheus:
monitor:
enabled: true
jobLabel: node-exporter
scrapeTimeout: ""
proxyUrl: ""
rbac:
pspEnabled: false

druid-raw-cluster:
namespace: &druidns "druid-raw"
Expand Down
2 changes: 1 addition & 1 deletion helmcharts/services/dataset-api/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ env:
denorm_redis_port: "{{ .Values.global.redis_denorm.port }}"
dedup_redis_host: "{{ .Values.global.redis_dedup.host }}"
dedup_redis_port: "{{ .Values.global.redis_dedup.port }}"
cloud_storage_provider: "{{ .Values.global.storage_provider }}"
cloud_storage_provider: "{{ .Values.global.cloud_storage_provider }}"
cloud_storage_region: "{{ .Values.global.cloud_storage_region }}"
container: "{{ .Values.global.dataset_api_container }}"
container_prefix: "connector-registry"
Expand Down
Loading