Separate startup scripts and application config from YAML files #149

Merged · 7 commits · Oct 30, 2018
8 changes: 8 additions & 0 deletions charts/tidb-cluster/templates/_helpers.tpl
@@ -14,3 +14,11 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}

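{{/*
helm-toolkit.utils.template takes a two-element list (template file name, context),
rewrites the path of the currently rendering template so its base file name is
replaced by the given name, and includes the resulting template with that context.
This lets a template render a sibling "_*.tpl" file from its own directory.
*/}}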
{{- define "helm-toolkit.utils.template" -}}
{{- $name := index . 0 -}}
{{- $context := index . 1 -}}
{{- $last := base $context.Template.Name }}
{{- $wtf := $context.Template.Name | replace $last $name -}}
{{ include $wtf $context }}
{{- end -}}
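A minimal sketch of how a sibling template could consume this helper to render one of the new config .tpl files into a ConfigMap; the file name drainer-configmap.yaml, the ConfigMap name, and the data key below are illustrative assumptions, not taken from this diff:

# hypothetical templates/config/drainer-configmap.yaml, in the same directory as _drainer-config.tpl
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ .Values.clusterName }}-drainer
data:
  drainer-config: |-
{{ include "helm-toolkit.utils.template" (list "_drainer-config.tpl" .) | indent 4 }}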
602 changes: 602 additions & 0 deletions charts/tidb-cluster/templates/config/_alert-rules-config.tpl

Large diffs are not rendered by default.

78 changes: 78 additions & 0 deletions charts/tidb-cluster/templates/config/_drainer-config.tpl
@@ -0,0 +1,78 @@
# drainer Configuration.

# addr (i.e. 'host:port') to listen on for drainer connections;
# this addr will be registered into etcd
# addr = "127.0.0.1:8249"

# the interval (in seconds) at which to detect the pumps' status
detect-interval = {{ .Values.binlog.drainer.detectInterval | default 10 }}

# drainer metadata directory path
data-dir = "/data"

# a comma-separated list of PD endpoints
pd-urls = "http://{{ .Values.clusterName }}-pd:2379"

#[security]
# Path of file that contains list of trusted SSL CAs for connection with cluster components.
# ssl-ca = "/path/to/ca.pem"
# Path of file that contains X509 certificate in PEM format for connection with cluster components.
# ssl-cert = "/path/to/pump.pem"
# Path of file that contains X509 key in PEM format for connection with cluster components.
# ssl-key = "/path/to/pump-key.pem"

# syncer Configuration.
[syncer]

# do not sync these schemas
ignore-schemas = "{{ .Values.binlog.drainer.ignoreSchemas | default "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql,test" }}"

# number of binlog events in a transaction batch
txn-batch = {{ .Values.binlog.drainer.txnBatch | default 1 }}

# worker count for executing binlogs
worker-count = {{ .Values.binlog.drainer.workerCount | default 1 }}

disable-dispatch = {{ .Values.binlog.drainer.disableDispatch | default false }}

# safe mode splits an update into a delete plus an insert
safe-mode = {{ .Values.binlog.drainer.safeMode | default false }}

# downstream storage, equal to --dest-db-type
# valid values are "mysql", "pb", "tidb", "flash", "kafka"
db-type = "{{ .Values.binlog.drainer.destDBType }}"

## replicate-do-db has priority over replicate-do-table when they share the same db name.
## Regular expressions are supported: a value starting with '~' is treated as a regex.
#
#replicate-do-db = ["~^b.*","s1"]
#[[syncer.replicate-do-table]]
#db-name ="test"
#tbl-name = "log"

#[[syncer.replicate-do-table]]
#db-name ="test"
#tbl-name = "~^a.*"

{{- if eq .Values.binlog.drainer.destDBType "pb" }}
# Used when db-type is pb or sql.
# compression compresses the output files (e.g. pb and sql files); only the "gzip" algorithm is supported.
# Leave it empty to disable compression.
[syncer.to]
dir = "/data/pb"
compression = "gzip"
{{- end }}


{{- if eq .Values.binlog.drainer.destDBType "kafka" }}
# when db-type is kafka, this configures the downstream Kafka; it serves as the global default Kafka config
[syncer.to]
# only one of zookeeper-addrs and kafka-addrs needs to be configured; the Kafka addresses will be discovered via ZooKeeper when zookeeper-addrs is set
{{- if .Values.binlog.drainer.kafka.zookeeperAddrs }}
zookeeper-addrs = {{ .Values.binlog.drainer.kafka.zookeeperAddrs }}
{{- end }}
{{- if .Values.binlog.drainer.kafka.kafkaAddrs }}
kafka-addrs = {{ .Values.binlog.drainer.kafka.kafkaAddrs }}
{{- end }}
kafka-version = {{ .Values.binlog.drainer.kafka.kafkaVersion | default "0.8.2.0" }}
{{- end }}
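For reference, a values sketch covering the keys this drainer template reads; the numeric and boolean defaults mirror the | default fallbacks above, while the remaining values are illustrative and may differ from the chart's shipped values.yaml:

clusterName: demo
binlog:
  drainer:
    detectInterval: 10
    ignoreSchemas: "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql,test"
    txnBatch: 1
    workerCount: 1
    disableDispatch: false
    safeMode: false
    destDBType: "kafka"
    kafka:
      # zookeeper-addrs, kafka-addrs and kafka-version are interpolated without
      # surrounding quotes in the template above, so string values should carry
      # their own quotes for the rendered TOML to stay valid
      zookeeperAddrs: '"zk-0.zookeeper:2181"'
      kafkaVersion: '"0.8.2.0"'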