# drainer Configuration.
# addr (i.e. 'host:port') to listen on for drainer connections.
# this addr will be registered into etcd.
addr = "127.0.0.1:8249"
# addr (i.e. 'host:port') to advertise to the public
advertise-addr = ""
# the interval (in seconds) at which drainer detects the pumps' status
detect-interval = 10
# drainer metadata directory path
data-dir = "data.drainer"
# a comma-separated list of PD endpoints
pd-urls = "http://127.0.0.1:2379"
# Use the specified compressor to compress payloads between pump and drainer
compressor = ""
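# A minimal example, assuming gzip is among the compressors supported by this build:
# compressor = "gzip"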
#[security]
# Path of file that contains list of trusted SSL CAs for connection with cluster components.
# ssl-ca = "/path/to/ca.pem"
# Path of file that contains X509 certificate in PEM format for connection with cluster components.
# ssl-cert = "/path/to/drainer.pem"
# Path of file that contains X509 key in PEM format for connection with cluster components.
# ssl-key = "/path/to/drainer-key.pem"
# syncer Configuration.
[syncer]
# Assume the upstream sql-mode.
# If set, drainer uses this sql-mode to parse DDL statements, and sets the same sql-mode at the downstream when db-type is mysql.
# If not set, no sql-mode is applied.
# sql-mode = "STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION"
# number of binlog events in a transaction batch
txn-batch = 20
# whether to sync DDL to the downstream db
sync-ddl = true
# This variable works in a dual-A (active-active) scenario. If it is false, all upstream data
# is replicated to the downstream, except for the filtered tables.
# If it is true (with channel-id set at the same time), binlogs carrying a mark table update
# whose channel ID equals this drainer's channel-id are treated as loopback data and are not
# replicated to the downstream. Therefore, in a dual-A scenario, the channel IDs on both sides
# must be set to the same value.
loopback-control = false
# channel-id takes effect only when loopback control is turned on.
# In the dual-A scenario, the channel ID used for downstream-to-upstream replication and the one
# used for upstream-to-downstream replication must be set to the same value to avoid loopback synchronization.
channel-id = 1
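# A sketch of a symmetric dual-A setup (values are illustrative): the drainer in cluster A
# and the drainer in cluster B both enable loopback control and share one channel ID, so each
# side recognizes and skips the rows the other side wrote:
# loopback-control = true
# channel-id = 1    # must be identical in the configs of both drainers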
# worker count to execute binlogs
# if the latency between drainer and the downstream (mysql or tidb) is too high, you might want to increase this
# to get higher throughput through more concurrent writes to the downstream
worker-count = 16
# enable-dispatch = true
# safe mode will split an update into a delete and an insert
safe-mode = false
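# An illustrative (hypothetical table) view of safe mode: an upstream update such as
#   UPDATE t SET v = 2 WHERE id = 1
# would be replayed downstream as a delete followed by an insert, e.g.
#   DELETE FROM t WHERE id = 1;
#   INSERT INTO t (id, v) VALUES (1, 2);
# which keeps replays idempotent at the cost of extra writes.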
# downstream storage, equal to --dest-db-type
# valid values are "mysql", "file", "tidb", "kafka"
db-type = "mysql"
# ignore syncing the txns with the specified commit ts to the downstream
ignore-txn-commit-ts = []
# do not sync these schemas
ignore-schemas = "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql"
## replicate-do-db takes priority over replicate-do-table if they have the same db name.
## Regular expressions are supported: a value starting with '~' is treated as a regular expression.
#
#replicate-do-db = ["~^b.*","s1"]
[syncer.relay]
# directory of relay logs. An empty string disables the relay log.
# the relay log works only if the downstream is TiDB/MySQL.
# log-dir = ""
# max file size (in bytes) of each relay log; 10485760 = 10 MiB
# max-file-size = 10485760
#[[syncer.replicate-do-table]]
#db-name = "test"
#tbl-name = "log"
#[[syncer.replicate-do-table]]
#db-name = "test"
#tbl-name = "~^a.*"
# do not sync these tables
#[[syncer.ignore-table]]
#db-name = "test"
#tbl-name = "log"
# the downstream database using the mysql protocol
[syncer.to]
host = "127.0.0.1"
user = "root"
password = ""
# if encrypted_password is not empty, password will be ignored.
encrypted_password = ""
port = 3306
# 1: SyncFullColumn, 2: SyncPartialColumn
# when set to SyncPartialColumn, drainer allows the downstream schema
# to have more or fewer columns and relaxes the sql mode by removing STRICT_TRANS_TABLES.
# sync-mode = 1
#
# Uncomment this part if you need TLS to connect to the downstream MySQL/TiDB.
# You can specify only `ssl-ca` if there is no client certificate and the server does not need to authenticate the client.
# [syncer.to.security]
# Path of file that contains list of trusted SSL CAs.
# ssl-ca = "/path/to/ca.pem"
# Path of file that contains X509 certificate in PEM format.
# ssl-cert = "/path/to/drainer.pem"
# Path of file that contains X509 key in PEM format.
# ssl-key = "/path/to/drainer-key.pem"
# The common names allowed to connect to cluster components.
# cert-allowed-cn = ["binlog"]
[syncer.to.checkpoint]
# only mysql and tidb are supported now; you can uncomment this to control where the checkpoint is saved.
# by default, where the checkpoint is saved depends on db-type:
# mysql/tidb -> the corresponding downstream mysql/tidb
# file/kafka -> a file in `data-dir`
# type = "mysql"
# you can uncomment this to change the database in which the checkpoint is saved when the checkpoint type is mysql or tidb
# schema = "tidb_binlog"
# host = "127.0.0.1"
# user = "root"
# if encrypted_password is not empty, password will be ignored.
# encrypted_password = ""
# password = ""
# port = 3306
# [syncer.to.checkpoint.security]
# Path of file that contains list of trusted SSL CAs.
# ssl-ca = "/path/to/ca.pem"
# Path of file that contains X509 certificate in PEM format.
# ssl-cert = "/path/to/drainer.pem"
# Path of file that contains X509 key in PEM format.
# ssl-key = "/path/to/drainer-key.pem"
# The common names allowed to connect to cluster components.
# cert-allowed-cn = ["binlog"]
# Uncomment this if you want to use file as db-type.
#[syncer.to]
# directory to save binlog files; defaults to data-dir (where the checkpoint file is saved) if this is not configured.
# dir = "data.drainer"
#
# an integer value that controls how long (in days) the saved binlog data is retained.
# the default value is 0. If this value is set to 0 or less, drainer will never gc saved binlog files.
# Otherwise, drainer checks the saved binlogs every hour and erases those whose last modification time is more than retention-time days ago.
# However, drainer will never gc the newest binlog file.
# retention-time = 7
# when db-type is kafka, you can uncomment this to configure the downstream kafka; it defaults to the global kafka config
#[syncer.to]
# only one of zookeeper-addrs and kafka-addrs needs to be configured; the kafka addresses will be fetched from zookeeper if zookeeper-addrs is configured.
# zookeeper-addrs = "127.0.0.1:2181"
# kafka-addrs = "127.0.0.1:9092"
# kafka-version = "0.8.2.0"
# kafka-max-messages = 1024
# kafka-max-message-size = 1073741824 # configure the max kafka **client** message size (in bytes; 1073741824 = 1 GiB)
# kafka-client-id = "tidb_binlog"
#
# the topic name to which drainer will push messages; the default name is <cluster-id>_obinlog
# be careful not to use the same name if you run multiple drainer instances
# topic-name = ""
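# For example (hypothetical names), when two drainer instances write to the same kafka
# cluster, give each its own topic in its own config file:
# topic-name = "cluster-a_obinlog"    # in the first drainer's config
# topic-name = "cluster-b_obinlog"    # in the second drainer's config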