# Welcome to the InfluxDB configuration file.
# If hostname (on the OS) doesn't return a name that can be resolved by the other
# systems in the cluster, you'll have to set the hostname to an IP or something
# that can be resolved here.
# hostname = ""
bind-address = "0.0.0.0"
[logging]
# logging level can be one of "debug", "info", "warn" or "error"
level = "info"
file = "/opt/influxdb/shared/log.txt" # stdout to log to standard out
# Configure the admin server
[admin]
port = 8083 # binding is disabled if the port isn't set
assets = "/opt/influxdb/current/admin"
# Configure the http api
[api]
port = 8086 # binding is disabled if the port isn't set
# ssl-port = 8084 # SSL support is enabled if you set a port and cert
# ssl-cert = "/cert.pem"
# connections will time out after this amount of time. Ensures that misbehaving clients
# that keep connections alive without using them won't end up holding open a huge number
# of connections. However, if a request takes longer than this to complete, this timeout could be a problem.
read-timeout = "5s"
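# For reference, a write against this HTTP API typically looks something like the
# following (the database name, credentials, and series layout are placeholders):
#   curl -X POST 'http://localhost:8086/db/mydb/series?u=root&p=root' \
#     -d '[{"name": "cpu", "columns": ["value"], "points": [[42]]}]'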
[input_plugins]
# Configure the graphite api
[input_plugins.graphite]
enabled = false
# port = 2003
# database = "" # store graphite data in this database
# udp_enabled = true # enable udp interface on the same port as the tcp interface
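# For reference, with this plugin enabled a graphite plaintext line such as
#   servers.hosta.cpu 42 1392832950
# should be stored as a series named "servers.hosta.cpu" in the configured database
# (the metric name and values here are only illustrative).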
[input_plugins.udp]
enabled = false
# port = 4444
# database = ""
# Raft configuration
[raft]
# The raft port should be open between all servers in a cluster.
# However, this port shouldn't be accessible from the internet.
port = 8090
# Where the raft logs are stored. The user running InfluxDB will need read/write access.
dir = "/data/raft"
# election-timeout = "1s"
[storage]
dir = "/data/db"
# How many requests to potentially buffer in memory. If the buffer gets filled then writes
# will still be logged and once the local storage has caught up (or compacted) the writes
# will be replayed from the WAL
write-buffer-size = 10000
[cluster]
# A comma-separated list of servers to seed
# this server. This is only relevant when the
# server is joining a new cluster. Otherwise
# the server will use the list of known servers
# it had prior to shutting down. Any server can be pointed to
# as a seed. It will find the Raft leader automatically.
# Here's an example. Note that the port on the host is the same as the raft port.
# seed-servers = ["hosta:8090","hostb:8090"]
# Replication happens over a TCP connection with a Protobuf protocol.
# This port should be reachable between all servers in a cluster.
# However, this port shouldn't be accessible from the internet.
protobuf_port = 8099
protobuf_timeout = "2s" # the write timeout on the protobuf connection; any duration parseable by time.ParseDuration
protobuf_heartbeat = "200ms" # the heartbeat interval between the servers; must be parseable by time.ParseDuration
protobuf_min_backoff = "1s" # the minimum backoff after a failed heartbeat attempt
protobuf_max_backoff = "10s" # the maximum backoff after a failed heartbeat attempt
# How many write requests to potentially buffer in memory per server. If the buffer gets filled then writes
# will still be logged and once the server has caught up (or come back online) the writes
# will be replayed from the WAL
write-buffer-size = 10000
# the maximum number of responses to buffer from remote nodes, if the
# expected number of responses exceed this number then querying will
# happen sequentially and the buffer size will be limited to this
# number
max-response-buffer-size = 100
# When queries get distributed out to shards, they go in parallel. This means that results can get buffered
# in memory since results will come in any order, but have to be processed in the correct time order.
# Setting this higher will give better performance, but you'll need more memory. Setting this to 1 will ensure
# that you don't need to buffer in memory, but you won't get the best performance.
concurrent-shard-query-limit = 10
[leveldb]
# Maximum mmap open files; this will affect the virtual memory used by
# the process
max-open-files = 40
# LRU cache size, LRU is used by leveldb to store contents of the
# uncompressed sstables. You can use `m` or `g` prefix for megabytes
# and gigabytes, respectively.
lru-cache-size = "200m"
# The default setting is 0, which means unlimited. Set this if you want to limit the
# maximum number of open files. max-open-files is per shard, so max-open-shards * max-open-files gives the overall maximum.
max-open-shards = 0
# The default setting is 100. This option controls how many points are fetched from LevelDB before
# they get flushed to the backend.
point-batch-size = 100
# The number of points to batch in memory before writing them to leveldb. Lowering this number will
# reduce the memory usage, but will result in slower writes.
write-batch-size = 5000000
# These options specify how data is sharded across the cluster. There are two
# shard configurations that have the same knobs: short term and long term.
# Any series that begins with a capital letter like Exceptions will be written
# into the long term storage. Any series beginning with a lower case letter
# like exceptions will be written into short term. The idea is that you
# can write high precision data into short term and drop it after a couple
# of days. Meanwhile, continuous queries can run downsampling on the short term
# data and write into the long term area.
[sharding]
# how many servers in the cluster should have a copy of each shard.
# this will give you high availability and scalability on queries
replication-factor = 1
[sharding.short-term]
# each shard will cover this period of time. Note that it's best to have the
# group by time() intervals on all queries be less than this setting. If they are,
# the aggregate is calculated locally. Otherwise, all of that data gets sent
# over the network when doing a query.
duration = "7d"
# split determines how many shards each duration is split into. For example,
# if we created a shard for 2014-02-10 and split was set to 2, then two shards
# would be created that hold the data for 2014-02-10. By default, data will
# be split into those two shards deterministically by hashing the (database, series)
# tuple. That means that data for a given series will be written to a single shard,
# making querying efficient. That can be overridden with the next option.
split = 1
# You can override the split behavior to have the data for series that match a
# given regex be randomly distributed across the shards for a given interval.
# You can use this if you have a hot spot for a given time series writing more
# data than a single server can handle. Most people won't have to resort to this
# option. Also note that using this option means that queries will have to send
# all data over the network so they won't be as efficient.
# split-random = "/^hf.*/"
[sharding.long-term]
duration = "30d"
split = 1
# split-random = "/^Hf.*/"
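# With the settings above, a series named "Requests" (capitalized) goes to the 30d
# long-term shards, while "requests" goes to the 7d short-term shards (the series
# names here are only illustrative).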
[wal]
dir = "/data/wal"
flush-after = 1000 # the number of writes after which wal will be flushed, 0 for flushing on every write
bookmark-after = 1000 # the number of writes after which a bookmark will be created
# the number of writes after which an index entry is created pointing
# to the offset of the first request; defaults to 1000
index-after = 1000
# the number of requests per log file; once this many requests have been
# written, a new log file will be created
requests-per-logfile = 10000