elasticsearch: release buffer properly to avoid memory leaks (#630)
* release buffer properly
* integration - add docker compose for elastic and some docs
* enable log in test
* change default value
* make linter happy
* optimize code
* update docs
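The buffer release referenced in the commit title follows a common Go pattern: reset the bulk buffer and hand it back to a pool after each flush so that large backing arrays are not retained. The sketch below only illustrates that general pattern; the names (`bufPool`, `flushBulk`) are illustrative and this is not the code changed in this commit.

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

// bufPool hands out reusable buffers for building bulk payloads.
var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

// flushBulk "sends" the accumulated payload, then resets the buffer and
// returns it to the pool so its memory is reused instead of leaking.
func flushBulk(payload *bytes.Buffer) {
	fmt.Printf("flushing %d bytes\n", payload.Len())
	payload.Reset()
	bufPool.Put(payload)
}

func main() {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.WriteString(`{"index":{}}` + "\n" + `{"qname":"example.com."}` + "\n")
	flushBulk(buf)
}
```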
Showing 16 changed files with 444 additions and 106 deletions.
@@ -0,0 +1,20 @@
# DNS-collector with Elastic and Kibana

- Copy the folder `./docs/_integration/elasticsearch` and start the docker stack:

```bash
sudo docker compose up -d
```

- Go to the Kibana web interface at `http://127.0.0.1:5601`

- Click on `Explore on my own`, then `Discover`

- Finally, create the index pattern `dnscollector` and choose `dnstap.timestamp-rfc3339ns` as the time field

- Run DNS-collector from source:

```bash
go run . -config docs/_integration/elasticsearch/config.yml
```
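Once queries are flowing in, the standard Elasticsearch count API is a quick way to confirm that documents reach the `dnscollector` index. A minimal check in Go is sketched below; the `/dnscollector/_count` endpoint is standard Elasticsearch, the rest is illustrative.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Ask Elasticsearch how many documents the dnscollector index holds.
	resp, err := http.Get("http://127.0.0.1:9200/dnscollector/_count")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // e.g. {"count":1234,...}
}
```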
@@ -0,0 +1,24 @@
global:
  trace:
    verbose: true

multiplexer:
  collectors:
    - name: tap
      dnstap:
        listen-ip: 0.0.0.0
        listen-port: 6000
        chan-buffer-size: 4096
  loggers:
    - name: elastic
      elasticsearch:
        server: "http://127.0.0.1:9200/"
        index: "dnscollector"
        chan-buffer-size: 4096
        bulk-size: 12582912
        flush-interval: 5

  routes:
    - from: [ tap ]
      to: [ elastic ]
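For intuition on how `bulk-size` (in bytes) and `flush-interval` (in seconds) interact, here is a minimal sketch of a size-or-time flush policy. It only illustrates the idea and is not DNS-collector's actual Elasticsearch logger code.

```go
package main

import (
	"bytes"
	"fmt"
	"time"
)

func main() {
	const bulkSize = 12582912        // flush when the buffer reaches ~12 MiB...
	flushInterval := 5 * time.Second // ...or at the latest every 5 seconds

	buf := new(bytes.Buffer)
	ticker := time.NewTicker(flushInterval)
	defer ticker.Stop()

	// Fake producer standing in for incoming DNS messages.
	docs := make(chan []byte)
	go func() {
		for i := 0; i < 3; i++ {
			docs <- []byte(`{"qname":"example.com."}` + "\n")
		}
		close(docs)
	}()

	for {
		select {
		case doc, ok := <-docs:
			if !ok {
				fmt.Printf("final flush: %d bytes\n", buf.Len())
				return
			}
			buf.Write(doc)
			if buf.Len() >= bulkSize {
				fmt.Printf("size flush: %d bytes\n", buf.Len())
				buf.Reset()
			}
		case <-ticker.C:
			if buf.Len() > 0 {
				fmt.Printf("interval flush: %d bytes\n", buf.Len())
				buf.Reset()
			}
		}
	}
}
```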
@@ -0,0 +1,24 @@
version: "3.8"
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.12.2
    container_name: elasticsearch
    restart: always
    environment:
      - discovery.type=single-node
      - xpack.security.enabled=false
      - xpack.security.enrollment.enabled=false
    volumes:
      - ./data:/usr/share/elasticsearch/data
    ports:
      - 9200:9200
  kibana:
    container_name: kibana
    image: docker.elastic.co/kibana/kibana:8.12.2
    restart: always
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    ports:
      - 5601:5601
    depends_on:
      - elasticsearch
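Elasticsearch can take a little while to accept connections after `docker compose up -d`. A simple readiness poll is sketched below for illustration; it is not part of the stack itself.

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Poll the Elasticsearch root endpoint until it answers with HTTP 200.
	for {
		resp, err := http.Get("http://127.0.0.1:9200")
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				fmt.Println("elasticsearch is ready")
				return
			}
		}
		fmt.Println("waiting for elasticsearch...")
		time.Sleep(2 * time.Second)
	}
}
```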
@@ -0,0 +1,34 @@
# Performance tuning

All loggers and collectors are based on buffered channels.
The size of these buffers can be configured with `chan-buffer-size`.
If you encounter the following error message in your logs, it indicates that you need to increase the `chan-buffer-size`:

```bash
logger[elastic] buffer is full, 7855 packet(s) dropped
```
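That drop happens when a send on a full buffered channel would block. The sketch below shows the general non-blocking-send pattern with a drop counter; the names are illustrative and this is not DNS-collector's internal code.

```go
package main

import "fmt"

func main() {
	// Tiny buffer so the overflow is easy to trigger here;
	// in DNS-collector the capacity comes from chan-buffer-size.
	ch := make(chan string, 2)
	dropped := 0

	for i := 0; i < 5; i++ {
		msg := fmt.Sprintf("dns message %d", i)
		select {
		case ch <- msg:
			// accepted into the buffer
		default:
			// buffer is full: drop the message and count it
			dropped++
		}
	}
	fmt.Printf("buffer is full, %d packet(s) dropped\n", dropped)
}
```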
## CPU usage

The conversion of DNS logs to JSON, text, or PCAP can incur CPU costs. Here is a list ordered by ns/op:

```bash
./dnsutils$ go test -bench=.
goos: linux
goarch: amd64
pkg: github.com/dmachard/go-dnscollector/dnsutils
cpu: Intel(R) Core(TM) i5-7200U CPU @ 2.50GHz
BenchmarkDnsMessage_ToTextFormat-4       2600718    460.7 ns/op
BenchmarkDnsMessage_ToPacketLayer-4      1171467    969.5 ns/op
BenchmarkDnsMessage_ToDNSTap-4            993242   1130 ns/op
BenchmarkDnsMessage_ToExtendedDNSTap-4    618400   1951 ns/op
BenchmarkDnsMessage_ToJSON-4              190939   6584 ns/op
BenchmarkDnsMessage_ToFlatJSON-4           19868  55533 ns/op
```
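Numbers like these come from standard `go test -bench` benchmarks. The skeleton below shows the shape of such a benchmark using a stand-in struct; it is not the repository's own benchmark code, which operates on `dnsutils.DNSMessage`.

```go
package conv

import (
	"encoding/json"
	"fmt"
	"testing"
)

// record is a stand-in for a DNS message, not the real dnsutils.DNSMessage.
type record struct {
	Qname string
	Qtype string
	Rcode string
}

// Text formatting: cheap, a single Sprintf per message.
func BenchmarkToText(b *testing.B) {
	m := record{Qname: "example.com.", Qtype: "A", Rcode: "NOERROR"}
	for i := 0; i < b.N; i++ {
		_ = fmt.Sprintf("%s %s %s", m.Qname, m.Qtype, m.Rcode)
	}
}

// JSON encoding: noticeably more expensive, as the table above suggests.
func BenchmarkToJSON(b *testing.B) {
	m := record{Qname: "example.com.", Qtype: "A", Rcode: "NOERROR"}
	for i := 0; i < b.N; i++ {
		_, _ = json.Marshal(m)
	}
}
```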
## Memory usage

The main sources of memory usage in DNS-collector are:

- Buffered channels
- Prometheus logger with LRU cache
@@ -0,0 +1,131 @@
package loggers

import (
	"github.com/dmachard/go-dnscollector/dnsutils"
	"github.com/dmachard/go-dnscollector/pkgconfig"
	"github.com/dmachard/go-dnscollector/pkgutils"
	"github.com/dmachard/go-logger"
)

// DevNull is a logger that reads DNS messages from its input channel and discards them.
type DevNull struct {
	stopProcess    chan bool
	doneProcess    chan bool
	stopRun        chan bool
	doneRun        chan bool
	inputChan      chan dnsutils.DNSMessage
	outputChan     chan dnsutils.DNSMessage
	config         *pkgconfig.Config
	configChan     chan *pkgconfig.Config
	logger         *logger.Logger
	name           string
	RoutingHandler pkgutils.RoutingHandler
}

func NewDevNull(config *pkgconfig.Config, console *logger.Logger, name string) *DevNull {
	console.Info(pkgutils.PrefixLogLogger+"[%s] devnull - enabled", name)
	so := &DevNull{
		stopProcess:    make(chan bool),
		doneProcess:    make(chan bool),
		stopRun:        make(chan bool),
		doneRun:        make(chan bool),
		inputChan:      make(chan dnsutils.DNSMessage, config.Loggers.Stdout.ChannelBufferSize),
		outputChan:     make(chan dnsutils.DNSMessage, config.Loggers.Stdout.ChannelBufferSize),
		logger:         console,
		config:         config,
		configChan:     make(chan *pkgconfig.Config),
		name:           name,
		RoutingHandler: pkgutils.NewRoutingHandler(config, console, name),
	}
	return so
}

func (so *DevNull) GetName() string { return so.name }

func (so *DevNull) AddDroppedRoute(wrk pkgutils.Worker) {
	so.RoutingHandler.AddDroppedRoute(wrk)
}

func (so *DevNull) AddDefaultRoute(wrk pkgutils.Worker) {
	so.RoutingHandler.AddDefaultRoute(wrk)
}

func (so *DevNull) SetLoggers(loggers []pkgutils.Worker) {}

func (so *DevNull) ReadConfig() {}

func (so *DevNull) ReloadConfig(config *pkgconfig.Config) {
	so.LogInfo("reload configuration!")
	so.configChan <- config
}

func (so *DevNull) LogInfo(msg string, v ...interface{}) {
	so.logger.Info(pkgutils.PrefixLogLogger+"["+so.name+"] devnull - "+msg, v...)
}

func (so *DevNull) LogError(msg string, v ...interface{}) {
	so.logger.Error(pkgutils.PrefixLogLogger+"["+so.name+"] devnull - "+msg, v...)
}

func (so *DevNull) GetInputChannel() chan dnsutils.DNSMessage {
	return so.inputChan
}

func (so *DevNull) Stop() {
	so.LogInfo("stopping logger...")
	so.RoutingHandler.Stop()

	so.LogInfo("stopping to run...")
	so.stopRun <- true
	<-so.doneRun

	so.LogInfo("stopping to process...")
	so.stopProcess <- true
	<-so.doneProcess
}

func (so *DevNull) Run() {
	so.LogInfo("running in background...")

	// goroutine to process transformed dns messages
	go so.Process()

	// loop to process incoming messages
RUN_LOOP:
	for {
		select {
		case <-so.stopRun:
			so.doneRun <- true
			break RUN_LOOP

		case _, opened := <-so.inputChan:
			if !opened {
				so.LogInfo("run: input channel closed!")
				return
			}

			// a regular logger would forward the message to the output
			// channel here; DevNull drops it on purpose
			// so.outputChan <- dm
		}
	}
	so.LogInfo("run terminated")
}

func (so *DevNull) Process() {
	so.LogInfo("ready to process")
PROCESS_LOOP:
	for {
		select {
		case <-so.stopProcess:
			so.doneProcess <- true
			break PROCESS_LOOP

		case _, opened := <-so.outputChan:
			if !opened {
				so.LogInfo("process: output channel closed!")
				return
			}
		}
	}
	so.LogInfo("processing terminated")
}