diff --git a/README.md b/README.md
index 48459d79..fd5fd054 100644
--- a/README.md
+++ b/README.md
@@ -1,17 +1,19 @@
+
+
+
+
+
+
-![DNS-collector](./docs/dns-collector_logo.png)
-
-[![Go Report Card](https://goreportcard.com/badge/github.com/dmachard/go-dns-collector)](https://goreportcard.com/report/dmachard/go-dns-collector)
-![Go version](https://img.shields.io/badge/go%20version-min%201.20-blue)
-![Go tests](https://img.shields.io/badge/go%20tests-377-green)
-![Go lines](https://img.shields.io/badge/go%20lines-36222-red)
-![Go Tests](https://github.com/dmachard/go-dns-collector/actions/workflows/testing-go.yml/badge.svg)
-![Github Actions](https://github.com/dmachard/go-dns-collector/actions/workflows/testing-dnstap.yml/badge.svg)
-![Github Actions PDNS](https://github.com/dmachard/go-dns-collector/actions/workflows/testing-powerdns.yml/badge.svg)
+
+
+
-*NOTE: The code before version 1.x is considered beta quality and is subject to breaking changes.*
+
+
+
-`DNS-collector` acts as a passive high speed **ingestor, aggregator and distributor** for your DNS logs with usage indicators and security analysis, written in **Golang**. The DNS traffic can be collected and aggregated from simultaneously [sources](./docs/collectors.md) like DNStap streams, network interface or log files and relays it to multiple other [listeners](./docs/loggers.md) with some [transformations](./docs/transformers.md) on it ([traffic filtering](./docs/transformers.md#dns-filtering), [user privacy](./docs/transformers.md#user-privacy), ...).
+`DNS-collector` acts as a passive, high-speed **ingestor** with **pipelining** support for your DNS logs, written in **Golang**. It enriches your DNS logs by adding metadata, extracting usage patterns, and facilitating security analysis. DNS traffic can be collected and aggregated simultaneously from multiple [sources](./docs/collectors.md) such as DNStap streams, network interfaces, or log files, and relayed to multiple other [listeners](./docs/loggers.md) with some [transformations](./docs/transformers.md) applied ([traffic filtering](./docs/transformers.md#dns-filtering), [user privacy](./docs/transformers.md#user-privacy), ...).
> Additionally, DNS-collector also support
>
@@ -20,17 +22,13 @@
> - IPv4/v6 defragmentation and TCP reassembly
> - Nanoseconds in timestamps
-Run
-
-
-
-
+*NOTE: The code before version 1.x is considered beta quality and is subject to breaking changes.*
-Multiplexer
+## Features
-![overview](./docs/_images/overview.png)
+- **[Pipelining](./docs/running_mode.md)**
-## Features
+ [![overview](./docs/_images/overview.png)](./docs/running_mode.md)
- **[Collectors](./docs/collectors.md)**
@@ -73,7 +71,7 @@ Multiplexer
- Traffic [Filtering](docs/transformers/transform_trafficfiltering.md) and [Reducer](docs/transformers/transform_trafficreducer.md)
- Latency [Computing](docs/transformers/transform_latency.md)
- - Apply user [Privacy](docs/transformers/transform_userprivacy.md)
+ - Apply [User Privacy](docs/transformers/transform_userprivacy.md)
- [Normalize](docs/transformers/transform_normalize.md) DNS messages
- Add [Geographical](docs/transformers/transform_geoip.md) metadata
- Various data [Extractor](docs/transformers/transform_dataextractor.md)
@@ -87,6 +85,8 @@ Download the latest [`release`](https://github.com/dmachard/go-dns-collector/rel
./go-dnscollector -config config.yml
```
+![run](docs/_images/terminal.gif)
+
If you prefer run it from docker, follow this [guide](./docs/docker.md).
## Configuration
diff --git a/collectors/file_ingestor.go b/collectors/file_ingestor.go
index d2b6b697..256c09a8 100644
--- a/collectors/file_ingestor.go
+++ b/collectors/file_ingestor.go
@@ -138,7 +138,7 @@ func (c *FileIngestor) ProcessFile(filePath string) {
go c.ProcessPcap(filePath)
}
case pkgconfig.ModeDNSTap:
- // processs dnstap
+ // process dnstap
if filepath.Ext(filePath) == ".fstrm" {
c.LogInfo("file ready to process %s", filePath)
go c.ProcessDnstap(filePath)
@@ -412,7 +412,7 @@ func (c *FileIngestor) Run() {
go c.ProcessPcap(fn)
}
case pkgconfig.ModeDNSTap:
- // processs dnstap
+ // process dnstap
if filepath.Ext(fn) == ".fstrm" {
go c.ProcessDnstap(fn)
}
diff --git a/dnsutils/message.go b/dnsutils/message.go
index 8a13b850..11f03451 100644
--- a/dnsutils/message.go
+++ b/dnsutils/message.go
@@ -178,7 +178,7 @@ type TransformExtracted struct {
}
type TransformReducer struct {
- Occurences int `json:"occurences" msgpack:"occurences"`
+ Occurrences int `json:"occurrences" msgpack:"occurrences"`
CumulativeLength int `json:"cumulative-length" msgpack:"cumulative-length"`
}
@@ -204,7 +204,7 @@ type TransformML struct {
ConsecutiveDigits int `json:"consecutive-digits" msgpack:"consecutive-digits"`
ConsecutiveConsonants int `json:"consecutive-consonants" msgpack:"consecutive-consonants"`
Size int `json:"size" msgpack:"size"`
- Occurences int `json:"occurences" msgpack:"occurences"`
+ Occurrences int `json:"occurrences" msgpack:"occurrences"`
UncommonQtypes int `json:"uncommon-qtypes" msgpack:"uncommon-qtypes"`
}
@@ -421,8 +421,8 @@ func (dm *DNSMessage) handleReducerDirectives(directives []string, s *strings.Bu
s.WriteString("-")
} else {
switch directive := directives[0]; {
- case directive == "reducer-occurences":
- s.WriteString(strconv.Itoa(dm.Reducer.Occurences))
+ case directive == "reducer-occurrences":
+ s.WriteString(strconv.Itoa(dm.Reducer.Occurrences))
case directive == "reducer-cumulative-length":
s.WriteString(strconv.Itoa(dm.Reducer.CumulativeLength))
}
@@ -468,8 +468,8 @@ func (dm *DNSMessage) handleMachineLearningDirectives(directives []string, s *st
s.WriteString(strconv.Itoa(dm.MachineLearning.ConsecutiveConsonants))
case directive == "ml-size":
s.WriteString(strconv.Itoa(dm.MachineLearning.Size))
- case directive == "ml-occurences":
- s.WriteString(strconv.Itoa(dm.MachineLearning.Occurences))
+ case directive == "ml-occurrences":
+ s.WriteString(strconv.Itoa(dm.MachineLearning.Occurrences))
case directive == "ml-uncommon-qtypes":
s.WriteString(strconv.Itoa(dm.MachineLearning.UncommonQtypes))
}
diff --git a/dnsutils/message_test.go b/dnsutils/message_test.go
index d854c020..40365e97 100644
--- a/dnsutils/message_test.go
+++ b/dnsutils/message_test.go
@@ -156,10 +156,10 @@ func TestDnsMessage_Json_Transforms_Reference(t *testing.T) {
},
{
transform: "reducer",
- dmRef: DNSMessage{Reducer: &TransformReducer{Occurences: 10, CumulativeLength: 47}},
+ dmRef: DNSMessage{Reducer: &TransformReducer{Occurrences: 10, CumulativeLength: 47}},
jsonRef: `{
"reducer": {
- "occurences": 10,
+ "occurrences": 10,
"cumulative-length": 47
}
}`,
@@ -589,14 +589,14 @@ func TestDnsMessage_TextFormat_Directives_Reducer(t *testing.T) {
}{
{
name: "undefined",
- format: "reducer-occurences",
+ format: "reducer-occurrences",
dm: DNSMessage{},
expected: "-",
},
{
name: "default",
- format: "reducer-occurences",
- dm: DNSMessage{Reducer: &TransformReducer{Occurences: 1}},
+ format: "reducer-occurrences",
+ dm: DNSMessage{Reducer: &TransformReducer{Occurrences: 1}},
expected: "1",
},
}
diff --git a/docs/_images/overview.png b/docs/_images/overview.png
index 158e92c9..567c3ce6 100644
Binary files a/docs/_images/overview.png and b/docs/_images/overview.png differ
diff --git a/docs/collectors.md b/docs/collectors.md
index 3bd916c2..312c89e2 100644
--- a/docs/collectors.md
+++ b/docs/collectors.md
@@ -7,4 +7,4 @@
| [Tail](collectors/collector_tail.md) | Tail on plain text file |
| [XDP Sniffer](collectors/collector_xdp.md) | Live capture on network interface with XDP |
| [AF_PACKET Sniffer](collectors/collector_afpacket.md) | Live capture on network interface with AF_PACKET socket |
-| [File Ingestor](collectors/collector_file.md) | File ingestor like pcap |
+| [File Ingestor](collectors/collector_fileingestor.md) | File ingestor like pcap |
diff --git a/docs/configuration.md b/docs/configuration.md
index 8d1778fd..5e521cf4 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -2,17 +2,14 @@
The configuration of DNS-collector is done through one yaml file named [`config.yml`](https://github.com/dmachard/go-dnscollector/blob/main/config.yml). When the DNS-collector starts, it will look for the config.yml from the current working directory.
-A typically configuration would have one or more collector to receive DNS traffic, and severals loggers to process the
-incoming traffics. You can take a look to the list of config [`examples`](examples.md).
+A typical configuration in [multiplexer](./running_mode.md) mode would have one or more collectors to receive DNS traffic, and several loggers to process the incoming traffic. You can take a look at the list of config [`examples`](examples.md).
+
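+A rough sketch of the overall layout (a minimal, illustrative assumption of how the sections fit together; the multiplexer content itself is detailed in [running mode](./running_mode.md)):
+
+```yaml
+# config.yml - overall layout (illustrative)
+global:
+  trace:
+    verbose: true
+
+multiplexer:
+  collectors: []   # list of inputs, see collectors.md
+  loggers: []      # list of outputs, see loggers.md
+  routes: []       # wiring between them, see running_mode.md
+```
+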
+You can find the global settings below:
- [Global](#global)
- [Trace](#trace)
- [Custom text format](#custom-text-format)
- [Server identity](#server-identity)
-- [Multiplexer](#multiplexer)
- - [Collectors](#collectors)
- - [Loggers](#loggers)
- - [Routes](#routes)
## Global
@@ -118,48 +115,3 @@ Output example:
2023-04-08T18:27:29.278929Z unbound FORWARDER_RESPONSE NOERROR 0.0.0.0 20817 IPv4 UDP 54b google.fr A 0.000000
2023-04-08T18:27:29.279039Z unbound CLIENT_RESPONSE NOERROR 127.0.0.1 39028 IPv4 UDP 54b google.fr A 0.000000
```
-
-## Multiplexer
-
-The dns collector can be configured with multiple loggers and collectors at the same time.
-
-You must defined the list of
-
-- `collectors`: list of running inputs
-- `loggers`: list of running outputs
-- `routes`: routing definition
-
-### Collectors
-
-List of supported [collectors](./collectors.md)
-
-```yaml
-multiplexer:
- collectors:
- - name:
- .....
-```
-
-### Loggers
-
-List of supported [loggers](./loggers.md)
-
-```yaml
-multiplexer:
- loggers:
- - name:
- ...
-```
-
-### Routes
-
-Then defines the routing to use between all of them according to the name.
-You can connect one collector to multiple loggers and you can also
-connect multiple collectors to the same logger.
-
-```yaml
-multiplexer:
- routes: ...
- - from: [ list of collectors by name ]
- to: [ list of loggers by name ]
-```
diff --git a/docs/loggers/logger_kafka.md b/docs/loggers/logger_kafka.md
index 83895a6d..027fb8a0 100644
--- a/docs/loggers/logger_kafka.md
+++ b/docs/loggers/logger_kafka.md
@@ -1,6 +1,6 @@
# Logger: Kafka Producer
-Kafka producer
+Kafka producer, based on the [kafka-go](https://github.com/segmentio/kafka-go) library.
Options:
diff --git a/docs/overview.drawio b/docs/overview.drawio
index ec5c5664..91aae95c 100644
--- a/docs/overview.drawio
+++ b/docs/overview.drawio
@@ -1 +1,215 @@
-7Vxte5o6GP41XtfOB7mAhAAftbbdetquXdut6zcqUdmQOMCq+/UnKCgkEbCC6HbcPpAn4e3O/byStAXOxvNL35qMboiN3ZYq2/MW6LVUVYGq2or+y/ZiJdENtBIMfceOB20ED85vHAvlWDp1bBxkBoaEuKEzyQr7xPNwP8zILN8ns+ywAXGzd51YQ8wJHvqWy0u/OXY4WkkNTd7IP2JnOErurMhxz9hKBseCYGTZZJYSgfMWOPMJCVdH4/kZdiPwElxW511s6V0/mI+9sMwJ3d/a41gNXifP3249w+pc/jDP2vFV3ix3Gr9w/LDhIkHAJ1PPxtFF5BbozkZOiB8mVj/qndE5p7JROHZpS6GHQeiTn/iMuMSnEo94dFh34LhuImqpYKBF/yI58cKUfPWjcv7NksfEfojnKVH8ppeYjHHoL+iQuFeFMeoLpj3bTKKWcGyUmkDVjIVWTJzh+tobbOlBDO8OUINdoFaKoWZQtS1sDPrbpsDy+7FuyRUBjMwswJoqaRzESnLhDMRIT4ZWDjIUgIxceuPuKz0YRgcUGZdaCuIHSRe907qXmxOKR7gjxxPMXWfo0WafooypvBuh61DT0ok7xo5tR7cRznRW7SqYMSDDEjMGRTpRl0po9arEYDBQ+4dTCaQwNkfTJVWgE0AEcY06gYp14poMh/hvUwikAUnXykzZQZVC56bL9oKQQlC9Y67CDxhIYjA0eFerr01NGkJYF4QmByEl+IVDgTtKDBGEEjAyIAJqD+T0D/GYCkgJNSmxoZWDqvCx4g29sNMPTgZVITVFMKqSXBeMyYuk4MI2TTniJvHDERkSz3LPN1LG9m3GXBMyiWH8gcNwETs0axqSLMgURn/xHJ+/bHyPGpGxi9u9ebq3t4hb22L37KzRHrT8rXuSZEnNm8zQ8oc4LLaFEUC5U+5j1wqdt2zOJpq6+NQ74tBH2RgxxnMDmbFMAZn6fRyftSFAx/etRWrYJBoQlL9Pkmxs+LS64pazNeYpdYajKzi5p1zSdA1WKeZqbfg6QzPl5WN/fP6Enr/2FNiG+jExt4i4PA1tKxitI8mocWeFNA7xlhJVVvKYuiLAStQGi5cviMwn326unPmN3VtcddoCRgtBBEotlN6VicjIcgnqME2ZXcdnmfsOwjmPn0ZXvblzLS80veM+W8PZXODGkwDVdt6S+DS0HDcVt6Z6jtMt8QET0DXJ1FK+ng9AxfETqCRn0D7fvBg3SIbk6fun6+nLm2XpgrqQJEmnAihEh4xAvz/ejq5/Gvc/Ot6se/Xr7N/Lp0mScx3WVHLpL0aC9DeqFenmq1zCSJYyh8L3ByUdvHkU1lAz9SyBZKaUWjAe7OTHkW5KwEz9svdmOVmdV8+bqXwjezONKu4untO0fYut5c75Qqah4w3XtQafHVultWapb/Tx9spPBUYHsrEY1EXlT1mQXBh6TTYHHlN4VphY4LkTPicXocepqI62NidFje0xHVsE17BhQ5HJM9RXsExPStm0vGQkbdPyNKphmwZlJsoosGnc+LjAX1WEJ4RKa4Sy72BeM641Lyo+FRqyqbWp5dMwf/zeNJzPehe3C3LRbxv3ny+n7c78K27GcjZDqbxo/1QoJTO5qFFk2Zjx6i7RGmC+JK89f0FtiH8MxFxIqS3QE5K8EVtbRflwW4ihF4UY5RRCVKVReYVosj6pIMY3G6qk6JuCgVKKkdv1bCtnVV1nixWKyhYdauZtI1XHWsvehfQTZM95oUDt9NORZMo0UYVIRYapyIBx0QZLiarq5RpruZV8urLj5Z3q6/Q1VbY0h+pju7jI3uLT8X0S44Kidy5Py6fDCoAsdKmKWyojNgQJMaggIRZiKVrcxpUp7sgM+73bh9J1DdsLbCcID1PXOND0qfz0GaDZydtpbdDxIMkrAgB8NfqgSKockk/eawTdSQDKUxPAKPppFlN+kc5JgAlYzw0k2WgWSb7+3HVOhZpZNKHcsMlMEsrjqAA3urKkMMQWVNHySiP1Z3iGpGcXK0FTkRQt9ZmIoU1VMTbYrZpSML4wxqYJJdjpPWsOuI0TteScW4SoeWPOr9jYe9lAY2EG1Bs254DPXx4c7+eeyy33WQVVHk+dXS+kaUXrWQ+KLWyk5F/fmjTxS6q8k8td2ttwYZ8lDYhZss0VFYzf+1tRLqgprbzzo6uO8PQkVVM1jkw1m/koXE0UW2IBwjuj2L2WWR2Jgms6o7AFsSY3XlZaWxWcPxuJ77bzlzukRTsJuM8t63KEud5GVH3M+vleH6tf2h314+zX+IXc2bOuKajoZOqUKdVBv6bR/uLlVtt2sCR/hw4wJ/NVwTLuZkuY0fB9rvPoW14wIP44vaVsddH8WulRvcWHO995s/qU4XSaZBrJ/pNb9C3zfqxhc11nEuBi13Do9W1AMRnNMwXVfCTwC1UsqRWS/ii+X++9SOO9fqZSH5G3g6FpF2EyWRj7Vw2Kxus5y9YKXQRkd3Xu5CL0rWt6NaMu/5A3laewel+wS4/mipylEe7Sq2T7g9DWiMr7R+Wa3u1gT8UBcRuODLiOspryQEl9/Q+kxVG9xQdKNpr3LjcP/HWRF0f8ZPViY6wvWEbRPF/+eGPIRuNHYQz/3CT0ZGnRuKko2Ez2PycO7j5qTdxpc/P361bJy+avAILz/wA=
\ No newline at end of file
diff --git a/docs/running_mode.md b/docs/running_mode.md
new file mode 100644
index 00000000..df991363
--- /dev/null
+++ b/docs/running_mode.md
@@ -0,0 +1,56 @@
+# DNS-collector - Running mode
+
+- [Pipelining](#pipelining)
+- [Multiplexer](#multiplexer)
+ - [Collectors](#collectors)
+ - [Loggers](#loggers)
+ - [Routes](#routes)
+
+## Pipelining
+
+> EXPERIMENTAL
+
+## Multiplexer
+
+The DNS-collector can be configured with multiple collectors and loggers at the same time.
+
+You must define the following lists:
+
+- `collectors`: list of running inputs
+- `loggers`: list of running outputs
+- `routes`: routing definition
+
+### Collectors
+
+List of supported [collectors](./collectors.md)
+
+```yaml
+multiplexer:
+ collectors:
+ - name:
+ .....
+```
+
+### Loggers
+
+List of supported [loggers](./loggers.md)
+
+```yaml
+multiplexer:
+ loggers:
+ - name:
+ ...
+```
+
+### Routes
+
+Then define the routing between them, referencing each collector and logger by name.
+You can connect one collector to multiple loggers, and you can also
+connect multiple collectors to the same logger.
+
+```yaml
+multiplexer:
+ routes: ...
+ - from: [ list of collectors by name ]
+ to: [ list of loggers by name ]
+```
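+
+Putting the three pieces together, a minimal end-to-end sketch (the collector/logger names and options are illustrative, adapted from the default `config.yml`; check the [collectors](./collectors.md) and [loggers](./loggers.md) pages for the exact options):
+
+```yaml
+multiplexer:
+  collectors:
+    - name: tap
+      dnstap:
+        listen-ip: 0.0.0.0
+        listen-port: 6000
+  loggers:
+    - name: console
+      stdout:
+        mode: text
+  routes:
+    - from: [ tap ]
+      to: [ console ]
+```
+
+With this layout, every DNS message received by the `tap` collector is forwarded to the `console` logger; adding a second entry to `to:` would duplicate the stream to another logger.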
+
diff --git a/docs/transformers/transform_trafficreducer.md b/docs/transformers/transform_trafficreducer.md
index 1599c2f2..82dab3cc 100644
--- a/docs/transformers/transform_trafficreducer.md
+++ b/docs/transformers/transform_trafficreducer.md
@@ -30,8 +30,8 @@ transforms:
Specific text directive(s) available for the text format:
-- `reducer-occurences`: display the number of detected duplication
-- `cumulative-length`: sum of the length of each occurences
+- `reducer-occurrences`: display the number of detected duplications
+- `reducer-cumulative-length`: sum of the lengths of all occurrences
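+
+These directives can be combined with the other text directives, for example (a minimal sketch, assuming the global `text-format` option and keeping only a few of the default directives):
+
+```yaml
+global:
+  text-format: "timestamp-rfc3339ns identity qname qtype reducer-occurrences reducer-cumulative-length"
+```
+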
When the feature is enabled, the following json field are populated in your DNS message:
@@ -40,7 +40,7 @@ Example:
```json
{
"reducer": {
- "occurences": 1,
+ "occurrences": 1,
"cumulative-length": 47
}
}
diff --git a/loggers/dnstapclient.go b/loggers/dnstapclient.go
index 305b55ec..ee569372 100644
--- a/loggers/dnstapclient.go
+++ b/loggers/dnstapclient.go
@@ -200,7 +200,7 @@ func (ds *DnstapSender) ConnectToRemote() {
// block until framestream is ready
ds.transportReady <- true
- // block until an error occured, need to reconnect
+ // block until an error occurred, need to reconnect
ds.transportReconnect <- true
}
}
diff --git a/loggers/fluentd.go b/loggers/fluentd.go
index 1f24314a..3553ed5c 100644
--- a/loggers/fluentd.go
+++ b/loggers/fluentd.go
@@ -213,7 +213,7 @@ func (fc *FluentdClient) ConnectToRemote() {
// block until framestream is ready
fc.transportReady <- true
- // block until an error occured, need to reconnect
+ // block until an error occurred, need to reconnect
fc.transportReconnect <- true
}
}
diff --git a/loggers/prometheus.go b/loggers/prometheus.go
index db5ea579..93f0fef5 100644
--- a/loggers/prometheus.go
+++ b/loggers/prometheus.go
@@ -32,7 +32,7 @@ var metricNameRegex = regexp.MustCompile(`_*[^0-9A-Za-z_]+_*`)
/*
This is the list of available label values selectors.
-Configuration may specifiy a list of lables to use for metrics.
+Configuration may specify a list of labels to use for metrics.
Any label in this catalogueSelectors can be specidied in config (prometheus-labels stanza)
*/
var catalogueSelectors map[string]func(*dnsutils.DNSMessage) string = map[string]func(*dnsutils.DNSMessage) string{
@@ -93,7 +93,7 @@ type PrometheusCountersCatalogue interface {
// This type represents a set of counters for a unique set of label name=value pairs.
// By default, we create a set per setream_id for backward compatibility
-// However, we can allow slicing and dicing data using more dimentions.
+// However, we can allow slicing and dicing data using more dimensions.
// Each CounterSet is registered with Prometheus collection independently (wrapping label values)
type PrometheusCountersSet struct {
prom *Prometheus
@@ -742,7 +742,7 @@ func (c *PromCounterCatalogueContainer) GetCountersSet(dm *dnsutils.DNSMessage)
return c.stats[lbl].GetCountersSet(dm)
}
-// This function checks the configuration, to determine which label dimentions were requested
+// This function checks the configuration, to determine which label dimensions were requested
// by configuration, and returns correct implementation of Catalogue.
func CreateSystemCatalogue(o *Prometheus) ([]string, *PromCounterCatalogueContainer) {
lbls := o.config.Loggers.Prometheus.LabelsList
@@ -1322,7 +1322,7 @@ PROCESS_LOOP:
}
/*
-This is an implementation of variadic dimentions map of label values.
+This is an implementation of variadic dimensions map of label values.
Having nested structure offers the fastest operations, compared to super-flexibile approach that prom client
uses with arbitrary set of labels.
diff --git a/loggers/prometheus_test.go b/loggers/prometheus_test.go
index e44d69de..db61794c 100644
--- a/loggers/prometheus_test.go
+++ b/loggers/prometheus_test.go
@@ -171,7 +171,7 @@ func TestPrometheus_EPS_Counters(t *testing.T) {
noErrorRecord := dnsutils.GetFakeDNSMessage()
noErrorRecord.DNS.Type = dnsutils.DNSQuery
g.Record(noErrorRecord)
- // Zero second elapsed, initalize EPS
+ // Zero second elapsed, initialize EPS
g.ComputeEventsPerSecond()
mf := getMetrics(g, t)
ensureMetricValue(t, mf, "dnscollector_throughput_ops", map[string]string{"stream_id": "collector"}, 0)
diff --git a/loggers/redispub.go b/loggers/redispub.go
index cbb90c6e..5b4e8c29 100644
--- a/loggers/redispub.go
+++ b/loggers/redispub.go
@@ -227,7 +227,7 @@ func (c *RedisPub) ConnectToRemote() {
// block until framestream is ready
c.transportReady <- true
- // block until an error occured, need to reconnect
+ // block until an error occurred, need to reconnect
c.transportReconnect <- true
}
}
diff --git a/loggers/tcpclient.go b/loggers/tcpclient.go
index e426830b..d8c9cac1 100644
--- a/loggers/tcpclient.go
+++ b/loggers/tcpclient.go
@@ -225,7 +225,7 @@ func (c *TCPClient) ConnectToRemote() {
// block until framestream is ready
c.transportReady <- true
- // block until an error occured, need to reconnect
+ // block until an error occurred, need to reconnect
c.transportReconnect <- true
}
}
diff --git a/netlib/conn.go b/netlib/conn.go
index 1cc6c63e..3f64f04e 100644
--- a/netlib/conn.go
+++ b/netlib/conn.go
@@ -12,7 +12,7 @@ func Close(conn io.Closer, reset bool) error {
CloseRead() error
}
- // Agressive closing, send TCP RESET instead of FIN
+ // Aggressive closing, send TCP RESET instead of FIN
if reset {
if tcpConn, ok := conn.(*net.TCPConn); ok {
tcpConn.SetLinger(0)
diff --git a/transformers/machinelearning.go b/transformers/machinelearning.go
index fc84657f..76024190 100644
--- a/transformers/machinelearning.go
+++ b/transformers/machinelearning.go
@@ -78,7 +78,7 @@ func (p *MlProcessor) InitDNSMessage(dm *dnsutils.DNSMessage) {
ConsecutiveDigits: 0,
ConsecutiveConsonants: 0,
Size: 0,
- Occurences: 0,
+ Occurrences: 0,
UncommonQtypes: 0,
}
}
@@ -200,7 +200,7 @@ func (p *MlProcessor) AddFeatures(dm *dnsutils.DNSMessage) {
// occurences
if dm.Reducer != nil {
- dm.MachineLearning.Occurences = dm.Reducer.Occurences
+ dm.MachineLearning.Occurrences = dm.Reducer.Occurrences
}
// qtypes
diff --git a/transformers/reducer.go b/transformers/reducer.go
index aaab6dd2..801b6f46 100644
--- a/transformers/reducer.go
+++ b/transformers/reducer.go
@@ -49,12 +49,12 @@ func (mp *MapTraffic) Set(key string, dm *dnsutils.DNSMessage) {
defer mp.Unlock()
if v, ok := mp.kv.Load(key); ok {
- v.(*dnsutils.DNSMessage).Reducer.Occurences++
+ v.(*dnsutils.DNSMessage).Reducer.Occurrences++
v.(*dnsutils.DNSMessage).Reducer.CumulativeLength += dm.DNS.Length
return
}
- dm.Reducer.Occurences = 1
+ dm.Reducer.Occurrences = 1
dm.Reducer.CumulativeLength = dm.DNS.Length
mp.kv.Store(key, dm)
@@ -152,7 +152,7 @@ func (p *ReducerProcessor) LoadActiveReducers() {
func (p *ReducerProcessor) InitDNSMessage(dm *dnsutils.DNSMessage) {
if dm.Reducer == nil {
dm.Reducer = &dnsutils.TransformReducer{
- Occurences: 0,
+ Occurrences: 0,
CumulativeLength: 0,
}
}
diff --git a/transformers/reducer_test.go b/transformers/reducer_test.go
index 482daee8..4d2c9376 100644
--- a/transformers/reducer_test.go
+++ b/transformers/reducer_test.go
@@ -31,7 +31,7 @@ func TestReducer_Json(t *testing.T) {
refJSON := `
{
"reducer": {
- "occurences": 0,
+ "occurrences": 0,
"cumulative-length": 0
}
}
@@ -95,10 +95,10 @@ func TestReducer_RepetitiveTrafficDetector(t *testing.T) {
},
dnsMessagesOut: []dnsutils.DNSMessage{
{
- Reducer: &dnsutils.TransformReducer{Occurences: 1},
+ Reducer: &dnsutils.TransformReducer{Occurrences: 1},
},
{
- Reducer: &dnsutils.TransformReducer{Occurences: 1},
+ Reducer: &dnsutils.TransformReducer{Occurrences: 1},
},
},
},
@@ -118,7 +118,7 @@ func TestReducer_RepetitiveTrafficDetector(t *testing.T) {
},
dnsMessagesOut: []dnsutils.DNSMessage{
{
- Reducer: &dnsutils.TransformReducer{Occurences: 2},
+ Reducer: &dnsutils.TransformReducer{Occurrences: 2},
},
},
},
@@ -138,10 +138,10 @@ func TestReducer_RepetitiveTrafficDetector(t *testing.T) {
},
dnsMessagesOut: []dnsutils.DNSMessage{
{
- Reducer: &dnsutils.TransformReducer{Occurences: 1},
+ Reducer: &dnsutils.TransformReducer{Occurrences: 1},
},
{
- Reducer: &dnsutils.TransformReducer{Occurences: 1},
+ Reducer: &dnsutils.TransformReducer{Occurrences: 1},
},
},
},
@@ -162,8 +162,8 @@ func TestReducer_RepetitiveTrafficDetector(t *testing.T) {
for _, dmRef := range tc.dnsMessagesOut {
newDm := <-outChan
- if newDm.Reducer.Occurences != dmRef.Reducer.Occurences {
- t.Errorf("DNS message invalid repeated: Want=%d, Get=%d", dmRef.Reducer.Occurences, newDm.Reducer.Occurences)
+ if newDm.Reducer.Occurrences != dmRef.Reducer.Occurrences {
+ t.Errorf("DNS message invalid repeated: Want=%d, Get=%d", dmRef.Reducer.Occurrences, newDm.Reducer.Occurrences)
}
}
})
@@ -208,7 +208,7 @@ func TestReducer_QnamePlusOne(t *testing.T) {
},
dnsMessagesOut: []dnsutils.DNSMessage{
{
- Reducer: &dnsutils.TransformReducer{Occurences: 2},
+ Reducer: &dnsutils.TransformReducer{Occurrences: 2},
},
},
},
@@ -230,8 +230,8 @@ func TestReducer_QnamePlusOne(t *testing.T) {
for _, dmRef := range tc.dnsMessagesOut {
newDm := <-outChan
- if newDm.Reducer.Occurences != dmRef.Reducer.Occurences {
- t.Errorf("DNS message invalid repeated: Want=%d, Get=%d", dmRef.Reducer.Occurences, newDm.Reducer.Occurences)
+ if newDm.Reducer.Occurrences != dmRef.Reducer.Occurrences {
+ t.Errorf("DNS message invalid repeated: Want=%d, Get=%d", dmRef.Reducer.Occurrences, newDm.Reducer.Occurrences)
}
}
})
diff --git a/transformers/subprocessors.go b/transformers/subprocessors.go
index 71b5cd6b..3f9bb50e 100644
--- a/transformers/subprocessors.go
+++ b/transformers/subprocessors.go
@@ -307,7 +307,7 @@ func (p *Transforms) ProcessMessage(dm *dnsutils.DNSMessage) int {
return ReturnDrop
}
- // and finaly apply other transformation
+ // and finally apply other transformation
var rCode int
for _, fn := range p.activeTransforms {
rCode = fn(dm)