diff --git a/.github/workflows/bench-go.yml b/.github/workflows/bench-go.yml new file mode 100644 index 00000000..26a60314 --- /dev/null +++ b/.github/workflows/bench-go.yml @@ -0,0 +1,59 @@ +name: bench go + +on: + push: + paths-ignore: + - 'docs/**' + - 'README.md' + branches: + - main + pull_request: + paths-ignore: + - 'docs/**' + - 'README.md' + branches: + - main + +permissions: + contents: read + +jobs: + dev: + strategy: + matrix: + os-version: ['ubuntu-22.04' ] + go-version: [ '1.21', '1.22' ] + runs-on: ${{ matrix.os-version }} + + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go-version }} + + - name: Show Go version + run: | + go version + sudo go version + + - name: Set up Go for root + if: runner.os != 'macOS' + run: | + which go + sudo which go + sudo ln -sf `which go` `sudo which go` || true + + - name: Show Go version + run: | + go version + sudo go version + + - name: Bench dnsutils + run: | + cd dnsutils/ + go test -benchmem -run=^$ -bench=. 
+ + - name: Bench transformers + run: | + cd transformers/ + go test -benchmem -run=^$ -bench=^BenchmarkUserPrivacy.*\|BenchmarkTransforms.*\|BenchmarkNormalize.*$ \ No newline at end of file diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a506a59d..6ff671db 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,7 +24,7 @@ jobs: strategy: matrix: - go-version: ['1.20', '1.21'] + go-version: ['1.21', '1.22'] steps: - uses: actions/checkout@v4 @@ -46,7 +46,7 @@ jobs: strategy: matrix: - go-version: ['1.20', '1.21'] + go-version: ['1.21', '1.22'] steps: - uses: actions/checkout@v4 @@ -55,14 +55,17 @@ jobs: go-version: ${{ matrix.go-version }} - name: build binary - run: CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go + run: | + go mod edit -go=${{ matrix.go-version }} + go mod tidy + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go go-freebsd: runs-on: ubuntu-latest strategy: matrix: - go-version: [ '1.20', '1.21' ] + go-version: ['1.21', '1.22'] steps: - uses: actions/checkout@v4 @@ -71,7 +74,10 @@ jobs: go-version: ${{ matrix.go-version }} - name: build binary - run: CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go + run: | + go mod edit -go=${{ matrix.go-version }} + go mod tidy + CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go # for macos and window, limit the build to the latest version go-macos: @@ -79,7 +85,7 @@ jobs: strategy: matrix: - go-version: [ '1.21' ] + go-version: ['1.21', '1.22'] steps: - uses: actions/checkout@v4 @@ -88,14 +94,17 @@ jobs: go-version: ${{ matrix.go-version }} - name: build binary - run: CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go + run: | + go mod edit -go=${{ matrix.go-version }} + go mod tidy + CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -ldflags="-s -w" -o 
go-dnscollector *.go go-win: runs-on: windows-latest strategy: matrix: - go-version: [ '1.21' ] + go-version: ['1.21', '1.22'] steps: - uses: actions/checkout@v4 @@ -106,6 +115,8 @@ jobs: - name: build binary run: | set CGO_ENABLED=0 + go mod edit -go=${{ matrix.go-version }} + go mod tidy go build -ldflags="-s -w" -o go-dnscollector.exe . docker: @@ -116,3 +127,12 @@ jobs: - name: Build the Docker image run: | docker build --build-arg VERSION=dev . --file Dockerfile -t go-dnscollector + + docker32b: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Build the Docker image + run: | + docker build --build-arg VERSION=dev . --platform linux/386 --file Dockerfile -t go-dnscollector diff --git a/.github/workflows/golint.yml b/.github/workflows/golint.yml index fe22baf2..5a51eb49 100644 --- a/.github/workflows/golint.yml +++ b/.github/workflows/golint.yml @@ -29,14 +29,15 @@ jobs: - uses: actions/setup-go@v5 with: - go-version: '1.21' - # workaround: when the files to be extracted are already present, tar extraction in Golangci Lint fails with the "File exists" + go-version: '1.22' + # workaround: when the files to be extracted are already present, + # tar extraction in Golangci Lint fails with the "File exists" # https://github.com/golangci/golangci-lint-action/issues/807 cache: false - name: golangci-lint - uses: golangci/golangci-lint-action@v4 + uses: golangci/golangci-lint-action@v6 with: - version: "v1.55.2" + version: "v1.57.1" args: --timeout 3m --verbose \ No newline at end of file diff --git a/.github/workflows/goreleaser.yml b/.github/workflows/goreleaser.yml index 8c61eb67..70a74771 100644 --- a/.github/workflows/goreleaser.yml +++ b/.github/workflows/goreleaser.yml @@ -19,18 +19,18 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version: "1.22" - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v5 + uses: goreleaser/goreleaser-action@v6 with: - version: latest + version: "~> v2" 
args: release --clean env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Upload Assets - uses: softprops/action-gh-release@v1 + uses: softprops/action-gh-release@v2 with: name: ${{github.ref_name}} tag_name: ${{github.ref_name}} diff --git a/.github/workflows/testing-dnstap.yml b/.github/workflows/testing-dnstap.yml index bf558c08..35768dbc 100644 --- a/.github/workflows/testing-dnstap.yml +++ b/.github/workflows/testing-dnstap.yml @@ -25,8 +25,8 @@ jobs: strategy: matrix: - go-version: [ '1.21' ] - unbound: [ '1.17.0', '1.18.0', '1.19.0' ] + go-version: [ '1.22' ] + unbound: [ '1.18.0', '1.19.3' ] mode: [ 'tcp' ] @@ -40,11 +40,14 @@ jobs: python-version: 3.12 - name: build binary - run: CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go + run: | + go mod edit -go=${{ matrix.go-version }} + go mod tidy + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go - name: Deploy docker image run: | - sudo docker run -d --network="host" --name=unbound --volume=$PWD/testsdata/unbound/unbound_${{ matrix.mode }}.conf:/opt/unbound/etc/unbound/unbound.conf:z -v /tmp/:/opt/unbound/etc/unbound/tmp/:z mvance/unbound:${{ matrix.unbound }} + sudo docker run -d --network="host" --name=unbound --volume=$PWD/tests/testsdata/unbound/unbound_${{ matrix.mode }}.conf:/opt/unbound/etc/unbound/unbound.conf:z -v /tmp/:/opt/unbound/etc/unbound/tmp/:z mvance/unbound:${{ matrix.unbound }} until (dig -p 5553 www.github.com @127.0.0.1 | grep NOERROR); do sleep 5.0; done - name: Test ${{ matrix.mode }} @@ -58,7 +61,7 @@ jobs: strategy: matrix: - go-version: [ '1.21' ] + go-version: [ '1.22' ] coredns: [ '1.10.1', '1.11.1' ] mode: [ 'tcp' ] @@ -72,11 +75,14 @@ jobs: python-version: 3.12 - name: build binary - run: CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go + run: | + go mod edit -go=${{ matrix.go-version }} + go mod tidy + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s 
-w" -o go-dnscollector *.go - name: Deploy coredns docker image run: | - sudo docker run -d --network="host" --name=coredns -v $PWD/testsdata/:$PWD/testsdata/ -v /tmp/:/tmp/ coredns/coredns:${{ matrix.coredns }} -conf $PWD/testsdata/coredns/coredns_${{ matrix.mode }}.conf + sudo docker run -d --network="host" --name=coredns -v $PWD/tests/testsdata/:$PWD/tests/testsdata/ -v /tmp/:/tmp/ coredns/coredns:${{ matrix.coredns }} -conf $PWD/tests/testsdata/coredns/coredns_${{ matrix.mode }}.conf until (dig -p 5553 www.github.com @127.0.0.1 | grep NOERROR); do sleep 5.0; done - name: Test ${{ matrix.mode }} @@ -90,7 +96,7 @@ jobs: strategy: matrix: - go-version: [ '1.21' ] + go-version: [ '1.22' ] coredns: [ '1.11.1' ] mode: [ 'tls' ] @@ -104,7 +110,10 @@ jobs: python-version: 3.12 - name: build binary - run: CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go + run: | + go mod edit -go=${{ matrix.go-version }} + go mod tidy + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go - name: Generate certificate run: | @@ -113,12 +122,12 @@ jobs: openssl req -new -passin file:passphrase.txt -key server.key -out server.csr -subj "/C=FR/O=krkr/OU=Domain Control Validated/CN=*.krkr.io" openssl rsa -in server.key -passin file:passphrase.txt -out dnscollector.key openssl x509 -req -days 36500 -in server.csr -signkey dnscollector.key -out dnscollector.crt - mv dnscollector.key ./testsdata/ - mv dnscollector.crt ./testsdata/ + mv dnscollector.key ./tests/testsdata/ + mv dnscollector.crt ./tests/testsdata/ - name: Deploy coredns docker image run: | - sudo docker run -d --network="host" --name=coredns -v $PWD/testsdata/:$PWD/testsdata/ -v /tmp/:/tmp/ coredns/coredns:${{ matrix.coredns }} -conf $PWD/testsdata/coredns/coredns_${{ matrix.mode }}.conf + sudo docker run -d --network="host" --name=coredns -v $PWD/tests/testsdata/:$PWD/tests/testsdata/ -v /tmp/:/tmp/ coredns/coredns:${{ matrix.coredns }} -conf 
$PWD/tests/testsdata/coredns/coredns_${{ matrix.mode }}.conf until (dig -p 5553 www.github.com @127.0.0.1 | grep NOERROR); do sleep 5.0; done - name: Test ${{ matrix.mode }} @@ -134,7 +143,7 @@ jobs: strategy: matrix: - go-version: [ '1.21' ] + go-version: [ '1.22' ] dnsdist: [ '17', '18', '19' ] mode: [ 'dnstaptcp', 'dnstapunix' ] @@ -149,7 +158,10 @@ jobs: python-version: 3.12 - name: build binary - run: CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go + run: | + go mod edit -go=${{ matrix.go-version }} + go mod tidy + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go - name: add pdns user run: | @@ -158,7 +170,7 @@ jobs: - name: Deploy dnsdist docker image run: | - sudo docker run -d --network="host" --name=dnsdist --volume=$PWD/testsdata/powerdns/dnsdist_${{ matrix.mode }}.conf:/etc/dnsdist/conf.d/dnsdist.conf:z -v /tmp/:/tmp/ powerdns/dnsdist-${{ matrix.dnsdist }} + sudo docker run -d --network="host" --name=dnsdist --volume=$PWD/tests/testsdata/powerdns/dnsdist_${{ matrix.mode }}.conf:/etc/dnsdist/conf.d/dnsdist.conf:z -v /tmp/:/tmp/ powerdns/dnsdist-${{ matrix.dnsdist }} until (dig -p 5553 www.github.com @127.0.0.1 | grep NOERROR); do sleep 5.0; done - name: Test ${{ matrix.mode }} @@ -166,3 +178,49 @@ jobs: sudo python3 -m pip install dnstap_pb fstrm dnspython sudo python3 -m pip install --upgrade protobuf sudo -E python3 -m unittest tests.dnsquery_${{ matrix.mode }} -v + + dnsdist_doq: + runs-on: ubuntu-22.04 + + strategy: + matrix: + go-version: [ '1.22' ] + dnsdist: [ '19' ] + + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 + with: + go-version: ${{ matrix.go-version }} + - uses: actions/setup-python@v5 + with: + python-version: 3.12 + + - name: build binary + run: | + go mod edit -go=${{ matrix.go-version }} + go mod tidy + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go + + - name: download q + run: | + wget 
https://github.com/natesales/q/releases/download/v0.19.2/q_0.19.2_linux_amd64.tar.gz + tar xvf q_0.19.2_linux_amd64.tar.gz + + - name: Generate certificate + run: | + openssl genrsa 2048 > ca.key + openssl req -days 365 -new -x509 -nodes -key ca.key -out ca.crt -config <(echo -e "[ req ]\nprompt = no\ndistinguished_name = req_distinguished_name\n[ req_distinguished_name ]\ncountryName = LU\nstateOrProvinceName = Space\nlocalityName = Moon\norganizationName = Github\norganizationalUnitName = Lab\ncommonName = dnscollector.dev\nemailAddress = admin@dnscollector.dev") + echo -e "[ req ]\nprompt = no\ndistinguished_name = req_distinguished_name\nreq_extensions = req_ext\n[ req_distinguished_name ]\ncountryName = LU\nstateOrProvinceName = Space\nlocalityName = Moon\norganizationName = Github\norganizationalUnitName = DNScollector\ncommonName = dnscollector.dev\nemailAddress = admin@dnscollector.dev\n[ req_ext ]\nsubjectAltName = DNS: dnscollector.dev, IP: 127.0.0.1" > server.conf + openssl req -newkey rsa:2048 -nodes -keyout server.key -out server.csr --config server.conf + openssl x509 -req -days 365 -in server.csr -out server.crt -CA ca.crt -CAkey ca.key -extensions req_ext -extfile server.conf + sudo chmod 644 server.key + + - name: Deploy dnsdist docker image + run: | + sudo docker run -d --network="host" --name=dnsdist --volume=$PWD/tests/testsdata/powerdns/dnsdist_dox.conf:/etc/dnsdist/conf.d/dnsdist.conf:z --volume=$PWD/server.key:/etc/dnsdist/conf.d/server.key:z --volume=$PWD/server.crt:/etc/dnsdist/conf.d/server.crt:z powerdns/dnsdist-${{ matrix.dnsdist }} + until (dig -p 5553 www.github.com @127.0.0.1 | grep NOERROR); do sleep 5.0; done + + - name: Tests + run: | + sudo python3 -m unittest tests.dnsquery_dnstapdoq -v diff --git a/.github/workflows/testing-go.yml b/.github/workflows/testing-go.yml index 866bdbec..c9331893 100644 --- a/.github/workflows/testing-go.yml +++ b/.github/workflows/testing-go.yml @@ -22,21 +22,14 @@ jobs: strategy: matrix: 
os-version: ['ubuntu-22.04' ] #, 'macos-latest' - go-version: [ '1.20', '1.21' ] + go-version: [ '1.21', '1.22' ] package: - '.' - 'pkgconfig' - - 'pkglinker' - - 'pkgutils' + - 'pkginit' - 'dnsutils' - - 'collectors' - - 'loggers' + - 'workers' - 'transformers' - - 'netlib' - - 'processors' - # exclude: - # - os-version: macos-latest - # go-version: '1.20' runs-on: ${{ matrix.os-version }} @@ -50,28 +43,28 @@ jobs: run: | go version sudo go version - + - name: Set up Go for root if: runner.os != 'macOS' run: | which go sudo which go sudo ln -sf `which go` `sudo which go` || true - + - name: Show Go version run: | go version sudo go version - name: Test ${{ matrix.package }} - run: sudo go test -timeout 120s ./${{ matrix.package }}/ -race -cover -v + run: sudo go test -timeout 240s ./${{ matrix.package }}/ -race -cover -v int: runs-on: ubuntu-22.04 strategy: matrix: - go-version: ['1.20', '1.21'] + go-version: [ '1.21', '1.22' ] package: ['config', 'clientquery_dnstaptcp', 'clientquery_dnstapunix' ] steps: @@ -84,7 +77,10 @@ jobs: python-version: '3.12' - name: build binary - run: CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o go-dnscollector *.go + run: | + go mod edit -go=${{ matrix.go-version }} + go mod tidy + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o go-dnscollector *.go - name: Test ${{ matrix.package }} run: | @@ -97,7 +93,7 @@ jobs: strategy: matrix: - go-version: ['1.21'] + go-version: [ '1.21', '1.22' ] steps: - uses: actions/checkout@v4 @@ -106,10 +102,13 @@ jobs: go-version: ${{ matrix.go-version }} - name: build binary - run: CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go + run: | + go mod edit -go=${{ matrix.go-version }} + go mod tidy + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go - name: run collector - run: ./go-dnscollector -config ./testsdata/config_prom.yml & + run: ./go-dnscollector -config ./tests/testsdata/config_prom.yml & - name: download dnstap 
generator run: | @@ -117,7 +116,7 @@ jobs: tar xf go-dnstap-generator_${{ env.GENTAP }}_linux_amd64.tar.gz ./go-dnstap-generator -i 127.0.0.1 -p 6000 -n 1 env: - GENTAP: "0.5.0" + GENTAP: "0.7.0" - name: check prometheus format metrics run: | @@ -126,7 +125,7 @@ jobs: cd prometheus-$PROM_VERSION.linux-amd64/ curl -u admin:changeme http://127.0.0.1:8081/metrics | ./promtool check metrics env: - PROM_VERSION: "2.47.0" + PROM_VERSION: "2.51.1" count: runs-on: ubuntu-latest @@ -138,11 +137,11 @@ jobs: - name: Set up Go uses: actions/setup-go@v5 with: - go-version: "1.21" + go-version: "1.22" - id: count_tests run: | - data=$(sudo go test -v ./collectors ./processors ./dnsutils ./netlib ./loggers ./transformers ./pkgconfig ./pkglinker ./pkgutils ././ 2>&1 | grep -c RUN) + data=$(sudo go test -timeout 360s -v ./workers ./dnsutils ./transformers ./pkgconfig ./pkginit ./telemetry ././ 2>&1 | grep -c RUN) echo "Count of Tests: $data" echo "data=$data" >> $GITHUB_OUTPUT diff --git a/.github/workflows/testing-powerdns.yml b/.github/workflows/testing-powerdns.yml index a9d7d170..d759dc65 100644 --- a/.github/workflows/testing-powerdns.yml +++ b/.github/workflows/testing-powerdns.yml @@ -26,8 +26,8 @@ jobs: strategy: matrix: - go-version: [ '1.21' ] - dnsdist: [ '17', '18', '19' ] + go-version: [ '1.22' ] + dnsdist: [ '18', '19' ] steps: - uses: actions/checkout@v4 @@ -35,13 +35,18 @@ jobs: with: go-version: ${{ matrix.go-version }} - uses: actions/setup-python@v5 - + with: + python-version: 3.12 + - name: build binary - run: CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go + run: | + go mod edit -go=${{ matrix.go-version }} + go mod tidy + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go - name: Deploy dnsdist docker image run: | - sudo docker run -d --network="host" --name=dnsdist --volume=$PWD/testsdata/powerdns/dnsdist_protobuf.conf:/etc/dnsdist/conf.d/dnsdist.conf:z -v /tmp/:/tmp/ 
powerdns/dnsdist-${{ matrix.dnsdist }} + sudo docker run -d --network="host" --name=dnsdist --volume=$PWD/tests/testsdata/powerdns/dnsdist_protobuf.conf:/etc/dnsdist/conf.d/dnsdist.conf:z -v /tmp/:/tmp/ powerdns/dnsdist-${{ matrix.dnsdist }} until (dig -p 5553 www.github.com @127.0.0.1 | grep NOERROR); do sleep 5.0; done - name: Test dns query @@ -54,8 +59,8 @@ jobs: strategy: matrix: - go-version: [ '1.21' ] - recursor: [ '47', '48', '49' ] + go-version: [ '1.22' ] + recursor: [ '48', '49' ] steps: - uses: actions/checkout@v4 @@ -63,13 +68,18 @@ jobs: with: go-version: ${{ matrix.go-version }} - uses: actions/setup-python@v5 + with: + python-version: 3.12 - name: build binary - run: CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go + run: | + go mod edit -go=${{ matrix.go-version }} + go mod tidy + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-s -w" -o go-dnscollector *.go - name: Deploy recursor docker image run: | - sudo docker run -d --network="host" --name=recursor --volume=$PWD/testsdata/powerdns/pdns_recursor.lua:/etc/powerdns/recursor.lua:z --volume=$PWD/testsdata/powerdns/pdns_recursor.conf:/etc/powerdns/recursor.conf:z powerdns/pdns-recursor-${{ matrix.recursor }} + sudo docker run -d --network="host" --name=recursor --volume=$PWD/tests/testsdata/powerdns/pdns_recursor.lua:/etc/powerdns/recursor.lua:z --volume=$PWD/tests/testsdata/powerdns/pdns_recursor.conf:/etc/powerdns/recursor.conf:z powerdns/pdns-recursor-${{ matrix.recursor }} until (dig -p 5553 www.github.com @127.0.0.1 | grep NOERROR); do sleep 5.0; done - name: Test send query diff --git a/.gitignore b/.gitignore index 6cbf1513..24923bdd 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,10 @@ # Python __pycache__/ +# ignore some specifics files go-dnscollector bin/ -include/ \ No newline at end of file +include/ +docs/_integration/elasticsearch/data/ +docs/_integration/kafka/data/ +docs/_integration/fluentd/data/ \ No newline at end of file 
diff --git a/.golangci.yml b/.golangci.yml index e8958ea5..64902ef7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -25,4 +25,4 @@ linters: - gocritic # list of linters to use in the future: - #- gosec + #- gosec \ No newline at end of file diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json deleted file mode 100644 index 8d1f3ca6..00000000 --- a/.vscode/c_cpp_properties.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "configurations": [ - { - "name": "Linux", - "includePath": [ - "${workspaceFolder}/**", - "${workspaceFolder}/xdp/headers" - ], - "defines": [], - "compilerPath": "/usr/bin/clang", - "cStandard": "c17", - "cppStandard": "c++14", - "intelliSenseMode": "linux-clang-x64", - "configurationProvider": "ms-vscode.makefile-tools" - } - ], - "version": 4 -} \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 99a756b1..980aac99 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21.5-alpine3.17 as builder +FROM golang:1.22.4-alpine3.19 as builder ARG VERSION @@ -8,7 +8,7 @@ RUN apk add git \ && CGO_ENABLED=0 go build -ldflags="-s -w -X 'github.com/prometheus/common/version.Version=$VERSION'" -FROM alpine:3.19.1 +FROM alpine:3.20.0 RUN apk add --no-cache tzdata \ && mkdir -p /etc/dnscollector/ /var/dnscollector/ \ @@ -20,7 +20,7 @@ USER dnscollector COPY --from=builder /build/go-dnscollector /bin/go-dnscollector COPY --from=builder /build/docker-config.yml ./etc/dnscollector/config.yml -EXPOSE 6000/tcp 8080/tcp +EXPOSE 6000/tcp 8080/tcp 9165/tcp ENTRYPOINT ["/bin/go-dnscollector"] diff --git a/Makefile b/Makefile index bbbf9c11..1ffafb69 100644 --- a/Makefile +++ b/Makefile @@ -2,12 +2,13 @@ BINARY_NAME := go-dnscollector GO_VERSION := $(shell go env GOVERSION | sed -n 's/go\([0-9]\+\.[0-9]\+\).*/\1/p') -GO_LOGGER := 0.4.0 -GO_POWERDNS_PROTOBUF := 1.1.0 -GO_DNSTAP_PROTOBUF := 1.0.0 -GO_FRAMESTREAM := 0.7.0 -GO_CLIENTSYSLOG := 0.3.0 +GO_LOGGER := 1.0.0 +GO_POWERDNS_PROTOBUF := 1.1.1 
+GO_DNSTAP_PROTOBUF := 1.0.1 +GO_FRAMESTREAM := 0.10.0 +GO_CLIENTSYSLOG := 0.4.0 GO_TOPMAP := 1.0.0 +GO_NETUTILS := 0.4.0 BUILD_TIME := $(shell LANG=en_US date +"%F_%T_%z") COMMIT := $(shell git rev-parse --short HEAD) @@ -40,13 +41,14 @@ goversion: check-go @echo "Go version: $(GO_VERSION)" # Installs project dependencies. -dep: check-go +dep: goversion @go get github.com/dmachard/go-logger@v$(GO_LOGGER) @go get github.com/dmachard/go-powerdns-protobuf@v$(GO_POWERDNS_PROTOBUF) @go get github.com/dmachard/go-dnstap-protobuf@v$(GO_DNSTAP_PROTOBUF) @go get github.com/dmachard/go-framestream@v$(GO_FRAMESTREAM) @go get github.com/dmachard/go-clientsyslog@v$(GO_CLIENTSYSLOG) @go get github.com/dmachard/go-topmap@v$(GO_TOPMAP) + @go get github.com/dmachard/go-netutils@v$(GO_NETUTILS) @go mod edit -go=$(GO_VERSION) @go mod tidy @@ -70,13 +72,11 @@ lint: tests: check-go @go test -race -cover -v @go test ./pkgconfig/ -race -cover -v - @go test ./pkglinker/ -race -cover -v - @go test ./netlib/ -race -cover -v - @go test -timeout 90s ./dnsutils/ -race -cover -v + @go test ./pkginit/ -race -cover -v + @go test ./netutils/ -race -cover -v + @go test ./telemetry/ -race -cover -v @go test -timeout 90s ./transformers/ -race -cover -v - @go test -timeout 90s ./collectors/ -race -cover -v - @go test -timeout 90s ./loggers/ -race -cover -v - @go test -timeout 90s ./processors/ -race -cover -v + @go test -timeout 180s ./workers/ -race -cover -v # Cleans the project using go clean. clean: check-go diff --git a/README.md b/README.md index 90eeab54..967de097 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,9 @@

Go Report -Go version -Go tests -Go lines +Go version +Go tests +Go bench +Go lines

@@ -13,10 +14,11 @@ release

-`DNS-collector` acts as a passive high speed **ingestor** with **pipelining** support for your DNS logs, written in **Golang**. It allows enhancing your DNS logs by adding metadata, extracting usage patterns, and facilitating security analysis. The DNS traffic can be collected and aggregated from simultaneously [sources](./docs/collectors.md) like DNStap streams, network interface or log files and relays it to multiple other [listeners](./docs/loggers.md) with some [transformations](./docs/transformers.md) on it ([traffic filtering](./docs/transformers.md#dns-filtering), [user privacy](./docs/transformers.md#user-privacy), ...). +`DNS-collector` acts as a passive high speed **ingestor** with **pipelining** support for your DNS logs, written in **Golang**. It allows enhancing your DNS logs by adding metadata, extracting usage patterns, and facilitating security analysis. The DNS traffic can be collected and aggregated from simultaneously [sources](./docs/workers.md) like DNStap streams, network interface or log files and relays it to multiple other [listeners](./docs/workers.md) with some [transformations](./docs/transformers.md) on it ([traffic filtering](./docs/transformers.md#dns-filtering), [user privacy](./docs/transformers.md#user-privacy), ...). > Additionally, DNS-collector also support > +> - [Extended](https://github.com/dmachard/go-dns-collector/blob/main/docs/extended_dnstap.md) DNStap with TLS encryption, compression, and more metadata capabilities > - DNS protocol conversions (to [plain text](https://github.com/dmachard/go-dns-collector/blob/main/docs/configuration.md#custom-text-format), [JSON](https://github.com/dmachard/go-dns-collector/blob/main/docs/dnsjson.md), and more... 
) > - DNS parser with [Extension Mechanisms for DNS (EDNS)](https://github.com/dmachard/go-dns-collector/blob/main/docs/dnsparser.md) support > - IPv4/v6 defragmentation and TCP reassembly @@ -30,12 +32,12 @@ [![overview](./docs/_images/overview.png)](./docs/running_mode.md) -- **[Collectors](./docs/collectors.md)** +- **[Collectors & Loggers](./docs/workers.md)** - *Listen for logging traffic with streaming network protocols* - [`DNStap`](docs/collectors/collector_dnstap.md#dns-tap) with `tls`|`tcp`|`unix` transports support and [`proxifier`](docs/collectors/collector_dnstap.md#dns-tap-proxifier) - [`PowerDNS`](docs/collectors/collector_powerdns.md) streams with full support - - [`DNSMessage`](docs/collectors/collector_dnsmessage.md) for internal DNS data structure + - [`DNSMessage`](docs/collectors/collector_dnsmessage.md) to route DNS messages based on specific dns fields - [`TZSP`](docs/collectors/collector_tzsp.md) protocol support - *Live capture on a network interface* - [`AF_PACKET`](docs/collectors/collector_afpacket.md) socket with BPF filter @@ -43,33 +45,32 @@ - *Read text or binary files as input* - Read and tail on [`Plain text`](docs/collectors/collector_tail.md) files - Ingest [`PCAP`](docs/collectors/collector_fileingestor.md) or [`DNSTap`](docs/collectors/collector_fileingestor.md) files by watching a directory - -- **[Loggers](./docs/loggers.md)** - - *Local storage of your DNS logs in text or binary formats* - [`Stdout`](docs/loggers/logger_stdout.md) console in text or binary output - [`File`](docs/loggers/logger_file.md) with automatic rotation and compression - *Provide metrics and API* - - [`Prometheus`](docs/loggers/logger_prometheus.md) metrics + - [`Prometheus`](docs/loggers/logger_prometheus.md) exporter - [`Statsd`](docs/loggers/logger_statsd.md) support - [`REST API`](docs/loggers/logger_restapi.md) with [swagger](https://generator.swagger.io/?url=https://raw.githubusercontent.com/dmachard/go-dnscollector/main/docs/swagger.yml) 
to search DNS domains - *Send to remote host with generic transport protocol* - - [`TCP`](docs/loggers/logger_tcp.md) + - Raw [`TCP`](docs/loggers/logger_tcp.md) client - [`Syslog`](docs/loggers/logger_syslog.md) with TLS support - - [`DNSTap`](docs/loggers/logger_dnstap.md) protobuf messages with TLS support + - [`DNSTap`](docs/loggers/logger_dnstap.md) protobuf client - *Send to various sinks* - [`Fluentd`](docs/loggers/logger_fluentd.md) - [`InfluxDB`](docs/loggers/logger_influxdb.md) - - [`Loki`](docs/loggers/logger_loki.md) + - [`Loki`](docs/loggers/logger_loki.md) client - [`ElasticSearch`](docs/loggers/logger_elasticsearch.md) - [`Scalyr`](docs/loggers/logger_scalyr.md) - - [`Redis`](docs/loggers/logger_redis.md) - - [`Kafka`](docs/loggers/logger_kafka.md) + - [`Redis`](docs/loggers/logger_redis.md) publisher + - [`Kafka`](docs/loggers/logger_kafka.md) producer + - [`ClickHouse`](docs/loggers/logger_clickhouse.md) client *(not yet production ready)* - *Send to security tools* - [`Falco`](docs/loggers/logger_falco.md) - **[Transformers](./docs/transformers.md)** + - Custom [Relabeling](docs/transformers/transform_relabeling.md) for JSON structure - Add additionnal [Tags](docs/transformers/transform_atags.md) - Traffic [Filtering](docs/transformers/transform_trafficfiltering.md) and [Reducer](docs/transformers/transform_trafficreducer.md) - Latency [Computing](docs/transformers/transform_latency.md) @@ -108,6 +109,33 @@ INFO: 2023/12/24 14:43:29.043730 main - config OK! The [`_examples`](./docs/_examples) folder from documentation contains a number of [various configurations](./docs/examples.md) to get you started with the DNS-collector in different ways. 
+The [`_integration`](./docs/_integration) folder contains DNS-collector `configuration` files and `docker compose` examples for popular tools: + +- [Fluentd](./docs/_integration/fluentd/README.md) +- [Elasticsearch](./docs/_integration/elasticsearch/README.md) +- [Kafka](./docs/_integration/kafka/README.md) +- [InfluxDB](./docs/_integration/influxdb/README.md) +- [Prometheus](./docs/_integration/prometheus/README.md) +- [Loki](./docs/_integration/loki/README.md) + +## Telemetry + +Performance metrics are available to evaluate the efficiency of your pipelines. These metrics allow you to track: +- The number of incoming and outgoing packets processed by each worker +- The number of packets matching the policies applied (forwarded, dropped) +- The number of "discarded" packets +- Memory consumption +- CPU consumption + +A [built-in](./docs/dashboards/grafana_exporter.json) dashboard is available for monitoring these metrics. + +![dashboard](docs/_images/dashboard_global.png) + +## Performance + +Tuning may be necessary to deal with large traffic loads. +Please refer to the [performance tuning](./docs/performance.md) guide if needed. + ## Contributing See the [development guide](./docs/development.md) for more information on how to build it yourself. 
diff --git a/collectors/dnsmessage.go b/collectors/dnsmessage.go deleted file mode 100644 index 563fd8f2..00000000 --- a/collectors/dnsmessage.go +++ /dev/null @@ -1,385 +0,0 @@ -package collectors - -import ( - "bufio" - "fmt" - "net/http" - "os" - "reflect" - "regexp" - "strings" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" -) - -func isFileSource(matchSource string) bool { - return strings.HasPrefix(matchSource, "file://") -} - -func isURLSource(matchSource string) bool { - return strings.HasPrefix(matchSource, "http://") || strings.HasPrefix(matchSource, "https://") -} - -type MatchSource struct { - regexList []*regexp.Regexp - stringList []string -} - -type DNSMessage struct { - doneRun chan bool - doneMonitor chan bool - stopRun chan bool - stopMonitor chan bool - config *pkgconfig.Config - configChan chan *pkgconfig.Config - inputChan chan dnsutils.DNSMessage - logger *logger.Logger - name string - // RoutingHandler pkgutils.RoutingHandler - droppedRoutes []pkgutils.Worker - defaultRoutes []pkgutils.Worker - dropped chan string - droppedCount map[string]int -} - -func NewDNSMessage(loggers []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *DNSMessage { - logger.Info(pkgutils.PrefixLogCollector+"[%s] dnsmessage - enabled", name) - s := &DNSMessage{ - doneRun: make(chan bool), - doneMonitor: make(chan bool), - stopRun: make(chan bool), - stopMonitor: make(chan bool), - config: config, - configChan: make(chan *pkgconfig.Config), - inputChan: make(chan dnsutils.DNSMessage, config.Collectors.DNSMessage.ChannelBufferSize), - logger: logger, - name: name, - // RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - dropped: make(chan string), - droppedCount: map[string]int{}, - } - 
s.ReadConfig() - return s -} - -func (c *DNSMessage) GetName() string { return c.name } - -func (c *DNSMessage) AddDroppedRoute(wrk pkgutils.Worker) { - // c.RoutingHandler.AddDroppedRoute(wrk) - c.droppedRoutes = append(c.droppedRoutes, wrk) -} - -func (c *DNSMessage) AddDefaultRoute(wrk pkgutils.Worker) { - // c.RoutingHandler.AddDefaultRoute(wrk) - c.defaultRoutes = append(c.defaultRoutes, wrk) -} - -// deprecated function -func (c *DNSMessage) SetLoggers(loggers []pkgutils.Worker) {} - -// deprecated function -func (c *DNSMessage) Loggers() ([]chan dnsutils.DNSMessage, []string) { - return nil, nil -} - -func (c *DNSMessage) ReadConfigMatching(value interface{}) { - reflectedValue := reflect.ValueOf(value) - if reflectedValue.Kind() == reflect.Map { - keys := reflectedValue.MapKeys() - matchSrc := "" - srcKind := dnsutils.MatchingKindString - for _, k := range keys { - v := reflectedValue.MapIndex(k) - if k.Interface().(string) == "match-source" { - matchSrc = v.Interface().(string) - } - if k.Interface().(string) == "source-kind" { - srcKind = v.Interface().(string) - } - } - if len(matchSrc) > 0 { - sourceData, err := c.LoadData(matchSrc, srcKind) - if err != nil { - c.logger.Fatal(err) - } - if len(sourceData.regexList) > 0 { - value.(map[interface{}]interface{})[srcKind] = sourceData.regexList - } - if len(sourceData.stringList) > 0 { - value.(map[interface{}]interface{})[srcKind] = sourceData.stringList - } - } - } -} - -func (c *DNSMessage) GetInputChannel() chan dnsutils.DNSMessage { - return c.inputChan -} - -func (c *DNSMessage) ReadConfig() { - // load external file for include - if len(c.config.Collectors.DNSMessage.Matching.Include) > 0 { - for _, value := range c.config.Collectors.DNSMessage.Matching.Include { - c.ReadConfigMatching(value) - } - } - // load external file for exclude - if len(c.config.Collectors.DNSMessage.Matching.Exclude) > 0 { - for _, value := range c.config.Collectors.DNSMessage.Matching.Exclude { - c.ReadConfigMatching(value) 
- } - } -} - -func (c *DNSMessage) LoadData(matchSource string, srcKind string) (MatchSource, error) { - if isFileSource(matchSource) { - dataSource, err := c.LoadFromFile(matchSource, srcKind) - if err != nil { - c.logger.Fatal(err) - } - return dataSource, nil - } else if isURLSource(matchSource) { - dataSource, err := c.LoadFromURL(matchSource, srcKind) - if err != nil { - c.logger.Fatal(err) - } - return dataSource, nil - } - return MatchSource{}, fmt.Errorf("match source not supported %s", matchSource) -} - -func (c *DNSMessage) LoadFromURL(matchSource string, srcKind string) (MatchSource, error) { - c.LogInfo("loading matching source from url=%s", matchSource) - resp, err := http.Get(matchSource) - if err != nil { - return MatchSource{}, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - return MatchSource{}, fmt.Errorf("invalid status code: %d", resp.StatusCode) - } - - matchSources := MatchSource{} - scanner := bufio.NewScanner(resp.Body) - - switch srcKind { - case dnsutils.MatchingKindRegexp: - for scanner.Scan() { - matchSources.regexList = append(matchSources.regexList, regexp.MustCompile(scanner.Text())) - } - c.LogInfo("remote source loaded with %d entries kind=%s", len(matchSources.regexList), srcKind) - case dnsutils.MatchingKindString: - for scanner.Scan() { - matchSources.stringList = append(matchSources.stringList, scanner.Text()) - } - c.LogInfo("remote source loaded with %d entries kind=%s", len(matchSources.stringList), srcKind) - } - - return matchSources, nil -} - -func (c *DNSMessage) LoadFromFile(filePath string, srcKind string) (MatchSource, error) { - localFile := strings.TrimPrefix(filePath, "file://") - - c.LogInfo("loading matching source from file=%s", localFile) - file, err := os.Open(localFile) - if err != nil { - return MatchSource{}, fmt.Errorf("unable to open file: %w", err) - } - - matchSources := MatchSource{} - scanner := bufio.NewScanner(file) - - switch srcKind { - case 
dnsutils.MatchingKindRegexp: - for scanner.Scan() { - matchSources.regexList = append(matchSources.regexList, regexp.MustCompile(scanner.Text())) - } - c.LogInfo("file loaded with %d entries kind=%s", len(matchSources.regexList), srcKind) - case dnsutils.MatchingKindString: - for scanner.Scan() { - matchSources.stringList = append(matchSources.stringList, scanner.Text()) - } - c.LogInfo("file loaded with %d entries kind=%s", len(matchSources.stringList), srcKind) - } - - return matchSources, nil -} - -func (c *DNSMessage) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration...") - c.configChan <- config -} - -func (c *DNSMessage) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] dnsmessage - "+msg, v...) -} - -func (c *DNSMessage) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+"] dnsmessage - "+msg, v...) -} - -func (c *DNSMessage) Stop() { - c.LogInfo("stopping collector...") - - // read done channel and block until run is terminated - c.LogInfo("stopping to run...") - c.stopRun <- true - <-c.doneRun - - c.LogInfo("stopping monitor...") - c.stopMonitor <- true - <-c.doneMonitor -} - -func (c *DNSMessage) Run() { - c.LogInfo("starting collector...") - var err error - - // prepare next channels - // defaultRoutes, defaultNames := c.RoutingHandler.GetDefaultRoutes() - // droppedRoutes, droppedNames := c.RoutingHandler.GetDroppedRoutes() - defaultRoutes, defaultNames := pkgutils.GetRoutes(c.defaultRoutes) - droppedRoutes, droppedNames := pkgutils.GetRoutes(c.droppedRoutes) - - // prepare transforms - subprocessors := transformers.NewTransforms(&c.config.IngoingTransformers, c.logger, c.name, defaultRoutes, 0) - - // start goroutine to count dropped messsages - go c.MonitorNextStanzas() - - // read incoming dns message - c.LogInfo("waiting dns message to process...") -RUN_LOOP: - for { - select { - case <-c.stopRun: - c.doneRun <- true - break 
RUN_LOOP - - case cfg := <-c.configChan: - - // save the new config - c.config = cfg - c.ReadConfig() - - case dm, opened := <-c.inputChan: - if !opened { - c.LogInfo("channel closed, exit") - return - } - - // matching enabled, filtering DNS messages ? - matched := true - matchedInclude := false - matchedExclude := false - - if len(c.config.Collectors.DNSMessage.Matching.Include) > 0 { - err, matchedInclude = dm.Matching(c.config.Collectors.DNSMessage.Matching.Include) - if err != nil { - c.LogError(err.Error()) - } - if matched && matchedInclude { - matched = true - } else { - matched = false - } - } - - if len(c.config.Collectors.DNSMessage.Matching.Exclude) > 0 { - err, matchedExclude = dm.Matching(c.config.Collectors.DNSMessage.Matching.Exclude) - if err != nil { - c.LogError(err.Error()) - } - if matched && !matchedExclude { - matched = true - } else { - matched = false - } - } - - // apply tranforms on matched packets only - // init dns message with additionnals parts if necessary - if matched { - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - // c.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - for i := range droppedRoutes { - select { - case droppedRoutes[i] <- dm: - default: - c.dropped <- droppedNames[i] - } - } - continue - } - } - - // drop packet ? 
- if !matched { - // c.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - for i := range droppedRoutes { - select { - case droppedRoutes[i] <- dm: - default: - c.dropped <- droppedNames[i] - } - } - continue - } - - // send to next - // c.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - for i := range defaultRoutes { - select { - case defaultRoutes[i] <- dm: - default: - c.dropped <- defaultNames[i] - } - } - - } - - } - c.LogInfo("run terminated") -} - -func (c *DNSMessage) MonitorNextStanzas() { - watchInterval := 10 * time.Second - bufferFull := time.NewTimer(watchInterval) -FOLLOW_LOOP: - for { - select { - case <-c.stopMonitor: - close(c.dropped) - bufferFull.Stop() - c.doneMonitor <- true - break FOLLOW_LOOP - - case loggerName := <-c.dropped: - if _, ok := c.droppedCount[loggerName]; !ok { - c.droppedCount[loggerName] = 1 - } else { - c.droppedCount[loggerName]++ - } - - case <-bufferFull.C: - - for v, k := range c.droppedCount { - if k > 0 { - c.LogError("stanza[%s] buffer is full, %d dnsmessage(s) dropped", v, k) - c.droppedCount[v] = 0 - } - } - bufferFull.Reset(watchInterval) - - } - } - c.LogInfo("monitor terminated") -} diff --git a/collectors/dnsmessage_test.go b/collectors/dnsmessage_test.go deleted file mode 100644 index 47c0e902..00000000 --- a/collectors/dnsmessage_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package collectors - -import ( - "fmt" - "regexp" - "testing" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" - "github.com/dmachard/go-logger" -) - -func Test_DnsMessage_BufferLoggerIsFull(t *testing.T) { - // redirect stdout output to bytes buffer - logsChan := make(chan logger.LogEntry, 10) - lg := logger.New(true) - lg.SetOutputChannel((logsChan)) - - // init the collector and run-it - config := pkgconfig.GetFakeConfig() - c := 
NewDNSMessage(nil, config, lg, "test") - - // init next logger with a buffer of one element - nxt := pkgutils.NewFakeLoggerWithBufferSize(1) - c.AddDefaultRoute(nxt) - - // run collector - go c.Run() - - // add a shot of dnsmessages to collector - dmIn := dnsutils.GetFakeDNSMessage() - for i := 0; i < 512; i++ { - c.GetInputChannel() <- dmIn - } - - // waiting monitor to run in consumer - time.Sleep(12 * time.Second) - - for entry := range logsChan { - fmt.Println(entry) - pattern := regexp.MustCompile(processors.ExpectedBufferMsg511) - if pattern.MatchString(entry.Message) { - break - } - } - - // read dnsmessage from next logger - dmOut := <-nxt.GetInputChannel() - if dmOut.DNS.Qname != processors.ExpectedQname2 { - t.Errorf("invalid qname in dns message: %s", dmOut.DNS.Qname) - } - - // send second shot of packets to consumer - for i := 0; i < 1024; i++ { - c.GetInputChannel() <- dmIn - } - - // waiting monitor to run in consumer - time.Sleep(12 * time.Second) - - for entry := range logsChan { - fmt.Println(entry) - pattern := regexp.MustCompile(processors.ExpectedBufferMsg1023) - if pattern.MatchString(entry.Message) { - break - } - } - // read dnsmessage from next logger - dm2 := <-nxt.GetInputChannel() - if dm2.DNS.Qname != processors.ExpectedQname2 { - t.Errorf("invalid qname in dns message: %s", dm2.DNS.Qname) - } - - // stop all - c.Stop() - nxt.Stop() -} diff --git a/collectors/dnstap.go b/collectors/dnstap.go deleted file mode 100644 index d530f211..00000000 --- a/collectors/dnstap.go +++ /dev/null @@ -1,431 +0,0 @@ -package collectors - -import ( - "bufio" - "crypto/tls" - "errors" - "io" - "net" - "os" - "strconv" - "sync" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" - 
"github.com/dmachard/go-framestream" - "github.com/dmachard/go-logger" -) - -type Dnstap struct { - doneRun chan bool - doneMonitor chan bool - stopRun chan bool - stopMonitor chan bool - stopCalled bool - listen net.Listener - conns []net.Conn - sockPath string - defaultRoutes []pkgutils.Worker - droppedRoutes []pkgutils.Worker - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - name string - connMode string - connID int - droppedCount int - droppedProcessor chan int - tapProcessors []processors.DNSTapProcessor - sync.RWMutex -} - -func NewDnstap(loggers []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *Dnstap { - logger.Info(pkgutils.PrefixLogCollector+"[%s] dnstap - enabled", name) - s := &Dnstap{ - doneRun: make(chan bool), - doneMonitor: make(chan bool), - stopRun: make(chan bool), - stopMonitor: make(chan bool), - droppedProcessor: make(chan int), - config: config, - configChan: make(chan *pkgconfig.Config), - defaultRoutes: loggers, - logger: logger, - name: name, - } - s.ReadConfig() - return s -} - -func (c *Dnstap) GetName() string { return c.name } - -func (c *Dnstap) AddDroppedRoute(wrk pkgutils.Worker) { - c.droppedRoutes = append(c.droppedRoutes, wrk) -} - -func (c *Dnstap) AddDefaultRoute(wrk pkgutils.Worker) { - c.defaultRoutes = append(c.defaultRoutes, wrk) -} - -func (c *Dnstap) SetLoggers(loggers []pkgutils.Worker) { - c.defaultRoutes = loggers -} - -func (c *Dnstap) Loggers() ([]chan dnsutils.DNSMessage, []string) { - return pkgutils.GetRoutes(c.defaultRoutes) -} - -func (c *Dnstap) ReadConfig() { - if !pkgconfig.IsValidTLS(c.config.Collectors.Dnstap.TLSMinVersion) { - c.logger.Fatal("collector=dnstap - invalid tls min version") - } - - c.sockPath = c.config.Collectors.Dnstap.SockPath - c.connMode = "tcp" - - if len(c.config.Collectors.Dnstap.SockPath) > 0 { - c.connMode = "unix" - } else if c.config.Collectors.Dnstap.TLSSupport { - c.connMode = "tls" - } -} - 
-func (c *Dnstap) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration...") - c.configChan <- config -} - -func (c *Dnstap) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] dnstap - "+msg, v...) -} - -func (c *Dnstap) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+" dnstap - "+msg, v...) -} - -// func (c *Dnstap) LogConnInfo(connID int, msg string, v ...interface{}) { -// prefix := fmt.Sprintf(pkgutils.PrefixLogCollector+"[%s] dnstap#%d - ", c.name, connID) -// c.logger.Info(prefix+msg, v...) -// } - -// func (c *Dnstap) LogConnError(connID int, msg string, v ...interface{}) { -// prefix := fmt.Sprintf(pkgutils.PrefixLogCollector+"[%s] dnstap#%d - ", c.name, connID) -// c.logger.Error(prefix+msg, v...) -// } - -func (c *Dnstap) HandleConn(conn net.Conn) { - // close connection on function exit - defer conn.Close() - - var connID int - c.Lock() - c.connID++ - connID = c.connID - c.Unlock() - - // get peer address - peer := conn.RemoteAddr().String() - c.LogInfo("new connection #%d from %s", connID, peer ) - - // start dnstap processor - dnstapProcessor := processors.NewDNSTapProcessor( - connID, - c.config, - c.logger, - c.name, - c.config.Collectors.Dnstap.ChannelBufferSize, - ) - c.Lock() - c.tapProcessors = append(c.tapProcessors, dnstapProcessor) - c.Unlock() - - // run processor - go dnstapProcessor.Run(c.defaultRoutes, c.droppedRoutes) - - // frame stream library - r := bufio.NewReader(conn) - w := bufio.NewWriter(conn) - fs := framestream.NewFstrm(r, w, conn, 5*time.Second, []byte("protobuf:dnstap.Dnstap"), true) - - // init framestream receiver - if err := fs.InitReceiver(); err != nil { - c.LogError("conn #%d - stream initialization: %s", connID, err) - } else { - c.LogInfo("conn #%d - receiver framestream initialized", connID) - } - - // process incoming frame and send it to dnstap consumer channel - var err error - var frame 
*framestream.Frame - for { - frame, err = fs.RecvFrame(false) - if err != nil { - connClosed := false - - var opErr *net.OpError - if errors.As(err, &opErr) { - if errors.Is(opErr, net.ErrClosed) { - connClosed = true - } - } - if errors.Is(err, io.EOF) { - connClosed = true - } - - if connClosed { - c.LogInfo("conn #%d - connection closed with peer %s", connID, peer) - } else { - c.LogError("conn #%d - framestream reader error: %s", connID, err) - } - - // the Stop function is already called, don't stop again - if !c.stopCalled { - dnstapProcessor.Stop() - } - break - } - - if frame.IsControl() { - if err := fs.ResetReceiver(frame); err != nil { - if errors.Is(err, io.EOF) { - c.LogInfo("conn #%d - framestream reseted by sender", connID) - } else { - c.LogError("conn #%d - unexpected control framestream: %s", connID, err) - } - - } - break - } - - // send payload to the channel - select { - case dnstapProcessor.GetChannel() <- frame.Data(): // Successful send to channel - default: - c.droppedProcessor <- 1 - } - } - - // to avoid lock if the Stop function is already called - if c.stopCalled { - c.LogInfo("conn #%d - connection handler exited", connID) - return - } - - // here the connection is closed, - // then removes the current tap processor from the list - c.Lock() - for i, t := range c.tapProcessors { - if t.ConnID == connID { - c.tapProcessors = append(c.tapProcessors[:i], c.tapProcessors[i+1:]...) - } - } - - // finnaly removes the current connection from the list - for j, cn := range c.conns { - if cn == conn { - c.conns = append(c.conns[:j], c.conns[j+1:]...) 
- conn = nil - } - } - c.Unlock() - - c.LogInfo("conn #%d - connection handler terminated", connID) -} - -func (c *Dnstap) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *Dnstap) Stop() { - c.Lock() - defer c.Unlock() - - // to avoid some lock situations when the remose side closes - // the connection at the same time of this Stop function - c.stopCalled = true - c.LogInfo("stopping collector...") - - // stop all powerdns processors - c.LogInfo("cleanup all active processors...") - for _, tapProc := range c.tapProcessors { - tapProc.Stop() - } - - // closing properly current connections if exists - c.LogInfo("closing connected peers...") - for _, conn := range c.conns { - netlib.Close(conn, c.config.Collectors.Dnstap.ResetConn) - } - - // Finally close the listener to unblock accept - c.LogInfo("stop listening...") - c.listen.Close() - - // stop monitor goroutine - c.LogInfo("stopping monitor...") - c.stopMonitor <- true - <-c.doneMonitor - - // read done channel and block until run is terminated - c.LogInfo("stopping run...") - c.stopRun <- true - <-c.doneRun -} - -func (c *Dnstap) Listen() error { - c.Lock() - defer c.Unlock() - - c.LogInfo("running in background...") - - var err error - var listener net.Listener - addrlisten := c.config.Collectors.Dnstap.ListenIP + ":" + strconv.Itoa(c.config.Collectors.Dnstap.ListenPort) - - if len(c.sockPath) > 0 { - _ = os.Remove(c.sockPath) - } - - // listening with tls enabled ? 
- if c.config.Collectors.Dnstap.TLSSupport { - c.LogInfo("tls support enabled") - var cer tls.Certificate - cer, err = tls.LoadX509KeyPair(c.config.Collectors.Dnstap.CertFile, c.config.Collectors.Dnstap.KeyFile) - if err != nil { - c.logger.Fatal("loading certificate failed:", err) - } - - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{cer}, - MinVersion: tls.VersionTLS12, - } - - // update tls min version according to the user config - tlsConfig.MinVersion = pkgconfig.TLSVersion[c.config.Collectors.Dnstap.TLSMinVersion] - - if len(c.sockPath) > 0 { - listener, err = tls.Listen(netlib.SocketUnix, c.sockPath, tlsConfig) - } else { - listener, err = tls.Listen(netlib.SocketTCP, addrlisten, tlsConfig) - } - - } else { - // basic listening - if len(c.sockPath) > 0 { - listener, err = net.Listen(netlib.SocketUnix, c.sockPath) - } else { - listener, err = net.Listen(netlib.SocketTCP, addrlisten) - } - } - - // something is wrong ? - if err != nil { - return err - } - c.LogInfo("is listening on %s://%s", c.connMode, listener.Addr()) - c.listen = listener - return nil -} - -func (c *Dnstap) MonitorCollector() { - watchInterval := 10 * time.Second - bufferFull := time.NewTimer(watchInterval) -MONITOR_LOOP: - for { - select { - case <-c.droppedProcessor: - c.droppedCount++ - case <-c.stopMonitor: - close(c.droppedProcessor) - bufferFull.Stop() - c.doneMonitor <- true - break MONITOR_LOOP - case <-bufferFull.C: - if c.droppedCount > 0 { - c.LogError("processor buffer is full, %d packet(s) dropped", c.droppedCount) - c.droppedCount = 0 - } - bufferFull.Reset(watchInterval) - } - } - c.LogInfo("monitor terminated") -} - -func (c *Dnstap) Run() { - c.LogInfo("starting collector...") - if c.listen == nil { - if err := c.Listen(); err != nil { - c.logger.Fatal(pkgutils.PrefixLogCollector+"["+c.name+"] dnstap listening failed: ", err) - } - } - - // start goroutine to count dropped messsages - go c.MonitorCollector() - - // goroutine to Accept() blocks waiting for new 
connection. - acceptChan := make(chan net.Conn) - go func() { - for { - conn, err := c.listen.Accept() - if err != nil { - return - } - acceptChan <- conn - } - }() - -RUN_LOOP: - for { - select { - case <-c.stopRun: - close(acceptChan) - c.doneRun <- true - break RUN_LOOP - - case cfg := <-c.configChan: - - // save the new config - c.config = cfg - c.ReadConfig() - - // refresh config for all conns - for i := range c.tapProcessors { - c.tapProcessors[i].ConfigChan <- cfg - } - - case conn, opened := <-acceptChan: - if !opened { - return - } - - if (c.connMode == "tls" || c.connMode == "tcp") && c.config.Collectors.Dnstap.RcvBufSize > 0 { - before, actual, err := netlib.SetSockRCVBUF( - conn, - c.config.Collectors.Dnstap.RcvBufSize, - c.config.Collectors.Dnstap.TLSSupport, - ) - if err != nil { - c.logger.Fatal(pkgutils.PrefixLogCollector+"["+c.name+"] dnstap - unable to set SO_RCVBUF: ", err) - } - c.LogInfo("set SO_RCVBUF option, value before: %d, desired: %d, actual: %d", before, - c.config.Collectors.Dnstap.RcvBufSize, actual) - } - - // to avoid lock if the Stop function is already called - if c.stopCalled { - continue - } - - c.Lock() - c.conns = append(c.conns, conn) - c.Unlock() - go c.HandleConn(conn) - } - - } - c.LogInfo("run terminated") -} diff --git a/collectors/dnstap_proxifier.go b/collectors/dnstap_proxifier.go deleted file mode 100644 index d0c59c52..00000000 --- a/collectors/dnstap_proxifier.go +++ /dev/null @@ -1,269 +0,0 @@ -package collectors - -import ( - "bufio" - "crypto/tls" - "net" - "os" - "strconv" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-framestream" - "github.com/dmachard/go-logger" -) - -type DnstapProxifier struct { - doneRun chan bool - stopRun chan bool - listen net.Listener - conns []net.Conn - 
sockPath string - defaultRoutes []pkgutils.Worker - droppedRoutes []pkgutils.Worker - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - name string - stopping bool - RoutingHandler pkgutils.RoutingHandler -} - -func NewDnstapProxifier(loggers []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *DnstapProxifier { - logger.Info(pkgutils.PrefixLogCollector+"[%s] dnstaprelay - enabled", name) - s := &DnstapProxifier{ - doneRun: make(chan bool), - stopRun: make(chan bool), - config: config, - configChan: make(chan *pkgconfig.Config), - defaultRoutes: loggers, - logger: logger, - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - } - s.ReadConfig() - return s -} - -func (c *DnstapProxifier) GetName() string { return c.name } - -func (c *DnstapProxifier) AddDroppedRoute(wrk pkgutils.Worker) { - c.droppedRoutes = append(c.droppedRoutes, wrk) -} - -func (c *DnstapProxifier) AddDefaultRoute(wrk pkgutils.Worker) { - c.defaultRoutes = append(c.defaultRoutes, wrk) -} - -func (c *DnstapProxifier) SetLoggers(loggers []pkgutils.Worker) { - c.defaultRoutes = loggers -} - -func (c *DnstapProxifier) Loggers() []chan dnsutils.DNSMessage { - channels := []chan dnsutils.DNSMessage{} - for _, p := range c.defaultRoutes { - channels = append(channels, p.GetInputChannel()) - } - return channels -} - -func (c *DnstapProxifier) ReadConfig() { - if !pkgconfig.IsValidTLS(c.config.Collectors.DnstapProxifier.TLSMinVersion) { - c.logger.Fatal(pkgutils.PrefixLogCollector + "[" + c.name + "] dnstaprelay - invalid tls min version") - } - - c.sockPath = c.config.Collectors.DnstapProxifier.SockPath -} - -func (c *DnstapProxifier) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration...") - c.configChan <- config -} - -func (c *DnstapProxifier) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] dnstaprelay - "+msg, v...) 
-} - -func (c *DnstapProxifier) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+"] dnstaprelay - "+msg, v...) -} - -func (c *DnstapProxifier) HandleFrame(recvFrom chan []byte, sendTo []chan dnsutils.DNSMessage) { - for data := range recvFrom { - // init DNS message container - dm := dnsutils.DNSMessage{} - dm.Init() - - // register payload - dm.DNSTap.Payload = data - - // forward to outputs - for i := range sendTo { - sendTo[i] <- dm - } - } -} - -func (c *DnstapProxifier) HandleConn(conn net.Conn) { - // close connection on function exit - defer conn.Close() - - // get peer address - peer := conn.RemoteAddr().String() - c.LogInfo("new connection from %s\n", peer) - - recvChan := make(chan []byte, 512) - go c.HandleFrame(recvChan, c.Loggers()) - - // frame stream library - r := bufio.NewReader(conn) - w := bufio.NewWriter(conn) - fs := framestream.NewFstrm(r, w, conn, 5*time.Second, []byte("protobuf:dnstap.Dnstap"), true) - - // init framestream receiver - if err := fs.InitReceiver(); err != nil { - c.LogError("error stream receiver initialization: %s", err) - return - } else { - c.LogInfo("receiver framestream initialized") - } - - // process incoming frame and send it to recv channel - err := fs.ProcessFrame(recvChan) - // if err != nil && !c.stopping { - // c.LogError("transport error: %s", err) - if err != nil { - if !c.stopping { - c.LogError("transport error: %s", err) - } - } - - close(recvChan) - - c.LogInfo("%s - connection closed\n", peer) -} - -func (c *DnstapProxifier) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *DnstapProxifier) Stop() { - c.LogInfo("stopping collector...") - c.stopping = true - - // closing properly current connections if exists - for _, conn := range c.conns { - peer := conn.RemoteAddr().String() - c.LogInfo("%s - closing connection...", peer) - conn.Close() - } - // Finally close the listener to unblock accept - c.LogInfo("stop listening...") - 
c.listen.Close() - - // read done channel and block until run is terminated - c.stopRun <- true - <-c.doneRun -} - -func (c *DnstapProxifier) Listen() error { - c.LogInfo("running in background...") - - var err error - var listener net.Listener - addrlisten := c.config.Collectors.DnstapProxifier.ListenIP + ":" + strconv.Itoa(c.config.Collectors.DnstapProxifier.ListenPort) - - if len(c.sockPath) > 0 { - _ = os.Remove(c.sockPath) - } - - // listening with tls enabled ? - if c.config.Collectors.DnstapProxifier.TLSSupport { - c.LogInfo("tls support enabled") - var cer tls.Certificate - cer, err = tls.LoadX509KeyPair(c.config.Collectors.DnstapProxifier.CertFile, c.config.Collectors.DnstapProxifier.KeyFile) - if err != nil { - c.logger.Fatal("loading certificate failed:", err) - } - - // prepare tls configuration - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{cer}, - MinVersion: tls.VersionTLS12, - } - - // update tls min version according to the user config - tlsConfig.MinVersion = pkgconfig.TLSVersion[c.config.Collectors.DnstapProxifier.TLSMinVersion] - - if len(c.sockPath) > 0 { - listener, err = tls.Listen(netlib.SocketUnix, c.sockPath, tlsConfig) - } else { - listener, err = tls.Listen(netlib.SocketTCP, addrlisten, tlsConfig) - } - } else { - // basic listening - if len(c.sockPath) > 0 { - listener, err = net.Listen(netlib.SocketUnix, c.sockPath) - } else { - listener, err = net.Listen(netlib.SocketTCP, addrlisten) - } - } - - // something is wrong ? - if err != nil { - return err - } - c.LogInfo("is listening on %s", listener.Addr()) - c.listen = listener - return nil -} - -func (c *DnstapProxifier) Run() { - c.LogInfo("starting collector...") - if c.listen == nil { - if err := c.Listen(); err != nil { - c.logger.Fatal("collector dnstap listening failed: ", err) - } - } - - // goroutine to Accept() blocks waiting for new connection. 
- acceptChan := make(chan net.Conn) - go func() { - for { - conn, err := c.listen.Accept() - if err != nil { - return - } - acceptChan <- conn - } - }() - -RUN_LOOP: - for { - select { - case <-c.stopRun: - close(acceptChan) - c.doneRun <- true - break RUN_LOOP - - case cfg := <-c.configChan: - - // save the new config - c.config = cfg - c.ReadConfig() - - case conn, opened := <-acceptChan: - if !opened { - return - } - - c.conns = append(c.conns, conn) - go c.HandleConn(conn) - } - } - - c.LogInfo("run terminated") -} diff --git a/collectors/dnstap_test.go b/collectors/dnstap_test.go deleted file mode 100644 index 7bb3986a..00000000 --- a/collectors/dnstap_test.go +++ /dev/null @@ -1,170 +0,0 @@ -package collectors - -import ( - "bufio" - "fmt" - "log" - "net" - "regexp" - "testing" - "time" - - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" - "github.com/dmachard/go-framestream" - "github.com/dmachard/go-logger" - "google.golang.org/protobuf/proto" -) - -func Test_DnstapCollector(t *testing.T) { - testcases := []struct { - name string - mode string - address string - listenPort int - operation string - }{ - { - name: "tcp_default", - mode: netlib.SocketTCP, - address: ":6000", - listenPort: 0, - operation: "CLIENT_QUERY", - }, - { - name: "tcp_custom_port", - mode: netlib.SocketTCP, - address: ":7000", - listenPort: 7000, - operation: "CLIENT_QUERY", - }, - { - name: "unix_default", - mode: netlib.SocketUnix, - address: "/tmp/dnscollector.sock", - listenPort: 0, - operation: "CLIENT_QUERY", - }, - } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - g := pkgutils.NewFakeLogger() - - config := pkgconfig.GetFakeConfig() - if tc.listenPort > 0 { - config.Collectors.Dnstap.ListenPort = tc.listenPort - } - if tc.mode == netlib.SocketUnix { - 
config.Collectors.Dnstap.SockPath = tc.address - } - - c := NewDnstap([]pkgutils.Worker{g}, config, logger.New(false), "test") - if err := c.Listen(); err != nil { - log.Fatal("collector listening error: ", err) - } - - go c.Run() - - conn, err := net.Dial(tc.mode, tc.address) - if err != nil { - t.Error("could not connect: ", err) - } - defer conn.Close() - - r := bufio.NewReader(conn) - w := bufio.NewWriter(conn) - fs := framestream.NewFstrm(r, w, conn, 5*time.Second, []byte("protobuf:dnstap.Dnstap"), true) - if err := fs.InitSender(); err != nil { - t.Fatalf("framestream init error: %s", err) - } else { - frame := &framestream.Frame{} - - // get fake dns question - dnsquery, err := processors.GetFakeDNS() - if err != nil { - t.Fatalf("dns question pack error") - } - - // get fake dnstap message - dtQuery := processors.GetFakeDNSTap(dnsquery) - - // serialize to bytes - data, err := proto.Marshal(dtQuery) - if err != nil { - t.Fatalf("dnstap proto marshal error %s", err) - } - - // send query - frame.Write(data) - if err := fs.SendFrame(frame); err != nil { - t.Fatalf("send frame error %s", err) - } - } - - // waiting message in channel - msg := <-g.GetInputChannel() - if msg.DNSTap.Operation != tc.operation { - t.Errorf("want %s, got %s", tc.operation, msg.DNSTap.Operation) - } - - c.Stop() - }) - } -} - -// Testcase for https://github.com/dmachard/go-dnscollector/issues/461 -// Support Bind9 with dnstap closing. 
-func Test_DnstapCollector_CloseFrameStream(t *testing.T) { - // redirect stdout output to bytes buffer - logsChan := make(chan logger.LogEntry, 10) - lg := logger.New(true) - lg.SetOutputChannel((logsChan)) - - config := pkgconfig.GetFakeConfig() - config.Collectors.Dnstap.SockPath = "/tmp/dnscollector.sock" - - // start the collector in unix mode - g := pkgutils.NewFakeLogger() - c := NewDnstap([]pkgutils.Worker{g}, config, lg, "test") - if err := c.Listen(); err != nil { - log.Fatal("collector listening error: ", err) - } - - go c.Run() - - // simulate dns server connection to collector - conn, err := net.Dial(netlib.SocketUnix, "/tmp/dnscollector.sock") - if err != nil { - t.Error("could not connect: ", err) - } - defer conn.Close() - - r := bufio.NewReader(conn) - w := bufio.NewWriter(conn) - fs := framestream.NewFstrm(r, w, conn, 5*time.Second, []byte("protobuf:dnstap.Dnstap"), true) - if err := fs.InitSender(); err != nil { - t.Fatalf("framestream init error: %s", err) - } - - // checking reset - errClose := fs.ResetSender() - if errClose != nil { - t.Errorf("reset sender error: %s", errClose) - } - - regxp := ".*framestream reseted by sender.*" - for entry := range logsChan { - fmt.Println(entry) - pattern := regexp.MustCompile(regxp) - if pattern.MatchString(entry.Message) { - break - } - } - - // cleanup - c.Stop() - -} diff --git a/collectors/file_ingestor.go b/collectors/file_ingestor.go deleted file mode 100644 index 7a5ea16c..00000000 --- a/collectors/file_ingestor.go +++ /dev/null @@ -1,480 +0,0 @@ -package collectors - -import ( - "errors" - "fmt" - "io" - "log" - "math" - "os" - "path/filepath" - "sync" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" - "github.com/dmachard/go-logger" - 
framestream "github.com/farsightsec/golang-framestream" - "github.com/fsnotify/fsnotify" - "github.com/google/gopacket" - "github.com/google/gopacket/layers" - "github.com/google/gopacket/pcapgo" -) - -var waitFor = 10 * time.Second - -func IsValidMode(mode string) bool { - switch mode { - case - pkgconfig.ModePCAP, - pkgconfig.ModeDNSTap: - return true - } - return false -} - -type FileIngestor struct { - done chan bool - exit chan bool - droppedRoutes []pkgutils.Worker - defaultRoutes []pkgutils.Worker - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - watcher *fsnotify.Watcher - watcherTimers map[string]*time.Timer - dnsProcessor processors.DNSProcessor - dnstapProcessor processors.DNSTapProcessor - filterDNSPort int - identity string - name string - mu sync.Mutex -} - -func NewFileIngestor(loggers []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *FileIngestor { - logger.Info(pkgutils.PrefixLogCollector+"[%s] fileingestor - enabled", name) - s := &FileIngestor{ - done: make(chan bool), - exit: make(chan bool), - config: config, - configChan: make(chan *pkgconfig.Config), - defaultRoutes: loggers, - logger: logger, - name: name, - watcherTimers: make(map[string]*time.Timer), - } - s.ReadConfig() - return s -} - -func (c *FileIngestor) GetName() string { return c.name } - -func (c *FileIngestor) AddDroppedRoute(wrk pkgutils.Worker) { - c.droppedRoutes = append(c.droppedRoutes, wrk) -} - -func (c *FileIngestor) AddDefaultRoute(wrk pkgutils.Worker) { - c.defaultRoutes = append(c.defaultRoutes, wrk) -} - -func (c *FileIngestor) SetLoggers(loggers []pkgutils.Worker) { - c.defaultRoutes = loggers -} - -func (c *FileIngestor) Loggers() ([]chan dnsutils.DNSMessage, []string) { - return pkgutils.GetRoutes(c.defaultRoutes) -} - -func (c *FileIngestor) ReadConfig() { - if !IsValidMode(c.config.Collectors.FileIngestor.WatchMode) { - 
c.logger.Fatal(pkgutils.PrefixLogCollector+"["+c.name+"]fileingestor - invalid mode: ", c.config.Collectors.FileIngestor.WatchMode) - } - - c.identity = c.config.GetServerIdentity() - c.filterDNSPort = c.config.Collectors.FileIngestor.PcapDNSPort - - c.LogInfo("watching directory [%s] to find [%s] files", - c.config.Collectors.FileIngestor.WatchDir, - c.config.Collectors.FileIngestor.WatchMode) -} - -func (c *FileIngestor) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration...") - c.configChan <- config -} - -func (c *FileIngestor) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] fileingestor - "+msg, v...) -} - -func (c *FileIngestor) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+"] fileingestor - "+msg, v...) -} - -func (c *FileIngestor) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *FileIngestor) Stop() { - c.LogInfo("stopping collector...") - - // exit to close properly - c.exit <- true - - // read done channel and block until run is terminated - <-c.done - close(c.done) -} - -func (c *FileIngestor) ProcessFile(filePath string) { - switch c.config.Collectors.FileIngestor.WatchMode { - case pkgconfig.ModePCAP: - // process file with pcap extension only - if filepath.Ext(filePath) == ".pcap" || filepath.Ext(filePath) == ".pcap.gz" { - c.LogInfo("file ready to process %s", filePath) - go c.ProcessPcap(filePath) - } - case pkgconfig.ModeDNSTap: - // process dnstap - if filepath.Ext(filePath) == ".fstrm" { - c.LogInfo("file ready to process %s", filePath) - go c.ProcessDnstap(filePath) - } - } -} - -func (c *FileIngestor) ProcessPcap(filePath string) { - // open the file - f, err := os.Open(filePath) - if err != nil { - c.LogError("unable to read file: %s", err) - return - } - defer f.Close() - - // it is a pcap file ? 
- pcapHandler, err := pcapgo.NewReader(f) - if err != nil { - c.LogError("unable to read pcap file: %s", err) - return - } - - fileName := filepath.Base(filePath) - c.LogInfo("processing pcap file [%s]...", fileName) - - if pcapHandler.LinkType() != layers.LinkTypeEthernet { - c.LogError("pcap file [%s] ignored: %s", filePath, pcapHandler.LinkType()) - return - } - - dnsChan := make(chan netlib.DNSPacket) - udpChan := make(chan gopacket.Packet) - tcpChan := make(chan gopacket.Packet) - fragIP4Chan := make(chan gopacket.Packet) - fragIP6Chan := make(chan gopacket.Packet) - - packetSource := gopacket.NewPacketSource(pcapHandler, pcapHandler.LinkType()) - packetSource.DecodeOptions.Lazy = true - packetSource.NoCopy = true - - // defrag ipv4 - go netlib.IPDefragger(fragIP4Chan, udpChan, tcpChan) - // defrag ipv6 - go netlib.IPDefragger(fragIP6Chan, udpChan, tcpChan) - // tcp assembly - go netlib.TCPAssembler(tcpChan, dnsChan, c.filterDNSPort) - // udp processor - go netlib.UDPProcessor(udpChan, dnsChan, c.filterDNSPort) - - go func() { - nbPackets := 0 - lastReceivedTime := time.Now() - for { - select { - case dnsPacket, noMore := <-dnsChan: - if !noMore { - goto end - } - - lastReceivedTime = time.Now() - // prepare dns message - dm := dnsutils.DNSMessage{} - dm.Init() - - dm.NetworkInfo.Family = dnsPacket.IPLayer.EndpointType().String() - dm.NetworkInfo.QueryIP = dnsPacket.IPLayer.Src().String() - dm.NetworkInfo.ResponseIP = dnsPacket.IPLayer.Dst().String() - dm.NetworkInfo.QueryPort = dnsPacket.TransportLayer.Src().String() - dm.NetworkInfo.ResponsePort = dnsPacket.TransportLayer.Dst().String() - dm.NetworkInfo.Protocol = dnsPacket.TransportLayer.EndpointType().String() - dm.NetworkInfo.IPDefragmented = dnsPacket.IPDefragmented - dm.NetworkInfo.TCPReassembled = dnsPacket.TCPReassembled - - dm.DNS.Payload = dnsPacket.Payload - dm.DNS.Length = len(dnsPacket.Payload) - - dm.DNSTap.Identity = c.identity - dm.DNSTap.TimeSec = dnsPacket.Timestamp.Second() - 
dm.DNSTap.TimeNsec = int(dnsPacket.Timestamp.UnixNano()) - - // count it - nbPackets++ - - // send DNS message to DNS processor - c.dnsProcessor.GetChannel() <- dm - case <-time.After(10 * time.Second): - elapsed := time.Since(lastReceivedTime) - if elapsed >= 10*time.Second { - close(dnsChan) - } - } - } - end: - c.LogInfo("pcap file [%s]: %d DNS packet(s) detected", fileName, nbPackets) - }() - - nbPackets := 0 - for { - packet, err := packetSource.NextPacket() - - if errors.Is(err, io.EOF) { - break - } - if err != nil { - c.LogError("unable to read packet: %s", err) - break - } - - nbPackets++ - - // some security checks - if packet.NetworkLayer() == nil { - continue - } - if packet.TransportLayer() == nil { - continue - } - - // ipv4 fragmented packet ? - if packet.NetworkLayer().LayerType() == layers.LayerTypeIPv4 { - ip4 := packet.NetworkLayer().(*layers.IPv4) - if ip4.Flags&layers.IPv4MoreFragments == 1 || ip4.FragOffset > 0 { - fragIP4Chan <- packet - continue - } - } - - // ipv6 fragmented packet ? - if packet.NetworkLayer().LayerType() == layers.LayerTypeIPv6 { - v6frag := packet.Layer(layers.LayerTypeIPv6Fragment) - if v6frag != nil { - fragIP6Chan <- packet - continue - } - } - - // tcp or udp packets ? - if packet.TransportLayer().LayerType() == layers.LayerTypeUDP { - udpChan <- packet - } - if packet.TransportLayer().LayerType() == layers.LayerTypeTCP { - tcpChan <- packet - } - - } - - c.LogInfo("pcap file [%s] processing terminated, %d packet(s) read", fileName, nbPackets) - - // remove it ? 
- if c.config.Collectors.FileIngestor.DeleteAfter { - c.LogInfo("delete file [%s]", fileName) - os.Remove(filePath) - } - - // close chan - close(fragIP4Chan) - close(fragIP6Chan) - close(udpChan) - close(tcpChan) - - // remove event timer for this file - c.RemoveEvent(filePath) -} - -func (c *FileIngestor) ProcessDnstap(filePath string) error { - // open the file - f, err := os.Open(filePath) - if err != nil { - return err - } - defer f.Close() - - dnstapDecoder, err := framestream.NewDecoder(f, &framestream.DecoderOptions{ - ContentType: []byte("protobuf:dnstap.Dnstap"), - Bidirectional: false, - }) - - if err != nil { - return fmt.Errorf("failed to create framestream Decoder: %w", err) - } - - fileName := filepath.Base(filePath) - c.LogInfo("processing dnstap file [%s]", fileName) - for { - buf, err := dnstapDecoder.Decode() - if errors.Is(err, io.EOF) { - break - } - - newbuf := make([]byte, len(buf)) - copy(newbuf, buf) - - c.dnstapProcessor.GetChannel() <- newbuf - } - - // remove it ? - c.LogInfo("processing of [%s] terminated", fileName) - if c.config.Collectors.FileIngestor.DeleteAfter { - c.LogInfo("delete file [%s]", fileName) - os.Remove(filePath) - } - - // remove event timer for this file - c.RemoveEvent(filePath) - - return nil -} - -func (c *FileIngestor) RegisterEvent(filePath string) { - // Get timer. - c.mu.Lock() - t, ok := c.watcherTimers[filePath] - c.mu.Unlock() - - // No timer yet, so create one. - if !ok { - t = time.AfterFunc(math.MaxInt64, func() { c.ProcessFile(filePath) }) - t.Stop() - - c.mu.Lock() - c.watcherTimers[filePath] = t - c.mu.Unlock() - } - - // Reset the timer for this path, so it will start from 100ms again. 
- t.Reset(waitFor) -} - -func (c *FileIngestor) RemoveEvent(filePath string) { - c.mu.Lock() - delete(c.watcherTimers, filePath) - c.mu.Unlock() -} - -func (c *FileIngestor) Run() { - c.LogInfo("starting collector...") - - c.dnsProcessor = processors.NewDNSProcessor(c.config, c.logger, c.name, c.config.Collectors.FileIngestor.ChannelBufferSize) - go c.dnsProcessor.Run(c.defaultRoutes, c.droppedRoutes) - - // start dnstap subprocessor - c.dnstapProcessor = processors.NewDNSTapProcessor( - 0, - c.config, - c.logger, - c.name, - c.config.Collectors.FileIngestor.ChannelBufferSize, - ) - go c.dnstapProcessor.Run(c.defaultRoutes, c.droppedRoutes) - - // read current folder content - entries, err := os.ReadDir(c.config.Collectors.FileIngestor.WatchDir) - if err != nil { - c.LogError("unable to read folder: %s", err) - } - - for _, entry := range entries { - // ignore folder - if entry.IsDir() { - continue - } - - // prepare filepath - fn := filepath.Join(c.config.Collectors.FileIngestor.WatchDir, entry.Name()) - - switch c.config.Collectors.FileIngestor.WatchMode { - case pkgconfig.ModePCAP: - // process file with pcap extension - if filepath.Ext(fn) == ".pcap" || filepath.Ext(fn) == ".pcap.gz" { - go c.ProcessPcap(fn) - } - case pkgconfig.ModeDNSTap: - // process dnstap - if filepath.Ext(fn) == ".fstrm" { - go c.ProcessDnstap(fn) - } - } - } - - // then watch for new one - c.watcher, err = fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - // register the folder to watch - err = c.watcher.Add(c.config.Collectors.FileIngestor.WatchDir) - if err != nil { - log.Fatal(err) - } - - go func() { - for { - select { - // new config provided? - case cfg, opened := <-c.configChan: - if !opened { - return - } - c.config = cfg - c.ReadConfig() - - c.dnsProcessor.ConfigChan <- cfg - c.dnstapProcessor.ConfigChan <- cfg - - case event, ok := <-c.watcher.Events: - if !ok { // Channel was closed (i.e. Watcher.Close() was called). 
- return - } - - // detect activity on file - if !event.Has(fsnotify.Create) && !event.Has(fsnotify.Write) { - continue - } - - // register the event by the name - c.RegisterEvent(event.Name) - - case err, ok := <-c.watcher.Errors: - if !ok { - return - } - c.LogError("error:", err) - } - } - }() - - <-c.exit - - // stop watching - c.watcher.Close() - - // stop processors - c.dnsProcessor.Stop() - c.dnstapProcessor.Stop() - - c.LogInfo("run terminated") - c.done <- true -} diff --git a/collectors/file_ingestor_test.go b/collectors/file_ingestor_test.go deleted file mode 100644 index cdafd3c1..00000000 --- a/collectors/file_ingestor_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package collectors - -import ( - "testing" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-logger" -) - -func Test_FileIngestor_Pcap(t *testing.T) { - g := pkgutils.NewFakeLogger() - config := pkgconfig.GetFakeConfig() - - // watch tests data folder - config.Collectors.FileIngestor.WatchDir = "./../testsdata/pcap/" - - // init collector - c := NewFileIngestor([]pkgutils.Worker{g}, config, logger.New(false), "test") - go c.Run() - - // waiting message in channel - for { - // read dns message from channel - msg := <-g.GetInputChannel() - - // check qname - if msg.DNSTap.Operation == dnsutils.DNSTapClientQuery { - break - } - } -} diff --git a/collectors/powerdns.go b/collectors/powerdns.go deleted file mode 100644 index b233feeb..00000000 --- a/collectors/powerdns.go +++ /dev/null @@ -1,388 +0,0 @@ -package collectors - -import ( - "bufio" - "crypto/tls" - "errors" - "fmt" - "io" - "net" - "strconv" - "sync" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - 
"github.com/dmachard/go-dnscollector/processors" - "github.com/dmachard/go-logger" - powerdns_protobuf "github.com/dmachard/go-powerdns-protobuf" -) - -type ProtobufPowerDNS struct { - doneRun chan bool - stopRun chan bool - doneMonitor chan bool - stopMonitor chan bool - cleanup chan bool - stopCalled bool - listen net.Listener - connID int - conns []net.Conn - droppedRoutes []pkgutils.Worker - defaultRoutes []pkgutils.Worker - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - name string - droppedCount int - dropped chan int - pdnsProcessors []*processors.PdnsProcessor - sync.RWMutex -} - -func NewProtobufPowerDNS(loggers []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *ProtobufPowerDNS { - logger.Info(pkgutils.PrefixLogCollector+"[%s] powerdns - enabled", name) - s := &ProtobufPowerDNS{ - doneRun: make(chan bool), - doneMonitor: make(chan bool), - stopRun: make(chan bool), - stopMonitor: make(chan bool), - cleanup: make(chan bool), - dropped: make(chan int), - config: config, - configChan: make(chan *pkgconfig.Config), - defaultRoutes: loggers, - logger: logger, - name: name, - } - s.ReadConfig() - return s -} - -func (c *ProtobufPowerDNS) GetName() string { return c.name } - -func (c *ProtobufPowerDNS) AddDroppedRoute(wrk pkgutils.Worker) { - c.droppedRoutes = append(c.droppedRoutes, wrk) -} - -func (c *ProtobufPowerDNS) AddDefaultRoute(wrk pkgutils.Worker) { - c.defaultRoutes = append(c.defaultRoutes, wrk) -} - -func (c *ProtobufPowerDNS) SetLoggers(loggers []pkgutils.Worker) { - c.defaultRoutes = loggers -} - -func (c *ProtobufPowerDNS) Loggers() ([]chan dnsutils.DNSMessage, []string) { - return pkgutils.GetRoutes(c.defaultRoutes) -} - -func (c *ProtobufPowerDNS) ReadConfig() { - if !pkgconfig.IsValidTLS(c.config.Collectors.PowerDNS.TLSMinVersion) { - c.logger.Fatal(pkgutils.PrefixLogCollector + "[" + c.name + "] powerdns - invalid tls min version") - } -} - -func (c 
*ProtobufPowerDNS) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration...") - c.configChan <- config -} - -func (c *ProtobufPowerDNS) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] powerdns - "+msg, v...) -} - -func (c *ProtobufPowerDNS) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+"] powerdns - "+msg, v...) -} - -// func (c *ProtobufPowerDNS) LogConnInfo(connID int, msg string, v ...interface{}) { -// prefix := fmt.Sprintf(pkgutils.PrefixLogCollector+"[%s] powerdns#%d - ", c.name, connID) -// c.logger.Info(prefix+msg, v...) -// } - -// func (c *ProtobufPowerDNS) LogConnError(connID int, msg string, v ...interface{}) { -// prefix := fmt.Sprintf(pkgutils.PrefixLogCollector+"[%s] powerdns#%d - ", c.name, connID) -// c.logger.Error(prefix+msg, v...) -// } - -func (c *ProtobufPowerDNS) HandleConn(conn net.Conn) { - // close connection on function exit - defer conn.Close() - - var connID int - c.Lock() - c.connID++ - connID = c.connID - c.Unlock() - - // get peer address - peer := conn.RemoteAddr().String() - c.LogInfo("new connection #%d from %s", connID, peer) - - // start protobuf subprocessor - pdnsProc := processors.NewPdnsProcessor(connID, c.config, c.logger, c.name, c.config.Collectors.PowerDNS.ChannelBufferSize) - c.Lock() - c.pdnsProcessors = append(c.pdnsProcessors, &pdnsProc) - c.Unlock() - go pdnsProc.Run(c.defaultRoutes, c.droppedRoutes) - - r := bufio.NewReader(conn) - pbs := powerdns_protobuf.NewProtobufStream(r, conn, 5*time.Second) - - var err error - var payload *powerdns_protobuf.ProtoPayload - - for { - payload, err = pbs.RecvPayload(false) - if err != nil { - connClosed := false - - var opErr *net.OpError - if errors.As(err, &opErr) { - if errors.Is(opErr, net.ErrClosed) { - connClosed = true - } - } - if errors.Is(err, io.EOF) { - connClosed = true - } - - if connClosed { - c.LogInfo("conn #%d - connection closed with peer 
%s", connID, peer) - } else { - c.LogError("conn #%d - powerdns reader error: %s", connID, err) - } - - // stop processor - // the Stop function is already called, don't stop again - if !c.stopCalled { - pdnsProc.Stop() - } - break - } - - // send payload to the channel - select { - case pdnsProc.GetChannel() <- payload.Data(): // Successful send - default: - c.dropped <- 1 - } - } - - // to avoid lock if the Stop function is already called - if c.stopCalled { - c.LogInfo("conn #%d - connection handler exited", connID) - return - } - - // here the connection is closed, - // then removes the current tap processor from the list - c.Lock() - for i, t := range c.pdnsProcessors { - if t.ConnID == connID { - c.pdnsProcessors = append(c.pdnsProcessors[:i], c.pdnsProcessors[i+1:]...) - } - } - - // finnaly removes the current connection from the list - for j, cn := range c.conns { - if cn == conn { - c.conns = append(c.conns[:j], c.conns[j+1:]...) - conn = nil - } - } - c.Unlock() - - c.LogInfo("conn #%d - connection handler terminated", connID) -} - -func (c *ProtobufPowerDNS) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *ProtobufPowerDNS) Stop() { - c.Lock() - defer c.Unlock() - - // to avoid some lock situations when the remose side closes - // the connection at the same time of this Stop function - c.stopCalled = true - c.LogInfo("stopping collector...") - - // stop all powerdns processors - c.LogInfo("cleanup all active processors...") - for _, pdnsProc := range c.pdnsProcessors { - pdnsProc.Stop() - } - - // closing properly current connections if exists - c.LogInfo("closing connected peers...") - for _, conn := range c.conns { - peer := conn.RemoteAddr().String() - c.LogInfo("%s - closing connection...", peer) - netlib.Close(conn, c.config.Collectors.PowerDNS.ResetConn) - } - - // Finally close the listener to unblock accept - c.LogInfo("stop listening...") - c.listen.Close() - - // stop monitor goroutine - c.LogInfo("stopping monitor...") 
- c.stopMonitor <- true - <-c.doneMonitor - - // read done channel and block until run is terminated - c.LogInfo("stopping run...") - c.stopRun <- true - <-c.doneRun -} - -func (c *ProtobufPowerDNS) Listen() error { - c.Lock() - defer c.Unlock() - - c.LogInfo("running in background...") - - var err error - var listener net.Listener - addrlisten := c.config.Collectors.PowerDNS.ListenIP + ":" + strconv.Itoa(c.config.Collectors.PowerDNS.ListenPort) - - // listening with tls enabled ? - if c.config.Collectors.PowerDNS.TLSSupport { - c.LogInfo("tls support enabled") - var cer tls.Certificate - cer, err = tls.LoadX509KeyPair(c.config.Collectors.PowerDNS.CertFile, c.config.Collectors.PowerDNS.KeyFile) - if err != nil { - c.logger.Fatal("loading certificate failed:", err) - } - - // prepare tls configuration - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{cer}, - MinVersion: tls.VersionTLS12, - } - - // update tls min version according to the user config - tlsConfig.MinVersion = pkgconfig.TLSVersion[c.config.Collectors.PowerDNS.TLSMinVersion] - - listener, err = tls.Listen(netlib.SocketTCP, addrlisten, tlsConfig) - } else { - listener, err = net.Listen(netlib.SocketTCP, addrlisten) - } - // something is wrong ? 
- if err != nil { - return err - } - c.LogInfo("is listening on %s", listener.Addr()) - c.listen = listener - return nil -} - -func (c *ProtobufPowerDNS) MonitorCollector() { - watchInterval := 10 * time.Second - bufferFull := time.NewTimer(watchInterval) -MONITOR_LOOP: - for { - select { - case <-c.stopMonitor: - close(c.dropped) - bufferFull.Stop() - c.doneMonitor <- true - break MONITOR_LOOP - - case <-c.dropped: - c.droppedCount++ - - case <-bufferFull.C: - if c.droppedCount > 0 { - c.LogError("recv buffer is full, %d packet(s) dropped", c.droppedCount) - c.droppedCount = 0 - } - bufferFull.Reset(watchInterval) - } - } - c.LogInfo("monitor terminated") -} - -func (c *ProtobufPowerDNS) Run() { - - c.LogInfo("starting collector...") - if c.listen == nil { - if err := c.Listen(); err != nil { - prefixlog := fmt.Sprintf("[%s] ", c.name) - c.logger.Fatal(prefixlog+"collector=powerdns listening failed: ", err) - } - } - - // start goroutine to count dropped messsages - go c.MonitorCollector() - - // goroutine to Accept() blocks waiting for new connection. 
- acceptChan := make(chan net.Conn) - go func() { - for { - conn, err := c.listen.Accept() - if err != nil { - return - } - acceptChan <- conn - } - }() - -RUN_LOOP: - for { - select { - case <-c.stopRun: - close(acceptChan) - c.doneRun <- true - break RUN_LOOP - - case cfg := <-c.configChan: - // save the new config - c.config = cfg - c.ReadConfig() - - // refresh config for all conns - for i := range c.pdnsProcessors { - c.pdnsProcessors[i].ConfigChan <- cfg - } - - case conn, opened := <-acceptChan: - if !opened { - return - } - - if c.config.Collectors.Dnstap.RcvBufSize > 0 { - before, actual, err := netlib.SetSockRCVBUF( - conn, - c.config.Collectors.Dnstap.RcvBufSize, - c.config.Collectors.Dnstap.TLSSupport, - ) - if err != nil { - c.logger.Fatal("Unable to set SO_RCVBUF: ", err) - } - c.LogInfo("set SO_RCVBUF option, value before: %d, desired: %d, actual: %d", - before, - c.config.Collectors.Dnstap.RcvBufSize, - actual) - } - - // to avoid lock if the Stop function is already called - if c.stopCalled { - continue - } - - c.Lock() - c.conns = append(c.conns, conn) - c.Unlock() - go c.HandleConn(conn) - - } - } - - c.LogInfo("run terminated") -} diff --git a/collectors/powerdns_test.go b/collectors/powerdns_test.go deleted file mode 100644 index 241748e3..00000000 --- a/collectors/powerdns_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package collectors - -import ( - "log" - "net" - "testing" - - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-logger" -) - -func TestPowerDNS_Run(t *testing.T) { - g := pkgutils.NewFakeLogger() - - c := NewProtobufPowerDNS([]pkgutils.Worker{g}, pkgconfig.GetFakeConfig(), logger.New(false), "test") - if err := c.Listen(); err != nil { - log.Fatal("collector powerdns listening error: ", err) - } - go c.Run() - - conn, err := net.Dial(netlib.SocketTCP, ":6001") - if err != nil { - 
t.Error("could not connect to TCP server: ", err) - } - defer conn.Close() -} diff --git a/collectors/sniffer_afpacket.go b/collectors/sniffer_afpacket.go deleted file mode 100644 index 499219b2..00000000 --- a/collectors/sniffer_afpacket.go +++ /dev/null @@ -1,452 +0,0 @@ -//go:build linux -// +build linux - -package collectors - -import ( - "encoding/binary" - "errors" - "net" - "os" - "syscall" - "time" - "unsafe" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" - "github.com/dmachard/go-logger" - "github.com/google/gopacket" - "github.com/google/gopacket/layers" - "golang.org/x/net/bpf" - "golang.org/x/sys/unix" -) - -// Convert a uint16 to host byte order (big endian) -func Htons(v uint16) int { - return int((v << 8) | (v >> 8)) -} - -func GetBPFFilterIngress(port int) []bpf.Instruction { - // bpf filter: (ip or ip6 ) and (udp or tcp) and port 53 - // fragmented packets are ignored - var filter = []bpf.Instruction{ - // Load eth.type (2 bytes at offset 12) and push-it in register A - bpf.LoadAbsolute{Off: 12, Size: 2}, - // if eth.type == IPv4 continue with the next instruction - bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x0800, SkipTrue: 0, SkipFalse: 8}, - // Load ip.proto (1 byte at offset 23) and push-it in register A - bpf.LoadAbsolute{Off: 23, Size: 1}, - // ip.proto == UDP ? - bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x11, SkipTrue: 1, SkipFalse: 0}, - // ip.proto == TCP ? 
- bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x6, SkipTrue: 0, SkipFalse: 12}, - // load flags and fragment offset (2 bytes at offset 20) to ignore fragmented packet - bpf.LoadAbsolute{Off: 20, Size: 2}, - // Only look at the last 13 bits of the data saved in regiter A - // 0x1fff == 0001 1111 1111 1111 (fragment offset) - // If any of the data in fragment offset is true, ignore the packet - bpf.JumpIf{Cond: bpf.JumpBitsSet, Val: 0x1fff, SkipTrue: 10, SkipFalse: 0}, - // Load ip.length - // Register X = ip header len * 4 - bpf.LoadMemShift{Off: 14}, - // Load source port in tcp or udp (2 bytes at offset x+14) - bpf.LoadIndirect{Off: 14, Size: 2}, - // source port equal to 53 ? - bpf.JumpIf{Cond: bpf.JumpEqual, Val: uint32(port), SkipTrue: 6, SkipFalse: 7}, - // if eth.type == IPv6 continue with the next instruction - bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x86dd, SkipTrue: 0, SkipFalse: 6}, - // Load ipv6.nxt (2 bytes at offset 12) and push-it in register A - bpf.LoadAbsolute{Off: 20, Size: 1}, - // ip.proto == UDP ? - bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x11, SkipTrue: 1, SkipFalse: 0}, - // ip.proto == TCP ? - bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x6, SkipTrue: 0, SkipFalse: 3}, - // Load source port tcp or udp (2 bytes at offset 54) - bpf.LoadAbsolute{Off: 54, Size: 2}, - // source port equal to 53 ? 
- bpf.JumpIf{Cond: bpf.JumpEqual, Val: uint32(port), SkipTrue: 0, SkipFalse: 1}, - // Keep the packet and send up to 65k of the packet to userspace - bpf.RetConstant{Val: 0xFFFF}, - // Ignore packet - bpf.RetConstant{Val: 0}, - } - return filter -} - -func GetBpfFilter(port int) []bpf.Instruction { - // bpf filter: (ip or ip6 ) and (udp or tcp) and port 53 - // fragmented packets are ignored - var filter = []bpf.Instruction{ - // Load eth.type (2 bytes at offset 12) and push-it in register A - bpf.LoadAbsolute{Off: 12, Size: 2}, - // if eth.type == IPv4 continue with the next instruction - bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x0800, SkipTrue: 0, SkipFalse: 10}, - // Load ip.proto (1 byte at offset 23) and push-it in register A - bpf.LoadAbsolute{Off: 23, Size: 1}, - // ip.proto == UDP ? - bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x11, SkipTrue: 1, SkipFalse: 0}, - // ip.proto == TCP ? - bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x6, SkipTrue: 0, SkipFalse: 16}, - // load flags and fragment offset (2 bytes at offset 20) to ignore fragmented packet - bpf.LoadAbsolute{Off: 20, Size: 2}, - // Only look at the last 13 bits of the data saved in regiter A - // 0x1fff == 0001 1111 1111 1111 (fragment offset) - // If any of the data in fragment offset is true, ignore the packet - bpf.JumpIf{Cond: bpf.JumpBitsSet, Val: 0x1fff, SkipTrue: 14, SkipFalse: 0}, - // Load ip.length - // Register X = ip header len * 4 - bpf.LoadMemShift{Off: 14}, - // Load source port in tcp or udp (2 bytes at offset x+14) - bpf.LoadIndirect{Off: 14, Size: 2}, - // source port equal to 53 ? - bpf.JumpIf{Cond: bpf.JumpEqual, Val: uint32(port), SkipTrue: 10, SkipFalse: 0}, - // Load destination port in tcp or udp (2 bytes at offset x+16) - bpf.LoadIndirect{Off: 16, Size: 2}, - // destination port equal to 53 ? 
- bpf.JumpIf{Cond: bpf.JumpEqual, Val: uint32(port), SkipTrue: 8, SkipFalse: 9}, - // if eth.type == IPv6 continue with the next instruction - bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x86dd, SkipTrue: 0, SkipFalse: 8}, - // Load ipv6.nxt (2 bytes at offset 12) and push-it in register A - bpf.LoadAbsolute{Off: 20, Size: 1}, - // ip.proto == UDP ? - bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x11, SkipTrue: 1, SkipFalse: 0}, - // ip.proto == TCP ? - bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x6, SkipTrue: 0, SkipFalse: 5}, - // Load source port tcp or udp (2 bytes at offset 54) - bpf.LoadAbsolute{Off: 54, Size: 2}, - // source port equal to 53 ? - bpf.JumpIf{Cond: bpf.JumpEqual, Val: uint32(port), SkipTrue: 2, SkipFalse: 0}, - // Load destination port tcp or udp (2 bytes at offset 56) - bpf.LoadAbsolute{Off: 56, Size: 2}, - // destination port equal to 53 ? - bpf.JumpIf{Cond: bpf.JumpEqual, Val: uint32(port), SkipTrue: 0, SkipFalse: 1}, - // Keep the packet and send up to 65k of the packet to userspace - bpf.RetConstant{Val: 0xFFFF}, - // Ignore packet - bpf.RetConstant{Val: 0}, - } - return filter -} - -func ApplyBpfFilter(filter []bpf.Instruction, fd int) (err error) { - var assembled []bpf.RawInstruction - if assembled, err = bpf.Assemble(filter); err != nil { - return err - } - - prog := &unix.SockFprog{ - Len: uint16(len(assembled)), - Filter: (*unix.SockFilter)(unsafe.Pointer(&assembled[0])), - } - - return unix.SetsockoptSockFprog(fd, syscall.SOL_SOCKET, syscall.SO_ATTACH_FILTER, prog) -} - -func RemoveBpfFilter(fd int) (err error) { - return syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_DETACH_FILTER, 0) -} - -type AfpacketSniffer struct { - done chan bool - exit chan bool - fd int - identity string - defaultRoutes []pkgutils.Worker - droppedRoutes []pkgutils.Worker - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - name string -} - -func NewAfpacketSniffer(loggers []pkgutils.Worker, config *pkgconfig.Config, logger 
*logger.Logger, name string) *AfpacketSniffer { - logger.Info(pkgutils.PrefixLogCollector+"[%s] afpacket sniffer - enabled", name) - s := &AfpacketSniffer{ - done: make(chan bool), - exit: make(chan bool), - config: config, - configChan: make(chan *pkgconfig.Config), - defaultRoutes: loggers, - logger: logger, - name: name, - } - s.ReadConfig() - return s -} - -func (c *AfpacketSniffer) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] afpacket sniffer- "+msg, v...) -} - -func (c *AfpacketSniffer) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+"] afpacket sniffer - "+msg, v...) -} - -func (c *AfpacketSniffer) GetName() string { return c.name } - -func (c *AfpacketSniffer) AddDroppedRoute(wrk pkgutils.Worker) { - c.droppedRoutes = append(c.droppedRoutes, wrk) -} - -func (c *AfpacketSniffer) AddDefaultRoute(wrk pkgutils.Worker) { - c.defaultRoutes = append(c.defaultRoutes, wrk) -} - -func (c *AfpacketSniffer) SetLoggers(loggers []pkgutils.Worker) { - c.defaultRoutes = loggers -} - -func (c *AfpacketSniffer) Loggers() ([]chan dnsutils.DNSMessage, []string) { - return pkgutils.GetRoutes(c.defaultRoutes) -} - -func (c *AfpacketSniffer) ReadConfig() { - c.identity = c.config.GetServerIdentity() -} - -func (c *AfpacketSniffer) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration...") - c.configChan <- config -} - -func (c *AfpacketSniffer) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *AfpacketSniffer) Stop() { - c.LogInfo("stopping collector...") - - // exit to close properly - c.exit <- true - - // read done channel and block until run is terminated - <-c.done - close(c.done) -} - -func (c *AfpacketSniffer) Listen() error { - // raw socket - fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, Htons(syscall.ETH_P_ALL)) - if err != nil { - return err - } - - // bind to device ? 
- if c.config.Collectors.AfpacketLiveCapture.Device != "" { - iface, err := net.InterfaceByName(c.config.Collectors.AfpacketLiveCapture.Device) - if err != nil { - return err - } - - ll := syscall.SockaddrLinklayer{ - Ifindex: iface.Index, - } - - if err := syscall.Bind(fd, &ll); err != nil { - return err - } - - c.LogInfo("binding with success to iface %q (index %d)", iface.Name, iface.Index) - } - - // set nano timestamp - err = syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_TIMESTAMPNS, 1) - if err != nil { - return err - } - - filter := GetBpfFilter(c.config.Collectors.AfpacketLiveCapture.Port) - err = ApplyBpfFilter(filter, fd) - if err != nil { - return err - } - - c.LogInfo("BPF filter applied") - - c.fd = fd - return nil -} - -func (c *AfpacketSniffer) Run() { - c.LogInfo("starting collector...") - defer RemoveBpfFilter(c.fd) - defer syscall.Close(c.fd) - - if c.fd == 0 { - if err := c.Listen(); err != nil { - c.LogError("init raw socket failed: %v\n", err) - os.Exit(1) // nolint - } - } - - dnsProcessor := processors.NewDNSProcessor( - c.config, - c.logger, - c.name, - c.config.Collectors.AfpacketLiveCapture.ChannelBufferSize, - ) - go dnsProcessor.Run(c.defaultRoutes, c.droppedRoutes) - - dnsChan := make(chan netlib.DNSPacket) - udpChan := make(chan gopacket.Packet) - tcpChan := make(chan gopacket.Packet) - fragIP4Chan := make(chan gopacket.Packet) - fragIP6Chan := make(chan gopacket.Packet) - - netDecoder := &netlib.NetDecoder{} - - // defrag ipv4 - go netlib.IPDefragger(fragIP4Chan, udpChan, tcpChan) - // defrag ipv6 - go netlib.IPDefragger(fragIP6Chan, udpChan, tcpChan) - // tcp assembly - go netlib.TCPAssembler(tcpChan, dnsChan, 0) - // udp processor - go netlib.UDPProcessor(udpChan, dnsChan, 0) - - // goroutine to read all packets reassembled - go func() { - // prepare dns message - dm := dnsutils.DNSMessage{} - - for { - select { - // new config provided? 
- case cfg, opened := <-c.configChan: - if !opened { - return - } - c.config = cfg - c.ReadConfig() - - // send the config to the dns processor - dnsProcessor.ConfigChan <- cfg - - // dns message to read ? - case dnsPacket := <-dnsChan: - // reset - dm.Init() - - dm.NetworkInfo.Family = dnsPacket.IPLayer.EndpointType().String() - dm.NetworkInfo.QueryIP = dnsPacket.IPLayer.Src().String() - dm.NetworkInfo.ResponseIP = dnsPacket.IPLayer.Dst().String() - dm.NetworkInfo.QueryPort = dnsPacket.TransportLayer.Src().String() - dm.NetworkInfo.ResponsePort = dnsPacket.TransportLayer.Dst().String() - dm.NetworkInfo.Protocol = dnsPacket.TransportLayer.EndpointType().String() - - dm.DNS.Payload = dnsPacket.Payload - dm.DNS.Length = len(dnsPacket.Payload) - - dm.DNSTap.Identity = c.identity - - timestamp := dnsPacket.Timestamp.UnixNano() - seconds := timestamp / int64(time.Second) - dm.DNSTap.TimeSec = int(seconds) - dm.DNSTap.TimeNsec = int(timestamp - seconds*int64(time.Second)*int64(time.Nanosecond)) - - // send DNS message to DNS processor - dnsProcessor.GetChannel() <- dm - } - } - }() - - go func() { - buf := make([]byte, 65536) - oob := make([]byte, 100) - - for { - // flags, from - bufN, oobn, _, _, err := syscall.Recvmsg(c.fd, buf, oob, 0) - if err != nil { - if errors.Is(err, syscall.EINTR) { - continue - } else { - panic(err) - } - } - if bufN == 0 { - panic("buf empty") - } - if bufN > len(buf) { - panic("buf overflow") - } - if oobn == 0 { - panic("oob missing") - } - - scms, err := syscall.ParseSocketControlMessage(oob[:oobn]) - if err != nil { - panic(err) - } - if len(scms) != 1 { - continue - } - scm := scms[0] - if scm.Header.Type != syscall.SCM_TIMESTAMPNS { - panic("scm timestampns missing") - } - tsec := binary.LittleEndian.Uint32(scm.Data[:4]) - nsec := binary.LittleEndian.Uint32(scm.Data[8:12]) - timestamp := time.Unix(int64(tsec), int64(nsec)) - - // copy packet data from buffer - pkt := make([]byte, bufN) - copy(pkt, buf[:bufN]) - - // decode minimal 
layers - packet := gopacket.NewPacket(pkt, netDecoder, gopacket.NoCopy) - packet.Metadata().CaptureLength = len(packet.Data()) - packet.Metadata().Length = len(packet.Data()) - packet.Metadata().Timestamp = timestamp - - // some security checks - if packet.NetworkLayer() == nil { - continue - } - if packet.TransportLayer() == nil { - continue - } - - // ipv4 fragmented packet ? - if packet.NetworkLayer().LayerType() == layers.LayerTypeIPv4 { - ip4 := packet.NetworkLayer().(*layers.IPv4) - if ip4.Flags&layers.IPv4MoreFragments == 1 || ip4.FragOffset > 0 { - fragIP4Chan <- packet - continue - } - } - - // ipv6 fragmented packet ? - if packet.NetworkLayer().LayerType() == layers.LayerTypeIPv6 { - v6frag := packet.Layer(layers.LayerTypeIPv6Fragment) - if v6frag != nil { - fragIP6Chan <- packet - continue - } - } - - // tcp or udp packets ? - if packet.TransportLayer().LayerType() == layers.LayerTypeUDP { - udpChan <- packet - } - - if packet.TransportLayer().LayerType() == layers.LayerTypeTCP { - tcpChan <- packet - } - } - - }() - - <-c.exit - close(dnsChan) - close(c.configChan) - - // stop dns processor - dnsProcessor.Stop() - - c.LogInfo("run terminated") - c.done <- true -} diff --git a/collectors/sniffer_afpacket_darwin.go b/collectors/sniffer_afpacket_darwin.go deleted file mode 100644 index fff10b02..00000000 --- a/collectors/sniffer_afpacket_darwin.go +++ /dev/null @@ -1,85 +0,0 @@ -//go:build darwin -// +build darwin - -package collectors - -import ( - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-logger" -) - -type AfpacketSniffer struct { - done chan bool - exit chan bool - defaultRoutes []pkgutils.Worker - config *pkgconfig.Config - logger *logger.Logger - name string -} - -// workaround for macos, not yet supported -func NewAfpacketSniffer(loggers []pkgutils.Worker, config *pkgconfig.Config, logger 
*logger.Logger, name string) *AfpacketSniffer { - logger.Info(pkgutils.PrefixLogCollector+"[%s] AFPACKET sniffer - enabled", name) - s := &AfpacketSniffer{ - done: make(chan bool), - exit: make(chan bool), - config: config, - defaultRoutes: loggers, - logger: logger, - name: name, - } - s.ReadConfig() - return s -} - -func (c *AfpacketSniffer) GetName() string { return c.name } - -func (c *AfpacketSniffer) AddDroppedRoute(wrk pkgutils.Worker) { - // TODO -} - -func (c *AfpacketSniffer) AddDefaultRoute(wrk pkgutils.Worker) { - c.defaultRoutes = append(c.defaultRoutes, wrk) -} - -func (c *AfpacketSniffer) SetLoggers(loggers []pkgutils.Worker) { - c.defaultRoutes = loggers -} - -func (c *AfpacketSniffer) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] dnssniffer - "+msg, v...) -} - -func (c *AfpacketSniffer) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+"] dnssniffer - "+msg, v...) -} - -func (c *AfpacketSniffer) Loggers() ([]chan dnsutils.DNSMessage, []string) { - return pkgutils.GetRoutes(c.defaultRoutes) -} - -func (c *AfpacketSniffer) ReadConfig() {} - -func (c *AfpacketSniffer) ReloadConfig(config *pkgconfig.Config) {} - -func (c *AfpacketSniffer) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *AfpacketSniffer) Stop() { - c.LogInfo("stopping collector...") - - // exit to close properly - c.exit <- true - - // read done channel and block until run is terminated - <-c.done - close(c.done) -} - -func (c *AfpacketSniffer) Run() { - c.LogInfo("run terminated") - c.done <- true -} diff --git a/collectors/sniffer_afpacket_freebsd.go b/collectors/sniffer_afpacket_freebsd.go deleted file mode 100644 index d19453b4..00000000 --- a/collectors/sniffer_afpacket_freebsd.go +++ /dev/null @@ -1,85 +0,0 @@ -//go:build freebsd -// +build freebsd - -package collectors - -import ( - "github.com/dmachard/go-dnscollector/dnsutils" - 
"github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-logger" -) - -type AfpacketSniffer struct { - done chan bool - exit chan bool - defaultRoutes []pkgutils.Worker - config *pkgconfig.Config - logger *logger.Logger - name string -} - -// workaround for freebsd, not yet supported -func NewAfpacketSniffer(loggers []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *AfpacketSniffer { - logger.Info(pkgutils.PrefixLogCollector+"[%s] AFPACKET sniffer - enabled", name) - s := &AfpacketSniffer{ - done: make(chan bool), - exit: make(chan bool), - config: config, - defaultRoutes: loggers, - logger: logger, - name: name, - } - s.ReadConfig() - return s -} - -func (c *AfpacketSniffer) GetName() string { return c.name } - -func (c *AfpacketSniffer) AddDroppedRoute(wrk pkgutils.Worker) { - // TODO -} - -func (c *AfpacketSniffer) AddDefaultRoute(wrk pkgutils.Worker) { - c.defaultRoutes = append(c.defaultRoutes, wrk) -} - -func (c *AfpacketSniffer) SetLoggers(loggers []pkgutils.Worker) { - c.defaultRoutes = loggers -} - -func (c *AfpacketSniffer) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] AFPACKET sniffer - "+msg, v...) -} - -func (c *AfpacketSniffer) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+"] AFPACKET sniffer - "+msg, v...) 
-} - -func (c *AfpacketSniffer) Loggers() ([]chan dnsutils.DNSMessage, []string) { - return pkgutils.GetRoutes(c.defaultRoutes) -} - -func (c *AfpacketSniffer) ReadConfig() {} - -func (c *AfpacketSniffer) ReloadConfig(config *pkgconfig.Config) {} - -func (c *AfpacketSniffer) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *AfpacketSniffer) Stop() { - c.LogInfo("stopping collector...") - - // exit to close properly - c.exit <- true - - // read done channel and block until run is terminated - <-c.done - close(c.done) -} - -func (c *AfpacketSniffer) Run() { - c.LogInfo("Not supported") - c.done <- true -} diff --git a/collectors/sniffer_afpacket_windows.go b/collectors/sniffer_afpacket_windows.go deleted file mode 100644 index 8fea3720..00000000 --- a/collectors/sniffer_afpacket_windows.go +++ /dev/null @@ -1,85 +0,0 @@ -//go:build windows -// +build windows - -package collectors - -import ( - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-logger" -) - -type AfpacketSniffer struct { - done chan bool - exit chan bool - defaultRoutes []pkgutils.Worker - config *pkgconfig.Config - logger *logger.Logger - name string -} - -// workaround for macos, not yet supported -func NewAfpacketSniffer(loggers []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *AfpacketSniffer { - logger.Info(pkgutils.PrefixLogCollector+"[%s] AFPACKET sniffer - enabled", name) - s := &AfpacketSniffer{ - done: make(chan bool), - exit: make(chan bool), - config: config, - defaultRoutes: loggers, - logger: logger, - name: name, - } - s.ReadConfig() - return s -} - -func (c *AfpacketSniffer) GetName() string { return c.name } - -func (c *AfpacketSniffer) AddDroppedRoute(wrk pkgutils.Worker) { - // TODO -} - -func (c *AfpacketSniffer) AddDefaultRoute(wrk pkgutils.Worker) { - c.defaultRoutes = 
append(c.defaultRoutes, wrk) -} - -func (c *AfpacketSniffer) SetLoggers(loggers []pkgutils.Worker) { - c.defaultRoutes = loggers -} - -func (c *AfpacketSniffer) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] AFPACKET sniffer - "+msg, v...) -} - -func (c *AfpacketSniffer) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+"] AFPACKET sniffer - "+msg, v...) -} - -func (c *AfpacketSniffer) Loggers() ([]chan dnsutils.DNSMessage, []string) { - return pkgutils.GetRoutes(c.defaultRoutes) -} - -func (c *AfpacketSniffer) ReadConfig() {} - -func (c *AfpacketSniffer) ReloadConfig(config *pkgconfig.Config) {} - -func (c *AfpacketSniffer) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *AfpacketSniffer) Stop() { - c.LogInfo("stopping collector...") - - // exit to close properly - c.exit <- true - - // read done channel and block until run is terminated - <-c.done - close(c.done) -} - -func (c *AfpacketSniffer) Run() { - c.LogInfo("Not supported") - c.done <- true -} diff --git a/collectors/sniffer_xdp.go b/collectors/sniffer_xdp.go deleted file mode 100644 index e41c9b58..00000000 --- a/collectors/sniffer_xdp.go +++ /dev/null @@ -1,280 +0,0 @@ -//go:build linux || darwin || freebsd -// +build linux darwin freebsd - -package collectors - -import ( - "bytes" - "encoding/binary" - "fmt" - "net" - "os" - "time" - - "github.com/cilium/ebpf/link" - "github.com/cilium/ebpf/perf" - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" - "github.com/dmachard/go-dnscollector/xdp" - "github.com/dmachard/go-logger" - "golang.org/x/sys/unix" -) - -func GetIPAddress[T uint32 | [4]uint32](ip T, mapper func(T) net.IP) 
net.IP { - return mapper(ip) -} - -func ConvertIP4(ip uint32) net.IP { - addr := make(net.IP, net.IPv4len) - binary.BigEndian.PutUint32(addr, ip) - return addr -} - -func ConvertIP6(ip [4]uint32) net.IP { - addr := make(net.IP, net.IPv6len) - binary.LittleEndian.PutUint32(addr[0:], ip[0]) - binary.LittleEndian.PutUint32(addr[4:], ip[1]) - binary.LittleEndian.PutUint32(addr[8:], ip[2]) - binary.LittleEndian.PutUint32(addr[12:], ip[3]) - return addr -} - -type XDPSniffer struct { - done chan bool - exit chan bool - identity string - defaultRoutes []pkgutils.Worker - droppedRoutes []pkgutils.Worker - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - name string -} - -func NewXDPSniffer(loggers []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *XDPSniffer { - logger.Info(pkgutils.PrefixLogCollector+"[%s] xdp sniffer - enabled", name) - s := &XDPSniffer{ - done: make(chan bool), - exit: make(chan bool), - config: config, - configChan: make(chan *pkgconfig.Config), - defaultRoutes: loggers, - logger: logger, - name: name, - } - s.ReadConfig() - return s -} - -func (c *XDPSniffer) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] xdp sniffer - "+msg, v...) -} - -func (c *XDPSniffer) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+"] xdp sniffer - "+msg, v...) 
-} - -func (c *XDPSniffer) GetName() string { return c.name } - -func (c *XDPSniffer) AddDroppedRoute(wrk pkgutils.Worker) { - c.droppedRoutes = append(c.droppedRoutes, wrk) -} - -func (c *XDPSniffer) AddDefaultRoute(wrk pkgutils.Worker) { - c.defaultRoutes = append(c.defaultRoutes, wrk) -} - -func (c *XDPSniffer) SetLoggers(loggers []pkgutils.Worker) { - c.defaultRoutes = loggers -} - -func (c *XDPSniffer) Loggers() ([]chan dnsutils.DNSMessage, []string) { - return pkgutils.GetRoutes(c.defaultRoutes) -} - -func (c *XDPSniffer) ReadConfig() { - c.identity = c.config.GetServerIdentity() -} - -func (c *XDPSniffer) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration...") - c.configChan <- config -} - -func (c *XDPSniffer) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *XDPSniffer) Stop() { - c.LogInfo("stopping collector...") - - // exit to close properly - c.exit <- true - - // read done channel and block until run is terminated - <-c.done - close(c.done) -} - -func (c *XDPSniffer) Run() { - c.LogInfo("starting collector...") - - dnsProcessor := processors.NewDNSProcessor( - c.config, - c.logger, - c.name, - c.config.Collectors.XdpLiveCapture.ChannelBufferSize, - ) - go dnsProcessor.Run(c.defaultRoutes, c.droppedRoutes) - - iface, err := net.InterfaceByName(c.config.Collectors.XdpLiveCapture.Device) - if err != nil { - c.LogError("lookup network iface: %s", err) - os.Exit(1) - } - - // Load pre-compiled programs into the kernel. - objs := xdp.BpfObjects{} - if err := xdp.LoadBpfObjects(&objs, nil); err != nil { - c.LogError("loading BPF objects: %s", err) - os.Exit(1) - } - defer objs.Close() - - // Attach the program. 
- l, err := link.AttachXDP(link.XDPOptions{ - Program: objs.XdpSniffer, - Interface: iface.Index, - }) - if err != nil { - c.LogError("could not attach XDP program: %s", err) - os.Exit(1) // nolint - } - defer l.Close() - - c.LogInfo("XDP program attached to iface %q (index %d)", iface.Name, iface.Index) - - perfEvent, err := perf.NewReader(objs.Pkts, 1<<24) - if err != nil { - panic(err) - } - - dnsChan := make(chan dnsutils.DNSMessage) - - // goroutine to read all packets reassembled - go func() { - for { - select { - // new config provided? - case cfg, opened := <-c.configChan: - if !opened { - return - } - c.config = cfg - c.ReadConfig() - - // send the config to the dns processor - dnsProcessor.ConfigChan <- cfg - - // dns message to read ? - case dm := <-dnsChan: - - // update identity with config ? - dm.DNSTap.Identity = c.identity - - dnsProcessor.GetChannel() <- dm - - } - } - }() - - go func() { - var pkt xdp.BpfPktEvent - for { - // The data submitted via bpf_perf_event_output. 
- record, err := perfEvent.Read() - if err != nil { - c.LogError("BPF reading map: %s", err) - break - } - - if record.LostSamples != 0 { - c.LogError("BPF dump: Dropped %d samples from kernel perf buffer", record.LostSamples) - continue - } - - reader := bytes.NewReader(record.RawSample) - if err := binary.Read(reader, binary.LittleEndian, &pkt); err != nil { - c.LogError("BPF reading sample: %s", err) - break - } - - // adjust arrival time - timenow := time.Now().UTC() - var ts unix.Timespec - unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts) - elapsed := time.Since(timenow) * time.Nanosecond - delta3 := time.Duration(uint64(unix.TimespecToNsec(ts))-pkt.Timestamp) * time.Nanosecond - tsAdjusted := timenow.Add(-(delta3 + elapsed)) - - // convert ip - var saddr, daddr net.IP - if pkt.IpVersion == 0x0800 { - saddr = GetIPAddress(pkt.SrcAddr, ConvertIP4) - daddr = GetIPAddress(pkt.DstAddr, ConvertIP4) - } else { - saddr = GetIPAddress(pkt.SrcAddr6, ConvertIP6) - daddr = GetIPAddress(pkt.DstAddr6, ConvertIP6) - } - - // prepare DnsMessage - dm := dnsutils.DNSMessage{} - dm.Init() - - dm.DNSTap.TimeSec = int(tsAdjusted.Unix()) - dm.DNSTap.TimeNsec = int(tsAdjusted.UnixNano() - tsAdjusted.Unix()*1e9) - - if pkt.SrcPort == 53 { - dm.DNSTap.Operation = dnsutils.DNSTapClientResponse - } else { - dm.DNSTap.Operation = dnsutils.DNSTapClientQuery - } - - dm.NetworkInfo.QueryIP = saddr.String() - dm.NetworkInfo.QueryPort = fmt.Sprint(pkt.SrcPort) - dm.NetworkInfo.ResponseIP = daddr.String() - dm.NetworkInfo.ResponsePort = fmt.Sprint(pkt.DstPort) - - if pkt.IpVersion == 0x0800 { - dm.NetworkInfo.Family = netlib.ProtoIPv4 - } else { - dm.NetworkInfo.Family = netlib.ProtoIPv6 - } - - if pkt.IpProto == 0x11 { - dm.NetworkInfo.Protocol = netlib.ProtoUDP - dm.DNS.Payload = record.RawSample[int(pkt.PktOffset)+int(pkt.PayloadOffset):] - dm.DNS.Length = len(dm.DNS.Payload) - } else { - dm.NetworkInfo.Protocol = netlib.ProtoTCP - dm.DNS.Payload = 
record.RawSample[int(pkt.PktOffset)+int(pkt.PayloadOffset)+2:] - dm.DNS.Length = len(dm.DNS.Payload) - } - - dnsChan <- dm - } - }() - - <-c.exit - close(dnsChan) - close(c.configChan) - - // stop dns processor - dnsProcessor.Stop() - - c.LogInfo("run terminated") - c.done <- true -} diff --git a/collectors/sniffer_xdp_windows.go b/collectors/sniffer_xdp_windows.go deleted file mode 100644 index 7cb4008e..00000000 --- a/collectors/sniffer_xdp_windows.go +++ /dev/null @@ -1,84 +0,0 @@ -//go:build windows -// +build windows - -package collectors - -import ( - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-logger" -) - -type XDPSniffer struct { - done chan bool - exit chan bool - identity string - defaultRoutes []pkgutils.Worker - config *pkgconfig.Config - logger *logger.Logger - name string -} - -func NewXDPSniffer(loggers []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *XDPSniffer { - logger.Info(pkgutils.PrefixLogCollector+"[%s] xdp sniffer enabled", name) - s := &XDPSniffer{ - done: make(chan bool), - exit: make(chan bool), - config: config, - defaultRoutes: loggers, - logger: logger, - name: name, - } - s.ReadConfig() - return s -} - -func (c *XDPSniffer) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] XDP sniffer - "+msg, v...) -} - -func (c *XDPSniffer) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+"] XDP sniffer - "+msg, v...) 
-} - -func (c *XDPSniffer) GetName() string { return c.name } - -func (c *XDPSniffer) AddDroppedRoute(wrk pkgutils.Worker) { - // TODO -} - -func (c *XDPSniffer) AddDefaultRoute(wrk pkgutils.Worker) { - c.defaultRoutes = append(c.defaultRoutes, wrk) -} - -func (c *XDPSniffer) SetLoggers(loggers []pkgutils.Worker) { - c.defaultRoutes = loggers -} - -func (c *XDPSniffer) Loggers() ([]chan dnsutils.DNSMessage, []string) { - return pkgutils.GetRoutes(c.defaultRoutes) -} - -func (c *XDPSniffer) ReadConfig() {} - -func (c *XDPSniffer) ReloadConfig(config *pkgconfig.Config) {} - -func (c *XDPSniffer) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *XDPSniffer) Stop() { - c.LogInfo("stopping collector...") - - // exit to close properly - c.exit <- true - - // read done channel and block until run is terminated - <-c.done - close(c.done) -} -func (c *XDPSniffer) Run() { - c.LogInfo("Not supported") - c.done <- true -} diff --git a/collectors/tzsp.go b/collectors/tzsp.go deleted file mode 100644 index 7cdf484a..00000000 --- a/collectors/tzsp.go +++ /dev/null @@ -1,267 +0,0 @@ -//go:build linux -// +build linux - -// Written by Noel Kuntze - -package collectors - -import ( - "encoding/binary" - "fmt" - "net" - "syscall" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" - "github.com/dmachard/go-logger" - "github.com/google/gopacket" - "github.com/google/gopacket/layers" - "github.com/rs/tzsp" -) - -type TZSPSniffer struct { - done chan bool - exit chan bool - listen net.UDPConn - defaultRoutes []pkgutils.Worker - droppedRoutes []pkgutils.Worker - config *pkgconfig.Config - logger *logger.Logger - name string - identity string -} - -func NewTZSP(loggers []pkgutils.Worker, config 
*pkgconfig.Config, logger *logger.Logger, name string) *TZSPSniffer { - logger.Info(pkgutils.PrefixLogCollector+"[%s] tzsp - enabled", name) - s := &TZSPSniffer{ - done: make(chan bool), - exit: make(chan bool), - config: config, - defaultRoutes: loggers, - logger: logger, - name: name, - } - s.ReadConfig() - return s -} - -func (c *TZSPSniffer) GetName() string { return c.name } - -func (c *TZSPSniffer) AddDroppedRoute(wrk pkgutils.Worker) { - c.droppedRoutes = append(c.droppedRoutes, wrk) -} - -func (c *TZSPSniffer) AddDefaultRoute(wrk pkgutils.Worker) { - c.defaultRoutes = append(c.defaultRoutes, wrk) -} - -func (c *TZSPSniffer) SetLoggers(loggers []pkgutils.Worker) { - c.defaultRoutes = loggers -} - -func (c *TZSPSniffer) Loggers() ([]chan dnsutils.DNSMessage, []string) { - return pkgutils.GetRoutes(c.defaultRoutes) -} - -func (c *TZSPSniffer) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] tzsp "+msg, v...) -} - -func (c *TZSPSniffer) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+"] tzsp - "+msg, v...) 
-} - -func (c *TZSPSniffer) ReadConfig() { - c.identity = c.config.GetServerIdentity() -} - -func (c *TZSPSniffer) ReloadConfig(config *pkgconfig.Config) { - // TODO implement reload configuration -} - -func (c *TZSPSniffer) Listen() error { - c.logger.Info("running in background...") - - ServerAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", c.config.Collectors.Tzsp.ListenIP, c.config.Collectors.Tzsp.ListenPort)) - if err != nil { - return err - } - - ServerConn, err := net.ListenUDP("udp", ServerAddr) - if err != nil { - return err - } - file, err := ServerConn.File() - - if err != nil { - return err - } - - err = syscall.SetsockoptInt(int(file.Fd()), syscall.SOL_SOCKET, syscall.SO_TIMESTAMPNS, 1) - - if err != nil { - return err - } - c.LogInfo("is listening on %s", ServerConn.LocalAddr()) - c.listen = *ServerConn - return nil -} - -func (c *TZSPSniffer) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *TZSPSniffer) Stop() { - c.LogInfo("stopping collector...") - - // Finally close the listener to unblock accept - c.exit <- true - - // read done channel and block until run is terminated - <-c.done - close(c.done) -} - -func (c *TZSPSniffer) Run() { - c.logger.Info("starting collector...") - - if err := c.Listen(); err != nil { - c.logger.Fatal("collector=tzsp listening failed: ", err) - } - - dnsProcessor := processors.NewDNSProcessor(c.config, c.logger, c.name, c.config.Collectors.Tzsp.ChannelBufferSize) - go dnsProcessor.Run(c.defaultRoutes, c.droppedRoutes) - - go func() { - buf := make([]byte, 65536) - oob := make([]byte, 100) - for { - // flags, from - bufN, oobn, _, _, err := c.listen.ReadMsgUDPAddrPort(buf, oob) - if err != nil { - panic(err) - } - if bufN == 0 { - panic("buf empty") - } - if bufN > len(buf) { - panic("buf overflow") - } - if oobn == 0 { - panic("oob missing") - } - c.LogInfo("Packet received") - scms, err := syscall.ParseSocketControlMessage(oob[:oobn]) - if err != nil { - panic(err) - } - if len(scms) 
!= 1 { - c.LogInfo("len(scms) != 1") - continue - } - scm := scms[0] - if scm.Header.Type != syscall.SCM_TIMESTAMPNS { - panic("scm timestampns missing") - } - tsec := binary.LittleEndian.Uint32(scm.Data[:4]) - nsec := binary.LittleEndian.Uint32(scm.Data[8:12]) - - // copy packet data from buffer - pkt := make([]byte, bufN) - copy(pkt, buf[:bufN]) - - tzspPacket, err := tzsp.Parse(pkt) - - if err != nil { - c.LogError("Failed to parse packet: ", err) - continue - } - - var eth layers.Ethernet - var ip4 layers.IPv4 - var ip6 layers.IPv6 - var tcp layers.TCP - var udp layers.UDP - parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, ð, &ip4, &ip6, &tcp, &udp) - decodedLayers := make([]gopacket.LayerType, 0, 4) - - // decode-it - parser.DecodeLayers(tzspPacket.Data, &decodedLayers) - - dm := dnsutils.DNSMessage{} - dm.Init() - - ignorePacket := false - for _, layertyp := range decodedLayers { - switch layertyp { - case layers.LayerTypeIPv4: - dm.NetworkInfo.Family = netlib.ProtoIPv4 - dm.NetworkInfo.QueryIP = ip4.SrcIP.String() - dm.NetworkInfo.ResponseIP = ip4.DstIP.String() - - case layers.LayerTypeIPv6: - dm.NetworkInfo.QueryIP = ip6.SrcIP.String() - dm.NetworkInfo.ResponseIP = ip6.DstIP.String() - dm.NetworkInfo.Family = netlib.ProtoIPv6 - - case layers.LayerTypeUDP: - dm.NetworkInfo.QueryPort = fmt.Sprint(int(udp.SrcPort)) - dm.NetworkInfo.ResponsePort = fmt.Sprint(int(udp.DstPort)) - dm.DNS.Payload = udp.Payload - dm.DNS.Length = len(udp.Payload) - dm.NetworkInfo.Protocol = netlib.ProtoUDP - - case layers.LayerTypeTCP: - // ignore SYN/ACK packet - // Note: disabled because SYN/SYN+Ack might contain data if TCP Fast open is used - // if !tcp.PSH { - // ignore_packet = true - // continue - // } - if len(tcp.Payload) < 12 { - // packet way too short; 12 byte is the minimum size a DNS packet (header only, - // no questions, answers, authorities, or additional RRs) - continue - } - dnsLengthField := binary.BigEndian.Uint16(tcp.Payload[0:2]) - if 
len(tcp.Payload) < int(dnsLengthField) { - ignorePacket = true - continue - } - - dm.NetworkInfo.QueryPort = fmt.Sprint(int(tcp.SrcPort)) - dm.NetworkInfo.ResponsePort = fmt.Sprint(int(tcp.DstPort)) - dm.DNS.Payload = tcp.Payload[2:] - dm.DNS.Length = len(tcp.Payload[2:]) - dm.NetworkInfo.Protocol = netlib.ProtoTCP - } - } - - if !ignorePacket { - dm.DNSTap.Identity = c.identity - - // set timestamp - dm.DNSTap.TimeSec = int(tsec) - dm.DNSTap.TimeNsec = int(nsec) - - // just decode QR - if len(dm.DNS.Payload) < 4 { - continue - } - - dnsProcessor.GetChannel() <- dm - } - } - }() - - <-c.exit - - // stop dns processor - dnsProcessor.Stop() - - c.LogInfo("run terminated") - c.done <- true -} diff --git a/collectors/tzsp_darwin.go b/collectors/tzsp_darwin.go deleted file mode 100644 index f59ffa03..00000000 --- a/collectors/tzsp_darwin.go +++ /dev/null @@ -1,85 +0,0 @@ -//go:build darwin -// +build darwin - -package collectors - -import ( - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-logger" -) - -type TZSPSniffer struct { - done chan bool - exit chan bool - defaultRoutes []pkgutils.Worker - config *pkgconfig.Config - logger *logger.Logger - name string -} - -// workaround for macos, not yet supported -func NewTZSP(loggers []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *TZSPSniffer { - logger.Info(pkgutils.PrefixLogCollector+"[%s] tzsp - enabled", name) - s := &TZSPSniffer{ - done: make(chan bool), - exit: make(chan bool), - config: config, - defaultRoutes: loggers, - logger: logger, - name: name, - } - s.ReadConfig() - return s -} - -func (c *TZSPSniffer) GetName() string { return c.name } - -func (c *TZSPSniffer) AddDroppedRoute(wrk pkgutils.Worker) { - // TODO -} - -func (c *TZSPSniffer) AddDefaultRoute(wrk pkgutils.Worker) { - c.defaultRoutes = append(c.defaultRoutes, wrk) 
-} - -func (c *TZSPSniffer) SetLoggers(loggers []pkgutils.Worker) { - c.defaultRoutes = loggers -} - -func (c *TZSPSniffer) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] tzsp - "+msg, v...) -} - -func (c *TZSPSniffer) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+"] tzsp - "+msg, v...) -} - -func (c *TZSPSniffer) Loggers() ([]chan dnsutils.DNSMessage, []string) { - return pkgutils.GetRoutes(c.defaultRoutes) -} - -func (c *TZSPSniffer) ReadConfig() {} - -func (c *TZSPSniffer) ReloadConfig(config *pkgconfig.Config) {} - -func (c *TZSPSniffer) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *TZSPSniffer) Stop() { - c.LogInfo("stopping collector...") - - // exit to close properly - c.exit <- true - - // read done channel and block until run is terminated - <-c.done - close(c.done) -} - -func (c *TZSPSniffer) Run() { - c.LogInfo("run terminated") - c.done <- true -} diff --git a/collectors/tzsp_freebsd.go b/collectors/tzsp_freebsd.go deleted file mode 100644 index eaa3e6e7..00000000 --- a/collectors/tzsp_freebsd.go +++ /dev/null @@ -1,85 +0,0 @@ -//go:build freebsd -// +build freebsd - -package collectors - -import ( - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-logger" -) - -type TZSPSniffer struct { - done chan bool - exit chan bool - defaultRoutes []pkgutils.Worker - config *pkgconfig.Config - logger *logger.Logger - name string -} - -// workaround for macos, not yet supported -func NewTZSP(loggers []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *TZSPSniffer { - logger.Info(pkgutils.PrefixLogCollector+"[%s] tzsp - enabled", name) - s := &TZSPSniffer{ - done: make(chan bool), - exit: make(chan bool), - config: config, - defaultRoutes: loggers, - logger: 
logger, - name: name, - } - s.ReadConfig() - return s -} - -func (c *TZSPSniffer) GetName() string { return c.name } - -func (c *TZSPSniffer) AddDroppedRoute(wrk pkgutils.Worker) { - // TODO -} - -func (c *TZSPSniffer) AddDefaultRoute(wrk pkgutils.Worker) { - c.defaultRoutes = append(c.defaultRoutes, wrk) -} - -func (c *TZSPSniffer) SetLoggers(loggers []pkgutils.Worker) { - c.defaultRoutes = loggers -} - -func (c *TZSPSniffer) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] tzsp - "+msg, v...) -} - -func (c *TZSPSniffer) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+"] tzsp - "+msg, v...) -} - -func (c *TZSPSniffer) Loggers() ([]chan dnsutils.DNSMessage, []string) { - return pkgutils.GetRoutes(c.defaultRoutes) -} - -func (c *TZSPSniffer) ReadConfig() {} - -func (c *TZSPSniffer) ReloadConfig(config *pkgconfig.Config) {} - -func (c *TZSPSniffer) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *TZSPSniffer) Stop() { - c.LogInfo("stopping collector...") - - // exit to close properly - c.exit <- true - - // read done channel and block until run is terminated - <-c.done - close(c.done) -} - -func (c *TZSPSniffer) Run() { - c.LogInfo("run terminated") - c.done <- true -} diff --git a/collectors/tzsp_windows.go b/collectors/tzsp_windows.go deleted file mode 100644 index f0a91e17..00000000 --- a/collectors/tzsp_windows.go +++ /dev/null @@ -1,85 +0,0 @@ -//go:build windows -// +build windows - -package collectors - -import ( - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-logger" -) - -type TZSPSniffer struct { - done chan bool - exit chan bool - defaultRoutes []pkgutils.Worker - config *pkgconfig.Config - logger *logger.Logger - name string -} - -// workaround for macos, not yet supported -func 
NewTZSP(loggers []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *TZSPSniffer { - logger.Info(pkgutils.PrefixLogCollector+"[%s] tzsp - enabled", name) - s := &TZSPSniffer{ - done: make(chan bool), - exit: make(chan bool), - config: config, - defaultRoutes: loggers, - logger: logger, - name: name, - } - s.ReadConfig() - return s -} - -func (c *TZSPSniffer) GetName() string { return c.name } - -func (c *TZSPSniffer) AddDroppedRoute(wrk pkgutils.Worker) { - // TODO -} - -func (c *TZSPSniffer) AddDefaultRoute(wrk pkgutils.Worker) { - c.defaultRoutes = append(c.defaultRoutes, wrk) -} - -func (c *TZSPSniffer) SetLoggers(loggers []pkgutils.Worker) { - c.defaultRoutes = loggers -} - -func (c *TZSPSniffer) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] tzsp - "+msg, v...) -} - -func (c *TZSPSniffer) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+"] tzsp - "+msg, v...) 
-} - -func (c *TZSPSniffer) Loggers() ([]chan dnsutils.DNSMessage, []string) { - return pkgutils.GetRoutes(c.defaultRoutes) -} - -func (c *TZSPSniffer) ReadConfig() {} - -func (c *TZSPSniffer) ReloadConfig(config *pkgconfig.Config) {} - -func (c *TZSPSniffer) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *TZSPSniffer) Stop() { - c.LogInfo("stopping collector...") - - // exit to close properly - c.exit <- true - - // read done channel and block until run is terminated - <-c.done - close(c.done) -} - -func (c *TZSPSniffer) Run() { - c.LogInfo("run terminated") - c.done <- true -} diff --git a/config.yml b/config.yml index ca356deb..8dda8063 100644 --- a/config.yml +++ b/config.yml @@ -1,915 +1,72 @@ ################################################ # global configuration +# more details: https://github.com/dmachard/go-dnscollector/blob/main/docs/configuration.md#global ################################################ global: - # If turned on, log some applications messages trace: - # debug informations verbose: true - # log malformed packet - # log-malformed: false - # # filename is the file to write logs to. 
- # filename: "" - # # maximum size in megabytes of the log file it gets rotated - # max-size: 10 - # # maximum number of old log files to retain - # max-backups: 10 - - # Set the server identity name - # comment the following line to use the hostname server-identity: "dns-collector" - - # default directives for text format output - # - timestamp-rfc3339ns: timestamp rfc3339 format, with nano support - # - timestamp-unixms: unix timestamp with ms support - # - timestamp-unixus: unix timestamp with us support - # - timestamp-unixns: unix timestamp with nano support - # - localtime: local time - # - identity: dnstap identity - # - version: dnstap version - # - extra: dnstap extra as string - # - operation: dnstap operation - # - opcode: dns opcode (integer) - # - rcode: dns return code - # - queryip: dns query ip - # - queryport: dns query port - # - responseip: dns response ip - # - responseport: dns response port - # - id: dns id - # - family: ip protocol version INET or INET6 - # - protocol: protocol UDP, TCP - # - length: the length of the query or reply in bytes - # - length-unit: the length of the query or reply in bytes with unit - # - qtype: dns qtype - # - qname: dns qname - # - latency: computed latency between queries and replies - # - answercount: the number of answer - # - ttl: answer ttl, only the first one value - # - answer: rdata answer, only the first one, prefer to use the JSON format if you wamt all answers - # - malformed: malformed dns packet, integer value 1/0 - # - qr: query or reply flag, string value Q/R - # - tc: truncated flag - # - aa: authoritative answer - # - ra: recursion available - # - ad: authenticated data - # - edns-csubnet: client subnet - # - df: ip defragmentation flag - # - tr: tcp reassembled flag text-format: "timestamp-rfc3339ns identity operation rcode queryip queryport family protocol length-unit qname qtype latency" - # default text field delimiter text-format-delimiter: " " - # default text field boundary 
text-format-boundary: "\"" - -# create your dns collector, please refer bellow to see the list -# of supported collectors, loggers and transformers -multiplexer: - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - normalize: - qname-lowercase: false - - loggers: - - name: console - stdout: - mode: text - - routes: - - from: [ tap ] - to: [ console ] - -# /!\ experimental, pipelling running mode /!\ -# pipelines: -# - name: main-input -# dnstap: -# listen-ip: 0.0.0.0 -# listen-port: 6000 -# routing-policy: -# default: [ filter ] - -# - name: second-input -# dnstap: -# listen-ip: 0.0.0.0 -# listen-port: 6002 -# extended-support: true -# routing-policy: -# default: [ console ] - -# - name: filter -# dnsmessage: -# matching: -# include: -# dns.qname: "^.*\\.google\\.com$" -# transforms: -# atags: -# tags: [ "google"] -# routing-policy: -# dropped: [ outputfile ] -# default: [ central ] - -# - name: central -# dnstapclient: -# transport: tcp -# remote-address: 127.0.0.1 -# remote-port: 6002 -# flush-interval: 5 -# extended-support: true - -# - name: console -# stdout: -# mode: flat-json - -# - name: outputfile -# logfile: -# file-path: "/tmp/dnstap.log" -# max-size: 1000 -# max-files: 10 -# mode: text + pid-file: "" + worker: + interval-monitor: 10 + buffer-size: 4096 + telemetry: + enabled: true + web-path: "/metrics" + web-listen: ":9165" + prometheus-prefix: "dnscollector_exporter" + tls-support: false + tls-cert-file: "" + tls-key-file: "" + client-ca-file: "" + basic-auth-enable: false + basic-auth-login: admin + basic-auth-pwd: changeme ################################################ -# list of supported collectors +# Pipelining configuration +# more details: https://github.com/dmachard/go-dnscollector/blob/main/docs/running_mode.md#pipelining +# workers: https://github.com/dmachard/go-dnscollector/blob/main/docs/workers.md +# transformers: 
https://github.com/dmachard/go-dnscollector/blob/main/docs/transformers.md ################################################ - -# # dnstap standard -# dnstap: -# # listen on ip -# listen-ip: 0.0.0.0 -# # listening on port -# listen-port: 6000 -# # unix socket path -# sock-path: null -# # tls support -# tls-support: false -# # tls min version -# tls-min-version: 1.2 -# # certificate server file -# cert-file: "" -# # private key server file -# key-file: "" -# # Sets the socket receive buffer in bytes SO_RCVBUF, set to zero to use the default system value -# sock-rcvbuf: 0 -# # Reset TCP connection on exit -# reset-conn: true -# # Channel buffer size for incoming packets, number of packet before to drop it. -# chan-buffer-size: 65535 -# # Disable the minimalist DNS parser -# disable-dnsparser: true -# # Decode the extended extra field sent by DNScollector -# extended-support: false - -# # dnstap proxifier with no protobuf decoding. -# dnstap-relay: -# # listen on ip -# listen-ip: 0.0.0.0 -# # listening on port -# listen-port: 6000 -# # unix socket path -# sock-path: null -# # tls support -# tls-support: false -# # tls min version -# tls-min-version: 1.2 -# # certificate server file -# cert-file: "" -# # private key server file -# key-file: "" - -# # live capture with AF_PACKET -# afpacket-sniffer: -# # filter on source and destination port -# port: 53 -# # if "" bind on all interfaces -# device: wlp2s0 -# # Channel buffer size for incoming packets, number of packet before to drop it. -# chan-buffer-size: 65535 - -# # live capture with XDP -# xdp-sniffer: -# # bind on device -# device: wlp2s0 -# # Channel buffer size for incoming packets, number of packet before to drop it. 
-# chan-buffer-size: 65535 - -# # ingest pcap file -# file-ingestor: -# # directory to watch for pcap files to ingest -# watch-dir: /tmp -# # watch the directory pcap file with *.pcap extension or dnstap stream with *.fstrm extension -# # watch mode: pcap|dnstap -# watch-mode: pcap -# # filter only on source and destination port -# pcap-dns-port: 53 -# # delete pcap file after ingest -# delete-after: false -# # Channel buffer size for incoming packets, number of packet before to drop it. -# chan-buffer-size: 65535 - -# # read text file -# tail: -# # file to follow -# file-path: null -# # Use the exact layout numbers described https://golang.org/src/time/format.go -# time-layout: "2006-01-02T15:04:05.999999999Z07:00" -# # regexp pattern for queries -# # example for unbound: "query: (?P[^ ]*) (?P[^ ]*) (?P[^ ]*)" -# pattern-query: "^(?P[^ ]*) (?P[^ ]*) (?P.*_QUERY) (?P[^ ]*) -# (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) -# (?P[^ ]*)b (?P[^ ]*) (?P[^ ]*) (?P[^ ]*)$" -# # regexp pattern for replies -# # example for unbound: "reply: (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) IN (?P[^ ]*) (?P[^ ]*)" -# pattern-reply: "^(?P[^ ]*) (?P[^ ]*) (?P.*_RESPONSE) (?P[^ ]*) -# (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*)b -# (?P[^ ]*) (?P[^ ]*) (?P[^ ]*)$" - -# # protobuf powerdns -# # The text format can be customized with the following additionnals directives: -# # - powerdns-tags[:INDEX]: get all tags separated by comma or one tag at provided index -# # - powerdns-original-request-client: powerdns metadata, get edns subclient -# # - powerdns-applied-policy: powerdns metadata, get applied policy -# # - powerdns-metadata[:KEY]: get all metadata separated by comma or specific one if a valid [KEY] is provided -# powerdns: -# # listen on ip -# listen-ip: 0.0.0.0 -# # listening on port -# listen-port: 6001 -# # tls support -# tls-support: false -# # tls min version -# tls-min-version: 1.2 -# # certificate server file -# cert-file: "" -# # private key server file -# key-file: "" -# # Reset TCP 
connection on exit -# reset-conn: true -# # Channel buffer size for incoming packets, number of packet before to drop it. -# chan-buffer-size: 65535 -# # Reconstruct DNS payload -# add-dns-payload: false - -# # tzsp (TaZmen Sniffer Protocol) -# tzsp: -# # listen on ip -# listen-ip: 0.0.0.0 -# # listen on port -# listen-port: 10000 -# # Channel buffer size for incoming packets, number of packet before to drop it. -# chan-buffer-size: 65535 +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + normalize: + qname-lowercase: true + routing-policy: + forward: [ console ] + dropped: [ ] + + - name: console + stdout: + mode: text ################################################ -# list of supported loggers +# DEPRECATED - multiplexer configuration +# more details: https://github.com/dmachard/go-dnscollector/blob/main/docs/running_mode.md#multiplexer +# workers: https://github.com/dmachard/go-dnscollector/blob/main/docs/workers.md +# transformers: https://github.com/dmachard/go-dnscollector/blob/main/docs/transformers.md ################################################ - -# # print received dns traffic to stdout -# stdout: -# # output format: text|json|flat-json|pcap -# mode: text -# # output text format, please refer to the top of this file to see all available directives -# text-format: "timestamp-rfc3339ns identity operation rcode queryip queryport family protocol length qname qtype latency" -# # Channel buffer size for incoming packets, number of packet before to drop it. 
-# chan-buffer-size: 65535 - -# # rest api server -# restapi: -# # listening IP -# listen-ip: 0.0.0.0 -# # listening port -# listen-port: 8080 -# # default login -# basic-auth-login: admin -# # default password -# basic-auth-pwd: changeme -# # tls support -# tls-support: false -# # certificate server file -# cert-file: "" -# # private key server file -# key-file: "" -# # default number of items on top -# top-n: 100 -# # Channel buffer size for incoming packets, number of packet before to drop it. -# chan-buffer-size: 65535 - -# # prometheus metrics server -# prometheus: -# # listening IP -# listen-ip: 0.0.0.0 -# # listening port -# listen-port: 8081 -# # default login -# basic-auth-login: admin -# # default password -# basic-auth-pwd: changeme -# # enable basic authentication -# basic-auth-enable: true -# # tls support -# tls-support: false -# # tls mutual -# tls-mutual: false -# # tls min version -# tls-min-version: 1.2 -# # certificate server file -# cert-file: "" -# # private key server file -# key-file: "" -# # prometheus prefix -# prometheus-prefix: "dnscollector" -# # default number of items on top -# top-n: 10 -# # Channel buffer size for incoming packets, number of packet before to drop it. 
-# chan-buffer-size: 65535 -# # compute histogram for qnames length, latencies, queries and replies size repartition -# histogram-metrics-enabled: false -# # compute requesters metrics - total and top requesters -# requesters-metrics-enabled: true -# # compute domains metrics - total and top domains -# domains-metrics-enabled: true -# # compute NOERROR domains metrics - total and top domains -# noerror-metrics-enabled: true -# # compute NOERROR domains metrics - total and top domains -# servfail-metrics-enabled: true -# # compute NXDOMAIN domains metrics - total and top domains -# nonexistent-metrics-enabled: true -# # compute TIMEOUT domains metrics - total and top domains -# timeout-metrics-enabled: true -# # prometheus-labels: (list of strings) labels to add to metrics. Currently supported labels: stream_id, resolver, stream_global -# prometheus-labels: ["stream_id"] -# # LRU (least-recently-used) cache size for observed clients DNS -# requesters-cache-size: 250000 -# # maximum time (in seconds) before eviction from the LRU cache -# requesters-cache-ttl: 3600 -# # LRU (least-recently-used) cache size for observed domains -# domains-cache-size: 500000 -# # maximum time (in seconds) before eviction from the LRU cache -# domains-cache-ttl: 3600 -# # LRU (least-recently-used) cache size for observed NOERROR domains -# noerror-domains-cache-size: 500000 -# # maximum time (in seconds) before eviction from the LRU cache -# noerror-domains-cache-ttl: 3600 -# # LRU (least-recently-used) cache size for observed SERVFAIL domains -# servfail-domains-cache-size: 500000 -# # maximum time (in seconds) before eviction from the LRU cache -# servfail-domains-cache-ttl: 3600 -# # LRU (least-recently-used) cache size for observed NX domains -# nonexistent-domains-cache-size: 500000 -# # maximum time (in seconds) before eviction from the LRU cache -# nonexistent-domains-cache-ttl: 3600 -# # LRU (least-recently-used) cache size for observed other domains (suspicious, tlds, ...) 
-# default-domains-cache-size: 500000 -# # maximum time (in seconds) before eviction from the LRU cache -# default-domains-cache-ttl: 3600 - -# # write captured dns traffic to text or binary files with rotation and compression support -# logfile: -# # output logfile name -# file-path: /tmp/test.log -# # maximum size in megabytes of the file before rotation -# # A minimum of max-size*max-files megabytes of space disk must be available -# max-size: 100 -# # maximum number of files to retain. -# # Set to zero if you want to disable this feature -# max-files: 10 -# # flush buffer to log file every X seconds -# flush-interval: 10 -# # compress log file -# compress: false -# # compress interval -# # checking every X seconds if new log files must be compressed -# compress-interval: 5 -# # run external script after each file compress step -# compress-postcommand: null -# # output format: text|json|pcap|dnstap|flat-json -# mode: text -# # output text format, please refer to the top of this file to see all available directives -# text-format: "timestamp-rfc3339ns identity operation rcode queryip queryport family protocol length qname qtype latency" -# # run external script after each file rotation -# postrotate-command: null -# # delete file on script success -# postrotate-delete-success: true -# # Channel buffer size for incoming packets, number of packet before to drop it. 
-# chan-buffer-size: 65535 - -# # resend captured dns traffic to another dnstap collector or to unix socket -# dnstapclient: -# # network transport to use: unix|tcp|tcp+tls -# transport: tcp -# # remote address -# remote-address: 10.0.0.1 -# # remote tcp port -# remote-port: 6000 -# # connect timeout -# connect-timeout: 5 -# # interval in second between retry reconnect -# retry-interval: 10 -# # interval in second before to flush the buffer -# flush-interval: 30 -# # insecure skip verify -# tls-insecure: false -# # tls min version -# tls-min-version: 1.2 -# # provide CA file to verify the server certificate -# ca-file: "" -# # provide client certificate file for mTLS -# cert-file: "" -# # provide client private key file for mTLS -# key-file: "" -# # server identity, if empty use the global one or hostname -# server-id: "dnscollector" -# # overwrite original identity -# overwrite-identity: false -# # number of dns messages in buffer -# buffer-size: 100 -# # Channel buffer size for incoming packets, number of packet before to drop it. -# chan-buffer-size: 65535 -# # Extend the DNStap message by incorporating additional transformations, such as filtering and ATags, into the extra field. 
-# extended-support: false - -# # resend captured dns traffic to a tcp remote destination or to unix socket -# tcpclient: -# # network transport to use: unix|tcp|tcp+tls -# transport: tcp -# # remote address or unix socket path -# remote-address: 127.0.0.1 -# # remote tcp port -# remote-port: 9999 -# # connect timeout -# connect-timeout: 5 -# # interval in second between retry reconnect -# retry-interval: 10 -# # interval in second before to flush the buffer -# flush-interval: 30 -# # insecure skip verify -# tls-insecure: false -# # tls min version -# tls-min-version: 1.2 -# # trusted certificate file -# ca-file: "" -# # provide client certificate file for mTLS -# cert-file: "" -# # provide client private key file for mTLS -# key-file: "" -# # output format: text|json|flat-json -# mode: flat-json -# # output text format, please refer to the top of this file to see all available directives -# text-format: "timestamp-rfc3339ns identity operation rcode queryip queryport family protocol length qname qtype latency" -# # delimiter to use between payload sent -# delimiter: "\n" -# # how many DNS messages will be buffered before being sent -# buffer-size: 100 -# # Channel buffer size for incoming packets, number of packet before to drop it. 
-# chan-buffer-size: 65535 - -# # Send captured traffic to a redis channel, mapped on TCP client logger options -# redispub: -# # output format: text|json|flat-json -# mode: flat-json -# # remote address -# remote-address: 127.0.0.1 -# # remote tcp port -# remote-port: 6379 -# # connect timeout -# connect-timeout: 5 -# retry-interval: 10 -# flush-interval: 2 -# # enable insecure tls -# tls-insecure: false -# # trusted certificate file -# ca-file: "" -# # provide client certificate file for mTLS -# cert-file: "" -# # provide client private key file for mTLS -# key-file: "" -# # output text format, please refer to the top of this file to see all available directives -# text-format: "timestamp-rfc3339ns identity operation rcode queryip queryport family protocol length qname qtype latency" -# delimiter: "\n" -# # how many DNS messages will be buffered before being sent -# buffer-size: 100 -# # Name of the channel to publish into -# redis-channel: dns-collector -# # Channel buffer size for incoming packets, number of packet before to drop it. -# chan-buffer-size: 65535 - -# # redirect captured dns traffic to a remote syslog server or local one -# syslog: -# # Set the syslog logging severity -# severity: INFO -# # Set the syslog logging facility -# facility: DAEMON -# # Transport to use to a remote log daemon or local one -# # local|tcp|udp|unix or tcp+tls -# transport: local -# # Remote address host:port -# remote-address: "" -# # Channel buffer size for incoming packets, number of packet before to drop it. 
-# chan-buffer-size: 65535 -# # interval in second between retry reconnect -# retry-interval: 10 -# # output text format, please refer to the top of this file to see all available directives -# text-format: "timestamp-rfc3339ns identity operation rcode queryip queryport family protocol length qname qtype latency" -# # output format: text|json|flat-json -# mode: text -# # insecure mode, skip certificate verify -# tls-insecure: false -# # tls min version -# tls-min-version: 1.2 -# # provide CA file to verify the server certificate -# ca-file: "" -# # provide client certificate file for mTLS -# cert-file: "" -# # provide client private key file for mTLS -# key-file: "" -# # set syslog formatter between unix, rfc3164 (default) or rfc5424 -# formatter: "rfc3164" -# # set syslog framer: `none` or `rfc5425` -# framer: "none" -# # set syslog hostname -# hostname: "" -# # set syslog program name -# app-name: "" -# # Syslog tag or MSGID -# tag: "" -# # Replace NULl char in Qname with the specified character -# replace-null-char: � -# # how many DNS messages will be buffered before being sent -# buffer-size: 100 -# # interval in second before to flush the buffer -# flush-interval: 30 - -# # elasticsearch backend, basic support -# elasticsearch: -# # remote server url -# server: "http://127.0.0.1:9200/" -# # Elasticsearch index for ingestion -# index: "indexname" -# # Channel buffer size for incoming packets, number of packet before to drop it. 
-# chan-buffer-size: 65535 -# # Size of batches sent to ES via _bulk -# bulk-size: 100 -# # interval in seconds before to flush the buffer -# flush-interval: 30 - -# # resend captured dns traffic to a remote fluentd server or to unix socket -# fluentd: -# # network transport to use: tcp|unix|tcp+tls -# transport: tcp -# # remote address -# remote-address: 127.0.0.1 -# # remote tcp port -# remote-port: 24224 -# # connect timeout in seconds -# connect-timeout: 5 -# # interval in second between retry reconnect -# retry-interval: 10 -# # interval in second before to flush the buffer -# flush-interval: 30 -# # tag name -# tag: "dns.collector" -# # insecure tls, skip certificate and hostname verify -# tls-insecure: false -# # tls min version -# tls-min-version: 1.2 -# # provide CA file to verify the server certificate -# ca-file: "" -# # provide client certificate file for mTLS -# cert-file: "" -# # provide client private key file for mTLS -# key-file: "" -# # how many DNS messages will be buffered before being sent -# buffer-size: 100 -# # Channel buffer size for incoming packets, number of packet before to drop it. -# chan-buffer-size: 65535 - -# # resend captured dns traffic to a InfluxDB database -# influxdb: -# # InfluxDB server url -# server-url: "http://localhost:8086" -# # authentication token -# auth-token: "" -# # enable tls -# tls-support: false -# # insecure skip verify -# tls-insecure: false -# # tls min version -# tls-min-version: 1.2 -# # provide CA file to verify the server certificate -# ca-file: "" -# # provide client certificate file for mTLS -# cert-file: "" -# # provide client private key file for mTLS -# key-file: "" -# # bucket -# bucket: "dns" -# # Organization -# organization: "dnscollector" -# # Channel buffer size for incoming packets, number of packet before to drop it. 
-# chan-buffer-size: 65535 - -# # resend captured dns traffic to a Loki Server -# lokiclient: -# # Loki server url -# server-url: "http://lokiwriter.home.lab/loki/api/v1/push" -# # Job name -# job-name: "dnscollector" -# # output format: text|json|flat-json -# mode: text -# # flush batch every X seconds -# flush-interval: 5 -# # batch size for log entries in bytes -# batch-size: 1048576 -# # interval in second between before to retry to send log entries -# retry-interval: 10 -# # output text format, please refer to the default text format to see all available directives -# # use this parameter if you want a specific format -# text-format: "localtime identity qr queryip family protocol qname qtype rcode" -# # Proxy URL -# proxy-url: "" -# # insecure skip verify -# tls-insecure: false -# # tls min version -# tls-min-version: 1.2 -# # provide CA file to verify the server certificate -# ca-file: "" -# # provide client certificate file for mTLS -# cert-file: "" -# # provide client private key file for mTLS -# key-file: "" -# # basic auth login -# basic-auth-login: "" -# # basic auth password -# basic-auth-pwd: "" -# # path to a file containing the basic auth password -# basic-auth-pwd-file: "" -# # tenant/organisation id. If omitted or empty, no X-Scope-OrgID header is sent. -# tenant-id: "" -# # Describes how to relabel targets. -# # Usage very similar to https://grafana.com/docs/loki/latest/clients/promtail/configuration/#relabel_configs. -# # Labels are accessible by prefixing with `__` and using the key name as used -# # when outputting in the flat-json mode with `.` replaced by `_`. -# relabel-configs: -# - source_labels: ["__dns_qtype"] -# target_label: "qtype" -# # Channel buffer size for incoming packets, number of packet before to drop it. 
-# chan-buffer-size: 65535 - -# # forward to statsd proxy -# statsd: -# # network transport to use: udp|tcp|tcp+tls -# transport: udp -# # remote address -# remote-address: 127.0.0.1 -# # remote tcp port -# remote-port: 8125 -# # connect timeout in seconds -# connect-timeout: 5 -# # insecure tls, skip certificate verify -# tls-insecure: false -# # provide CA file to verify the server certificate -# ca-file: "" -# # provide client certificate file for mTLS -# cert-file: "" -# # provide client private key file for mTLS -# key-file: "" -# # prefix -# prefix: "dnscollector" -# # flush every X seconds -# flush-interval: 10 -# # Channel buffer size for incoming packets, number of packet before to drop it. -# chan-buffer-size: 65535 - -# # Send captured traffic to Scalyr/dataset.com -# # Uses the api/addEvents endpoint, see https://app.eu.scalyr.com/help/api#addEvents -# scalyrclient: -# # output format: text|json|flat-json -# mode: text -# # output text format, please refer to the top of this file to see all available directives -# text-format: "timestamp-rfc3339ns identity operation rcode queryip queryport family protocol length qname qtype latency" -# # Any "session" information for the Scalyr backend. By default, "serverHost" is set to the hostname of the machine -# sessioninfo: {} -# # Any arbitrary attributes for the logs that are sent -# attrs: {} -# # Hostname where the endpoint resides -# server-url: app.scalyr.com -# # API Token with Write permissions, required! 
-# apikey: "" -# # When using json and text mode, the parser Scalyr should use, required -# parser: "" -# # How often to flush logs, in seconds -# flush-interval: 30 -# # Proxy URL -# proxy-url: "" -# # insecure skip verify -# tls-insecure: false -# # tls min version -# tls-min-version: 1.2 -# ca-file: "" -# # provide client certificate file for mTLS -# cert-file: "" -# # provide client private key file for mTLS -# key-file: "" -# # Channel buffer size for incoming packets, number of packet before to drop it. -# chan-buffer-size: 65535 - -# # resend captured dns traffic to a kafka sink -# kafkaproducer: -# # remote address -# remote-address: 127.0.0.1 -# # remote tcp port -# remote-port: 9092 -# # connect timeout -# connect-timeout: 5 -# # interval in second between retry reconnect -# retry-interval: 10 -# # interval in second before to flush the buffer -# flush-interval: 30 -# # enable tls -# tls-support: false -# # insecure skip verify -# tls-insecure: false -# # enable SASL -# sasl-support: false -# # SASL mechanism: PLAIN|SCRAM-SHA-512 -# sasl-mechanism: PLAIN -# # SASL username -# sasl-username: false -# # SASL password -# sasl-password: false -# # output format: text|json|flat-json -# mode: flat-json -# # how many DNS messages will be buffered before being sent -# buffer-size: 100 -# # Kafka topic to forward messages to -# topic: "dnscollector" -# # Kafka partition -# partition: 0 -# # Channel buffer size for incoming packets, number of packet before to drop it. -# chan-buffer-size: 65535 -# # Compression for Kafka messages: none, gzip, lz4, snappy, zstd -# compression: none - -# # Send captured traffic to falco (https://falco.org/), for security and advanced inspection -# falco: -# # remote falco plugin endpoint -# url: "http://127.0.0.1:9200" -# # Channel buffer size for incoming packets, number of packet before to drop it. 
-# chan-buffer-size: 65535 - -################################################ -# list of transforms to apply on collectors or loggers -################################################ - -# # Use this transformer to add base64 dns payload in JSON ouput -# # additionnals directive for text format -# # - extracted-dns-payload: dns payload encoded in base64 -# extract: -# # enable payload base64 encoding -# add-payload: true - -# # Use this transformer to detect trafic duplication -# # additionnals directive for text format -# # - reducer-occurences: number of occurences detected -# # - cumulative-length: sum of the length of each occurences -# reducer: -# # enable detector -# repetitive-traffic-detector: true -# # limit to qname+1 instead of the complete qname to detect repetition -# qname-plus-one: false -# # watch interval in seconds -# watch-interval: 5 - -# # Use this transformer to compute latency and detect timeout on queries -# # additionnals directive for text format -# # - computed-latency: computed latency between queries and replies -# latency: -# # Measure latency between replies and queries -# measure-latency: false -# # Detect queries without replies -# unanswered-queries: false -# # timeout in second for queries -# queries-timeout: 2 - -# # Use this option to protect user privacy -# user-privacy: -# # IP-Addresses are anonymities by zeroing the host-part of an address. -# anonymize-ip: false -# # summarize IPv4 down to the /integer level, default is /16 -# anonymize-v4bits: "/8" -# # summarize IPv6 down to the /integer level, default is /64 -# anonymize-v6bits: "::/64" -# # Reduce Qname to second level only, for exemple mail.google.com be replaced by google.com -# minimaze-qname: false -# # Hashes the query and response IP with the specified algorithm. 
-# hash-ip: false -# # Algorithm to use for IP hashing, currently supported `sha1` (default), `sha256`, `sha512` -# hash-ip-algo: sha1 - -# # Use this option to add top level domain and tld+1, based on public suffix list https://publicsuffix.org/ -# # or convert all domain to lowercase -# # or enable quiet text in your logs -# # additionnals directive for text format -# # - publicsuffix-tld: tld -# # - publicsuffix-etld+1: effective tld plus one -# normalize: -# # Wwww.GooGlE.com will be equal to www.google.com -# qname-lowercase: true -# # add top level domain -# add-tld: false -# # add top level domain plus one label -# add-tld-plus-one: false -# # text will be replaced with the small form -# quiet-text: false - -# # filtering feature to ignore some specific qname -# # dns logs is not redirected to loggers if the filtering regexp matched -# # additionnals directive for text format -# # - filtering-sample-rate -# filtering: -# # path file of the fqdn drop list, domains list must be a full qualified domain name -# drop-fqdn-file: "" -# # path file of the domain drop list, domains list can be a partial domain name with regexp expression -# drop-domain-file: "" -# # path file of the fqdn keep list (all others are dropped), domains list must be a full qualified domain name -# keep-fqdn-file: "" -# # path file of the domain keep list (all others are dropped), domains list can be a partial domain name with regexp expression -# keep-domain-file: "" -# # path file of the query IP drop list, one IP address or subnet per line -# drop-queryip-file: "" -# # path file of the query IP keep list, one IP address or subnet per line -# keep-queryip-file: "" -# # drop specific responses according to the return code (NOERROR, ...). 
This list is empty by default -# # Example to ignore NOERROR dns packets -# # drop-rcodes: -# # - NOERROR -# keep-rdataip-file: "" -# # path file of the rdata IP keep list, one IP address or subnet per line -# drop-rcodes: [] -# # forward received queries to configured loggers ? -# log-queries: true -# # forward received replies to configured loggers ? -# log-replies: true -# # only keep 1 out of every downsample records, e.g. if set to 20, then this will return every 20th record, dropping 95% of queries -# downsample: 0 - -# # GeoIP maxmind support, more information on https://www.maxmind.com/en/geoip-demo -# # this feature can be used to append additional informations like country, city, asn -# # according to the query ip -# # additionnals directive for text format -# # - geoip-continent: continent code -# # - geoip-country: country iso code -# # - geoip-city: city name -# # - geoip-as-number: autonomous system number -# # - geoip-as-owner: autonomous system organization -# geoip: -# # path file to your mmdb country database -# mmdb-country-file: "" -# # path file to your mmdb city database -# mmdb-city-file: "" -# # path file to your mmdb ASN database -# mmdb-asn-file: "" - -# # this feature can be used to tag unusual dns traffic like long domain, large packets -# # additionnals directive for text format -# # - suspicious-score: suspicious score for unusual traffic -# suspicious: -# # a length greater than this value for qname will be considered as suspicious -# threshold-qname-len: 100 -# # a size greater than this value will be considered as suspicious in bytes -# threshold-packet-len: 1000 -# # threshold to set a domain considered as slow regarding latency, value in second -# threshold-slow: 1.0 -# # common qtypes list -# common-qtypes: [ "A", "AAAA", "CNAME", "TXT", "PTR", "NAPTR", "DNSKEY", "SRV", "SOA", "NS", "MX", "DS" ] -# # unallowed list of characters not acceptable in domain name -# unallowed-chars: [ "\"", "==", "/", ":" ] -# # maximum number of 
labels in domains name -# threshold-max-labels: 10 -# # to ignore some domains -# whitelist-domains: [ "\.ip6\.arpa" ] - -# # this feature can be used to add more text format directives for machine learning purpose -# # additionnals directive for text format -# # - ml-entropy -# # - ml-length -# # - ml-digits -# # - ml-lowers -# # - ml-uppers -# # - ml-specials -# # - ml-others -# # - ml-labels -# # - ml-ratio-digits -# # - ml-ratio-letters -# # - ml-ratio-specials -# # - ml-ratio-others -# # - ml-consecutive-chars -# # - ml-consecutive-vowels -# # - ml-consecutive-digits -# # - ml-consecutive-consonants -# # - ml-size -# # - ml-occurences -# # - ml-uncommon-qtypes -# machine-learning: -# # enable all features -# add-features: true \ No newline at end of file +# multiplexer: +# collectors: +# - name: tap +# dnstap: +# listen-ip: 0.0.0.0 +# listen-port: 6000 +# transforms: +# normalize: +# qname-lowercase: true +# loggers: +# - name: console +# stdout: +# mode: text +# routes: +# - from: [ tap ] +# to: [ console ] diff --git a/dnscollector.go b/dnscollector.go index 9f3fbd84..40691467 100644 --- a/dnscollector.go +++ b/dnscollector.go @@ -1,16 +1,20 @@ package main import ( + "context" "fmt" "os" "os/signal" + "strconv" "strings" "syscall" - "github.com/dmachard/go-dnscollector/dnsutils" + _ "net/http/pprof" + "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkglinker" - "github.com/dmachard/go-dnscollector/pkgutils" + "github.com/dmachard/go-dnscollector/pkginit" + "github.com/dmachard/go-dnscollector/telemetry" + "github.com/dmachard/go-dnscollector/workers" "github.com/dmachard/go-logger" "github.com/natefinch/lumberjack" "github.com/prometheus/common/version" @@ -44,6 +48,40 @@ func InitLogger(logger *logger.Logger, config *pkgconfig.Config) { logger.SetVerbose(config.Global.Trace.Verbose) } +func createPIDFile(pidFilePath string) (string, error) 
{ + if _, err := os.Stat(pidFilePath); err == nil { + pidBytes, err := os.ReadFile(pidFilePath) + if err != nil { + return "", fmt.Errorf("failed to read PID file: %w", err) + } + + pid, err := strconv.Atoi(string(pidBytes)) + if err != nil { + return "", fmt.Errorf("invalid PID in PID file: %w", err) + } + + if process, err := os.FindProcess(pid); err == nil { + if err := process.Signal(syscall.Signal(0)); err == nil { + return "", fmt.Errorf("process with PID %d is already running", pid) + } + } + } + + pid := os.Getpid() + pidStr := strconv.Itoa(pid) + err := os.WriteFile(pidFilePath, []byte(pidStr), 0644) + if err != nil { + return "", fmt.Errorf("failed to write PID file: %w", err) + } + return pidStr, nil +} + +func removePIDFile(config *pkgconfig.Config) { + if config.Global.PidFile != "" { + os.Remove(config.Global.PidFile) + } +} + func main() { args := os.Args[1:] // Ignore the first argument (the program name) @@ -51,6 +89,11 @@ func main() { configPath := "./config.yml" testFlag := false + // Server for pprof + // go func() { + // fmt.Println(http.ListenAndServe("localhost:9999", nil)) + // }() + // no more use embedded golang flags... // external lib like tcpassembly can set some uneeded flags too... 
for i := 0; i < len(args); i++ { @@ -88,36 +131,52 @@ func main() { // create logger logger := logger.New(true) - // get DNSMessage flat model - dmRef := dnsutils.GetReferenceDNSMessage() - config, err := pkgutils.LoadConfig(configPath, dmRef) + // load config + config, err := pkgconfig.LoadConfig(configPath) if err != nil { - fmt.Printf("config error: %v\n", err) + fmt.Printf("main - config error: %v\n", err) os.Exit(1) } + // If PID file is specified in the config, create it + if config.Global.PidFile != "" { + pid, err := createPIDFile(config.Global.PidFile) + if err != nil { + fmt.Printf("main - PID file error: %v\n", err) + os.Exit(1) + } + logger.Info("main - write pid=%s to file=%s", pid, config.Global.PidFile) + } + // init logger InitLogger(logger, config) logger.Info("main - version=%s revision=%s", version.Version, version.Revision) - logger.Info("main - starting dns-collector...") + + // // telemetry + if config.Global.Telemetry.Enabled { + logger.Info("main - telemetry enabled on local address: %s", config.Global.Telemetry.WebListen) + } + promServer, metrics, errTelemetry := telemetry.InitTelemetryServer(config, logger) // init active collectors and loggers - mapLoggers := make(map[string]pkgutils.Worker) - mapCollectors := make(map[string]pkgutils.Worker) + mapLoggers := make(map[string]workers.Worker) + mapCollectors := make(map[string]workers.Worker) // running mode, // multiplexer ? - if pkglinker.IsMuxEnabled(config) { - logger.Info("main - multiplexer mode enabled") - pkglinker.InitMultiplexer(mapLoggers, mapCollectors, config, logger) + if pkginit.IsMuxEnabled(config) { + logger.Info("main - running in multiplexer mode") + logger.Warning("main - The multiplexer mode is deprecated. Please switch to the pipelines mode.") + pkginit.InitMultiplexer(mapLoggers, mapCollectors, config, logger) } // or pipeline ? 
- if len(config.Pipelines) > 0 { - logger.Info("main - pipelines mode enabled") - err := pkglinker.InitPipelines(mapLoggers, mapCollectors, config, logger) + if pkginit.IsPipelinesEnabled(config) { + logger.Info("main - running in pipeline mode") + err := pkginit.InitPipelines(mapLoggers, mapCollectors, config, logger, metrics) if err != nil { logger.Error("main - %s", err.Error()) + removePIDFile(config) os.Exit(1) } } @@ -132,27 +191,46 @@ func main() { go func() { for { select { + case err := <-errTelemetry: + logger.Error("main - unable to start telemetry: %v", err) + removePIDFile(config) + os.Exit(1) + case <-sigHUP: - logger.Info("main - SIGHUP received") + logger.Warning("main - SIGHUP received") // read config - err := pkgutils.ReloadConfig(configPath, config, dmRef) + err := pkgconfig.ReloadConfig(configPath, config) if err != nil { - panic(fmt.Sprintf("main - reload config error: %v", err)) + logger.Error("main - reload config error: %v", err) + removePIDFile(config) + os.Exit(1) } // reload logger and multiplexer InitLogger(logger, config) - if pkglinker.IsMuxEnabled(config) { - pkglinker.ReloadMultiplexer(mapLoggers, mapCollectors, config, logger) + if pkginit.IsMuxEnabled(config) { + pkginit.ReloadMultiplexer(mapLoggers, mapCollectors, config, logger) + } + if pkginit.IsPipelinesEnabled(config) { + pkginit.ReloadPipelines(mapLoggers, mapCollectors, config, logger) } case <-sigTerm: - logger.Info("main - exiting...") + logger.Warning("main - exiting...") - // stop all workers - logger.Info("main - stopping...") + // gracefully shutdown the HTTP server + if config.Global.Telemetry.Enabled { + logger.Info("main - telemetry is stopping") + metrics.Stop() + if err := promServer.Shutdown(context.Background()); err != nil { + logger.Error("main - telemetry error shutting down http server - %s", err.Error()) + } + + } + + // and stop all workers for _, c := range mapCollectors { c.Stop() } @@ -164,7 +242,6 @@ func main() { // unblock main function done <- 
true - os.Exit(0) } } }() @@ -172,21 +249,21 @@ func main() { if testFlag { // We've parsed the config and are ready to start, so the config is good enough logger.Info("main - config OK!") + removePIDFile(config) os.Exit(0) } // run all workers in background - logger.Info("main - running...") - for _, l := range mapLoggers { - go l.Run() + go l.StartCollect() } for _, c := range mapCollectors { - go c.Run() + go c.StartCollect() } // block main <-done + removePIDFile(config) logger.Info("main - stopped") } diff --git a/dnsutils/dns_parser.go b/dnsutils/dns_parser.go index 578042fc..1eda2f19 100644 --- a/dnsutils/dns_parser.go +++ b/dnsutils/dns_parser.go @@ -14,104 +14,34 @@ const DNSLen = 12 const UNKNOWN = "UNKNOWN" var ( + Class = map[int]string{1: "IN", 3: "CH", 4: "HS", 254: "NONE", 255: "ANY"} Rdatatypes = map[int]string{ - 0: "NONE", - 1: "A", - 2: "NS", - 3: "MD", - 4: "MF", - 5: "CNAME", - 6: "SOA", - 7: "MB", - 8: "MG", - 9: "MR", - 10: "NULL", - 11: "WKS", - 12: "PTR", - 13: "HINFO", - 14: "MINFO", - 15: "MX", - 16: "TXT", - 17: "RP", - 18: "AFSDB", - 19: "X25", - 20: "ISDN", - 21: "RT", - 22: "NSAP", - 23: "NSAP_PTR", - 24: "SIG", - 25: "KEY", - 26: "PX", - 27: "GPOS", - 28: "AAAA", - 29: "LOC", - 30: "NXT", - 33: "SRV", - 35: "NAPTR", - 36: "KX", - 37: "CERT", - 38: "A6", - 39: "DNAME", - 41: "OPT", - 42: "APL", - 43: "DS", - 44: "SSHFP", - 45: "IPSECKEY", - 46: "RRSIG", - 47: "NSEC", - 48: "DNSKEY", - 49: "DHCID", - 50: "NSEC3", - 51: "NSEC3PARAM", - 52: "TSLA", - 53: "SMIMEA", - 55: "HIP", - 56: "NINFO", - 59: "CDS", - 60: "CDNSKEY", - 61: "OPENPGPKEY", - 62: "CSYNC", - 64: "SVCB", - 65: "HTTPS", - 99: "SPF", - 103: "UNSPEC", - 108: "EUI48", - 109: "EUI64", - 249: "TKEY", - 250: "TSIG", - 251: "IXFR", - 252: "AXFR", - 253: "MAILB", - 254: "MAILA", - 255: "ANY", - 256: "URI", - 257: "CAA", - 258: "AVC", - 259: "AMTRELAY", - 32768: "TA", - 32769: "DLV", + 0: "NONE", 1: "A", 2: "NS", 3: "MD", + 4: "MF", 5: "CNAME", 6: "SOA", 7: "MB", + 8: "MG", 9: "MR", 
10: "NULL", 11: "WKS", + 12: "PTR", 13: "HINFO", 14: "MINFO", 15: "MX", + 16: "TXT", 17: "RP", 18: "AFSDB", 19: "X25", + 20: "ISDN", 21: "RT", 22: "NSAP", 23: "NSAP_PTR", + 24: "SIG", 25: "KEY", 26: "PX", 27: "GPOS", + 28: "AAAA", 29: "LOC", 30: "NXT", 33: "SRV", + 35: "NAPTR", 36: "KX", 37: "CERT", 38: "A6", + 39: "DNAME", 41: "OPT", 42: "APL", 43: "DS", + 44: "SSHFP", 45: "IPSECKEY", 46: "RRSIG", 47: "NSEC", + 48: "DNSKEY", 49: "DHCID", 50: "NSEC3", 51: "NSEC3PARAM", + 52: "TSLA", 53: "SMIMEA", 55: "HIP", 56: "NINFO", + 59: "CDS", 60: "CDNSKEY", 61: "OPENPGPKEY", 62: "CSYNC", + 64: "SVCB", 65: "HTTPS", 99: "SPF", 103: "UNSPEC", + 108: "EUI48", 109: "EUI64", 249: "TKEY", 250: "TSIG", + 251: "IXFR", 252: "AXFR", 253: "MAILB", 254: "MAILA", + 255: "ANY", 256: "URI", 257: "CAA", 258: "AVC", + 259: "AMTRELAY", 32768: "TA", 32769: "DLV", } Rcodes = map[int]string{ - 0: "NOERROR", - 1: "FORMERR", - 2: "SERVFAIL", - 3: "NXDOMAIN", - 4: "NOIMP", - 5: "REFUSED", - 6: "YXDOMAIN", - 7: "YXRRSET", - 8: "NXRRSET", - 9: "NOTAUTH", - 10: "NOTZONE", - 11: "DSOTYPENI", - 16: "BADSIG", - 17: "BADKEY", - 18: "BADTIME", - 19: "BADMODE", - 20: "BADNAME", - 21: "BADALG", - 22: "BADTRUNC", - 23: "BADCOOKIE", + 0: "NOERROR", 1: "FORMERR", 2: "SERVFAIL", 3: "NXDOMAIN", 4: "NOIMP", + 5: "REFUSED", 6: "YXDOMAIN", 7: "YXRRSET", 8: "NXRRSET", 9: "NOTAUTH", + 10: "NOTZONE", 11: "DSOTYPENI", 16: "BADSIG", 17: "BADKEY", + 18: "BADTIME", 19: "BADMODE", 20: "BADNAME", 21: "BADALG", + 22: "BADTRUNC", 23: "BADCOOKIE", } ) @@ -124,6 +54,7 @@ var ErrDecodeDNSLabelTooShort = errors.New("malformed pkt, dns payload too short var ErrDecodeQuestionQtypeTooShort = errors.New("malformed pkt, not enough data to decode qtype") var ErrDecodeDNSAnswerTooShort = errors.New("malformed pkt, not enough data to decode answer") var ErrDecodeDNSAnswerRdataTooShort = errors.New("malformed pkt, not enough data to decode rdata answer") +var ErrDecodeQuestionQclassTooShort = errors.New("malformed pkt, not enough data to 
decode qclass") func RdatatypeToString(rrtype int) string { if value, ok := Rdatatypes[rrtype]; ok { @@ -139,6 +70,13 @@ func RcodeToString(rcode int) string { return UNKNOWN } +func ClassToString(class int) string { + if value, ok := Class[class]; ok { + return value + } + return UNKNOWN +} + // error returned if decoding of DNS packet payload fails. type decodingError struct { part string @@ -154,21 +92,9 @@ func (e *decodingError) Unwrap() error { } type DNSHeader struct { - ID int - Qr int - Opcode int - Aa int - Tc int - Rd int - Ra int - Z int - Ad int - Cd int - Rcode int - Qdcount int - Ancount int - Nscount int - Arcount int + ID, Qr, Opcode, Rcode int + Aa, Tc, Rd, Ra, Z, Ad, Cd int + Qdcount, Ancount, Nscount, Arcount int } /* @@ -273,7 +199,7 @@ func DecodePayload(dm *DNSMessage, header *DNSHeader, config *pkgconfig.Config) var payloadOffset int // decode DNS question if header.Qdcount > 0 { - dnsQname, dnsRRtype, offsetrr, err := DecodeQuestion(header.Qdcount, dm.DNS.Payload) + dnsQname, dnsRRtype, dnsQclass, offsetrr, err := DecodeQuestion(header.Qdcount, dm.DNS.Payload) if err != nil { dm.DNS.MalformedPacket = true return &decodingError{part: "query", err: err} @@ -281,6 +207,7 @@ func DecodePayload(dm *DNSMessage, header *DNSHeader, config *pkgconfig.Config) dm.DNS.Qname = dnsQname dm.DNS.Qtype = RdatatypeToString(dnsRRtype) + dm.DNS.Qclass = ClassToString(dnsQclass) payloadOffset = offsetrr } @@ -358,10 +285,11 @@ DNS QUESTION | QCLASS | +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ */ -func DecodeQuestion(qdcount int, payload []byte) (string, int, int, error) { +func DecodeQuestion(qdcount int, payload []byte) (string, int, int, int, error) { offset := DNSLen var qname string var qtype int + var qclass int for i := 0; i < qdcount; i++ { // the specification allows more than one query in DNS packet, @@ -373,18 +301,26 @@ func DecodeQuestion(qdcount int, payload []byte) (string, int, int, error) { // Decode QNAME qname, offset, err = 
ParseLabels(offset, payload) if err != nil { - return "", 0, 0, err + return "", 0, 0, 0, err } // decode QTYPE and support invalid packet, some abuser sends it... - if len(payload[offset:]) < 4 { - return "", 0, 0, ErrDecodeQuestionQtypeTooShort + if len(payload[offset:]) < 2 { + return "", 0, 0, 0, ErrDecodeQuestionQtypeTooShort } else { qtype = int(binary.BigEndian.Uint16(payload[offset : offset+2])) - offset += 4 + offset += 2 + } + + // decode QCLASS + if len(payload[offset:]) < 2 { + return "", 0, 0, 0, ErrDecodeQuestionQclassTooShort + } else { + qclass = int(binary.BigEndian.Uint16(payload[offset : offset+2])) + offset += 2 } } - return qname, qtype, offset, nil + return qname, qtype, qclass, offset, nil } /* @@ -461,7 +397,7 @@ func DecodeAnswer(ancount int, startOffset int, payload []byte) ([]DNSAnswer, in a := DNSAnswer{ Name: name, Rdatatype: rdatatype, - Class: int(class), + Class: ClassToString(int(class)), TTL: int(ttl), Rdata: parsed, } diff --git a/dnsutils/dns_parser_test.go b/dnsutils/dns_parser_test.go index a25ca701..9afb27a6 100644 --- a/dnsutils/dns_parser_test.go +++ b/dnsutils/dns_parser_test.go @@ -13,6 +13,23 @@ const ( TestQName = "dnstapcollector.test." 
) +// Benchmark + +func BenchmarkDnsParseLabels(b *testing.B) { + payload := []byte{0x12, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x76, 0x69, + 0x74, 0x79, 0x2d, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x06, + 0x75, 0x62, 0x75, 0x6e, 0x74, 0x75, 0x03, 0x63, 0x6f, 0x6d, 0x00, + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, err := ParseLabels(0, payload) + if err != nil { + b.Fatalf("could not parse labels: %v\n", err) + } + } +} + +// Regular tests func TestRcodeValid(t *testing.T) { rcode := RcodeToString(0) if rcode != "NOERROR" { @@ -59,7 +76,11 @@ func TestDecodeQuestion(t *testing.T) { dm.SetQuestion(fqdn, dns.TypeA) payload, _ := dm.Pack() - qname, qtype, offsetRR, _ := DecodeQuestion(1, payload) + qname, qtype, qclass, offsetRR, _ := DecodeQuestion(1, payload) + if ClassToString(qclass) != "IN" { + t.Errorf("invalid qclass: %d", qclass) + } + if qname+"." != fqdn { t.Errorf("invalid qname: %s", qname) } @@ -90,13 +111,16 @@ func TestDecodeQuestion_Multiple(t *testing.T) { 0x00, 0x1c, 0x00, 0x01, } - qname, qtype, offset, err := DecodeQuestion(3, paylaod) + qname, qtype, qclass, offset, err := DecodeQuestion(3, paylaod) if err != nil { t.Errorf("unexpected error %v", err) } if qname != "c" || RdatatypeToString(qtype) != "AAAA" { t.Errorf("expected qname=C, type=AAAA, got qname=%s, type=%s", qname, RdatatypeToString(qtype)) } + if ClassToString(qclass) != "IN" { + t.Errorf("expected qclass=IN %s", ClassToString(qclass)) + } if offset != 33 { t.Errorf("expected resulting offset to be 33, got %d", offset) } @@ -120,7 +144,7 @@ func TestDecodeQuestion_Multiple_InvalidCount(t *testing.T) { 0x00, 0x1c, 0x00, 0x01, } - _, _, _, err := DecodeQuestion(4, paylaod) + _, _, _, _, err := DecodeQuestion(4, paylaod) if !errors.Is(err, ErrDecodeDNSLabelTooShort) { t.Errorf("bad error received: %v", err) } @@ -142,7 +166,7 @@ func TestDecodeAnswer_Ns(t *testing.T) { m.Ns = append(m.Ns, rrNs) payload, _ := m.Pack() - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, 
_, _, offsetRR, _ := DecodeQuestion(1, payload) _, offsetRRns, _ := DecodeAnswer(len(dm.Answer), offsetRR, payload) nsAnswers, _, _ := DecodeAnswer(len(m.Ns), offsetRRns, payload) @@ -163,7 +187,7 @@ func TestDecodeAnswer(t *testing.T) { payload, _ := dm.Pack() - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) answer, _, _ := DecodeAnswer(len(dm.Answer), offsetRR, payload) if len(answer) != len(dm.Answer) { @@ -184,7 +208,7 @@ func TestDecodeRdataSVCB_alias(t *testing.T) { payload, _ := dm.Pack() - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) answer, _, _ := DecodeAnswer(len(dm.Answer), offsetRR, payload) if answer[0].Rdata != rdata { @@ -212,7 +236,7 @@ func TestDecodeRdataSVCB_params(t *testing.T) { rr1, _ := dns.NewRR(fmt.Sprintf("%s SVCB %s", fqdn, rdata)) dm.Answer = append(dm.Answer, rr1) payload, _ := dm.Pack() - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) answer, _, _ := DecodeAnswer(len(dm.Answer), offsetRR, payload) if answer[0].Rdata != rdata { t.Errorf("invalid decode for rdata SVCB, want %s, got: %s", rdata, answer[0].Rdata) @@ -234,7 +258,7 @@ func TestDecodeAnswer_QnameMinimized(t *testing.T) { 0xc0, 0x7e, 0x00, 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x09, 0x00, 0x04, 0x34, 0x71, 0xc3, 0x84, 0x00, 0x00, 0x29, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) _, _, err := DecodeAnswer(4, offsetRR, payload) if err != nil { t.Errorf("failed to decode valid dns packet with minimization") @@ -253,7 +277,7 @@ func TestDecodeRdataA(t *testing.T) { payload, _ := dm.Pack() - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) answer, _, _ := DecodeAnswer(len(dm.Answer), offsetRR, payload) if answer[0].Rdata != rdata { @@ -284,7 +308,7 @@ func 
TestDecodeRdataA_Short(t *testing.T) { // RDATA (1 byte too short for A record) 0x7f, 0x00, 0x00, } - _, _, offsetrr, err := DecodeQuestion(1, payload) + _, _, _, offsetrr, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("Unexpected error decoding question: %v", err) } @@ -307,7 +331,7 @@ func TestDecodeRdataAAAA(t *testing.T) { payload, _ := dm.Pack() - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) answer, _, _ := DecodeAnswer(len(dm.Answer), offsetRR, payload) if answer[0].Rdata != rdata { @@ -341,7 +365,7 @@ func TestDecodeRdataAAAA_Short(t *testing.T) { 0x00, 0x00, 0x00, } - _, _, offsetSetRR, err := DecodeQuestion(1, payload) + _, _, _, offsetSetRR, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } @@ -363,7 +387,7 @@ func TestDecodeRdataCNAME(t *testing.T) { payload, _ := dm.Pack() - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) answer, _, _ := DecodeAnswer(len(dm.Answer), offsetRR, payload) if answer[0].Rdata != rdata { @@ -383,7 +407,7 @@ func TestDecodeRdataMX(t *testing.T) { payload, _ := dm.Pack() - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) answer, _, _ := DecodeAnswer(len(dm.Answer), offsetRR, payload) if answer[0].Rdata != rdata { @@ -415,7 +439,7 @@ func TestDecodeRdataMX_Short(t *testing.T) { // RDATA 0x00, } - _, _, offsetRR, err := DecodeQuestion(1, payload) + _, _, _, offsetRR, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } @@ -450,7 +474,7 @@ func TestDecodeRdataMX_Minimal(t *testing.T) { // RDATA 0x00, 0x00, 0x00, } - _, _, offsetRR, err := DecodeQuestion(1, payload) + _, _, _, offsetRR, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } @@ -476,7 +500,7 @@ func 
TestDecodeRdataSRV(t *testing.T) { payload, _ := dm.Pack() - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) answer, _, _ := DecodeAnswer(len(dm.Answer), offsetRR, payload) if answer[0].Rdata != rdata { @@ -513,7 +537,7 @@ func TestDecodeRdataSRV_Short(t *testing.T) { // missing port and target } - _, _, offsetRR, err := DecodeQuestion(1, payload) + _, _, _, offsetRR, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } @@ -555,7 +579,7 @@ func TestDecodeRdataSRV_Minimal(t *testing.T) { 0x00, } - _, _, offsetRR, err := DecodeQuestion(1, payload) + _, _, _, offsetRR, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } @@ -580,7 +604,7 @@ func TestDecodeRdataNS(t *testing.T) { payload, _ := dm.Pack() - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) answer, _, _ := DecodeAnswer(len(dm.Answer), offsetRR, payload) if answer[0].Rdata != rdata { @@ -600,7 +624,7 @@ func TestDecodeRdataTXT(t *testing.T) { payload, _ := dm.Pack() - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) answer, _, _ := DecodeAnswer(len(dm.Answer), offsetRR, payload) if answer[0].Rdata != rdata { @@ -632,7 +656,7 @@ func TestDecodeRdataTXT_Empty(t *testing.T) { // no data } - _, _, offsetRR, err := DecodeQuestion(1, payload) + _, _, _, offsetRR, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } @@ -672,7 +696,7 @@ func TestDecodeRdataTXT_Short(t *testing.T) { // missing two bytes } - _, _, offsetRR, err := DecodeQuestion(1, payload) + _, _, _, offsetRR, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } @@ -709,7 +733,7 @@ func TestDecodeRdataTXT_NoTxt(t *testing.T) { // no txt-data } - _, _, 
offsetRR, err := DecodeQuestion(1, payload) + _, _, _, offsetRR, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } @@ -736,7 +760,7 @@ func TestDecodeRdataPTR(t *testing.T) { payload, _ := dm.Pack() - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) answer, _, _ := DecodeAnswer(len(dm.Answer), offsetRR, payload) if answer[0].Rdata != rdata { @@ -756,7 +780,7 @@ func TestDecodeRdataSOA(t *testing.T) { payload, _ := dm.Pack() - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) answer, _, _ := DecodeAnswer(len(dm.Answer), offsetRR, payload) if answer[0].Rdata != rdata { @@ -806,7 +830,7 @@ func TestDecodeRdataSOA_Short(t *testing.T) { // minimum -field missing from the RDATA } - _, _, offsetRR, err := DecodeQuestion(1, payload) + _, _, _, offsetRR, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("Unable to decode question: %v", err) } @@ -823,7 +847,7 @@ func TestDecodeRdataSOA_Minimization(t *testing.T) { 51, 3, 111, 118, 104, 3, 110, 101, 116, 0, 4, 116, 101, 99, 104, 192, 53, 120, 119, 219, 34, 0, 1, 81, 128, 0, 0, 14, 16, 0, 54, 238, 128, 0, 0, 0, 60} - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) _, _, err := DecodeAnswer(1, offsetRR, payload) if err != nil { t.Errorf(" error returned: %v", err) @@ -865,7 +889,7 @@ func TestDecodeQuestion_SkipOpt(t *testing.T) { // RDATA 0x7f, 0x00, 0x00, 0x01, } - _, _, offsetrr, err := DecodeQuestion(1, payload) + _, _, _, offsetrr, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("Unexpected error decoding question: %v", err) } @@ -892,7 +916,7 @@ func TestDecodeDns_HeaderTooShort(t *testing.T) { func TestDecodeDnsQuestion_InvalidOffset(t *testing.T) { decoded := []byte{183, 59, 130, 217, 128, 16, 0, 51, 165, 67, 0, 0} - _, _, _, err := DecodeQuestion(1, decoded) + _, _, _, _, err := 
DecodeQuestion(1, decoded) if !errors.Is(err, ErrDecodeDNSLabelTooShort) { t.Errorf("bad error returned: %v", err) } @@ -900,7 +924,7 @@ func TestDecodeDnsQuestion_InvalidOffset(t *testing.T) { func TestDecodeDnsQuestion_PacketTooShort(t *testing.T) { decoded := []byte{183, 59, 130, 217, 128, 16, 0, 51, 165, 67, 0, 0, 1, 1, 8, 10, 23} - _, _, _, err := DecodeQuestion(1, decoded) + _, _, _, _, err := DecodeQuestion(1, decoded) if !errors.Is(err, ErrDecodeDNSLabelTooShort) { t.Errorf("bad error returned: %v", err) } @@ -909,7 +933,7 @@ func TestDecodeDnsQuestion_PacketTooShort(t *testing.T) { func TestDecodeDnsQuestion_QtypeMissing(t *testing.T) { decoded := []byte{88, 27, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 15, 100, 110, 115, 116, 97, 112, 99, 111, 108, 108, 101, 99, 116, 111, 114, 4, 116, 101, 115, 116, 0} - _, _, _, err := DecodeQuestion(1, decoded) + _, _, _, _, err := DecodeQuestion(1, decoded) if !errors.Is(err, ErrDecodeQuestionQtypeTooShort) { t.Errorf("bad error returned: %v", err) } @@ -917,7 +941,7 @@ func TestDecodeDnsQuestion_QtypeMissing(t *testing.T) { func TestDecodeQuestion_InvalidPointer(t *testing.T) { decoded := []byte{88, 27, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 202} - _, _, _, err := DecodeQuestion(1, decoded) + _, _, _, _, err := DecodeQuestion(1, decoded) if !errors.Is(err, ErrDecodeDNSLabelTooShort) { t.Errorf("bad error returned: %v", err) } @@ -928,7 +952,7 @@ func TestDecodeDnsAnswer_PacketTooShort(t *testing.T) { 111, 114, 4, 116, 101, 115, 116, 0, 0, 1, 0, 1, 15, 100, 110, 115, 116, 97, 112, 99, 111, 108, 108, 101, 99, 116, 111, 114, 4, 116, 101, 115, 116, 0, 0, 1, 0, 1, 0, 0, 14, 16, 0} - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) _, _, err := DecodeAnswer(1, offsetRR, payload) if !errors.Is(err, ErrDecodeDNSAnswerTooShort) { t.Errorf("bad error returned: %v", err) @@ -1010,7 +1034,7 @@ func TestDecodeDnsAnswer_RdataTooShort(t *testing.T) { 111, 114, 4, 116, 101, 115, 116, 0, 0, 1, 0, 1, 15, 
100, 110, 115, 116, 97, 112, 99, 111, 108, 108, 101, 99, 116, 111, 114, 4, 116, 101, 115, 116, 0, 0, 1, 0, 1, 0, 0, 14, 16, 0, 4, 127, 0} - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) _, _, err := DecodeAnswer(1, offsetRR, payload) if !errors.Is(err, ErrDecodeDNSAnswerRdataTooShort) { t.Errorf("bad error returned: %v", err) @@ -1022,7 +1046,7 @@ func TestDecodeDnsAnswer_InvalidPtr(t *testing.T) { 109, 99, 104, 100, 2, 109, 101, 0, 0, 1, 0, 1, 192, 254, 0, 1, 0, 1, 0, 0, 14, 16, 0, 4, 83, 112, 146, 176} - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) _, _, err := DecodeAnswer(1, offsetRR, payload) if !errors.Is(err, ErrDecodeDNSLabelInvalidPointer) { t.Errorf("bad error returned: %v", err) @@ -1035,7 +1059,7 @@ func TestDecodeDnsAnswer_InvalidPtr_Loop1(t *testing.T) { 109, 99, 104, 100, 2, 109, 101, 0, 0, 1, 0, 1, 192, 31, 0, 1, 0, 1, 0, 0, 14, 16, 0, 4, 83, 112, 146, 176} - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) _, _, err := DecodeAnswer(1, offsetRR, payload) if !errors.Is(err, ErrDecodeDNSLabelInvalidPointer) { t.Errorf("bad error returned: %v", err) @@ -1049,7 +1073,7 @@ func TestDecodeDnsAnswer_InvalidPtr_Loop2(t *testing.T) { 14, 16, 0, 4, 83, 112, 146, 176, 192, 31, 0, 1, 0, 1, 0, 0, 14, 16, 0, 4, 83, 112, 146, 176} - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) _, _, err := DecodeAnswer(1, offsetRR, payload) if !errors.Is(err, ErrDecodeDNSLabelInvalidPointer) { t.Errorf("bad error returned: %v", err) @@ -1356,7 +1380,7 @@ func TestDecodePayload_QueryHappy(t *testing.T) { t.Errorf("unexpected error when decoding header: %v", err) } - if err = DecodePayload(&dm, &header, pkgconfig.GetFakeConfig()); err != nil { + if err = DecodePayload(&dm, &header, pkgconfig.GetDefaultConfig()); err != nil { t.Errorf("Unexpected error while decoding 
payload: %v", err) } if dm.DNS.MalformedPacket != false { @@ -1422,7 +1446,7 @@ func TestDecodePayload_QueryInvalid(t *testing.T) { t.Errorf("unexpected error when decoding header: %v", err) } - if err = DecodePayload(&dm, &header, pkgconfig.GetFakeConfig()); err == nil { + if err = DecodePayload(&dm, &header, pkgconfig.GetDefaultConfig()); err == nil { t.Errorf("Expected error when parsing payload") } if dm.DNS.MalformedPacket != true { @@ -1491,7 +1515,7 @@ func TestDecodePayload_AnswerHappy(t *testing.T) { t.Errorf("unexpected error when decoding header: %v", err) } - if err = DecodePayload(&dm, &header, pkgconfig.GetFakeConfig()); err != nil { + if err = DecodePayload(&dm, &header, pkgconfig.GetDefaultConfig()); err != nil { t.Errorf("Unexpected error while decoding payload: %v", err) } if dm.DNS.MalformedPacket != false { @@ -1524,7 +1548,7 @@ func TestDecodePayload_AnswerHappy(t *testing.T) { expected := DNSAnswer{ Name: dm.DNS.Qname, Rdatatype: RdatatypeToString(0x0001), - Class: 0x0001, + Class: "IN", // 0x0001, TTL: 300, Rdata: fmt.Sprintf("10.10.1.%d", i+1), } @@ -1612,7 +1636,7 @@ func TestDecodePayload_AnswerMultipleQueries(t *testing.T) { t.Errorf("unexpected error when decoding header: %v", err) } - if err = DecodePayload(&dm, &header, pkgconfig.GetFakeConfig()); err != nil { + if err = DecodePayload(&dm, &header, pkgconfig.GetDefaultConfig()); err != nil { t.Errorf("Unexpected error while decoding payload: %v", err) } if dm.DNS.MalformedPacket != false { @@ -1645,7 +1669,7 @@ func TestDecodePayload_AnswerMultipleQueries(t *testing.T) { expected := DNSAnswer{ Name: "s" + dm.DNS.Qname, // answers have qname from 1st query data, 2nd data is missing 's' Rdatatype: RdatatypeToString(0x0001), - Class: 0x0001, + Class: "IN", // 0x0001, TTL: 300, Rdata: fmt.Sprintf("10.10.1.%d", i+1), } @@ -1725,7 +1749,7 @@ func TestDecodePayload_AnswerInvalid(t *testing.T) { t.Errorf("unexpected error when decoding header: %v", err) } - if err = DecodePayload(&dm, &header, 
pkgconfig.GetFakeConfig()); err == nil { + if err = DecodePayload(&dm, &header, pkgconfig.GetDefaultConfig()); err == nil { t.Error("expected decoding to fail") } // returned error should wrap the original error @@ -1793,7 +1817,7 @@ func TestDecodePayload_AnswerInvalidQuery(t *testing.T) { t.Errorf("unexpected error when decoding header: %v", err) } - if err = DecodePayload(&dm, &header, pkgconfig.GetFakeConfig()); err == nil { + if err = DecodePayload(&dm, &header, pkgconfig.GetDefaultConfig()); err == nil { t.Error("expected decoding to fail") } // returned error should wrap the original error @@ -1867,7 +1891,7 @@ func TestDecodePayload_AnswerInvalidEdns(t *testing.T) { t.Errorf("unexpected error when decoding header: %v", err) } - if err = DecodePayload(&dm, &header, pkgconfig.GetFakeConfig()); err == nil { + if err = DecodePayload(&dm, &header, pkgconfig.GetDefaultConfig()); err == nil { t.Error("expected decoding to fail") } // returned error should wrap the original error @@ -1935,7 +1959,7 @@ func TestDecodePayload_AnswerInvaliAdditional(t *testing.T) { t.Errorf("unexpected error when decoding header: %v", err) } - if err = DecodePayload(&dm, &header, pkgconfig.GetFakeConfig()); err == nil { + if err = DecodePayload(&dm, &header, pkgconfig.GetDefaultConfig()); err == nil { t.Error("expected decoding to fail") } // returned error should wrap the original error @@ -1997,7 +2021,7 @@ func TestDecodePayload_AnswerError(t *testing.T) { t.Errorf("unexpected error when decoding header: %v", err) } - if err = DecodePayload(&dm, &header, pkgconfig.GetFakeConfig()); err != nil { + if err = DecodePayload(&dm, &header, pkgconfig.GetDefaultConfig()); err != nil { t.Errorf("Unexpected error while decoding payload: %v", err) } if dm.DNS.MalformedPacket != false { @@ -2032,7 +2056,7 @@ func TestDecodePayload_AnswerError(t *testing.T) { expected := DNSAnswer{ Name: "google.com", Rdatatype: RdatatypeToString(0x0006), - Class: 0x0001, + Class: "IN", // 0x0001, TTL: 60, 
Rdata: "ns1.google.com dns-admin.google.com 430000820 900 900 1800 60", } @@ -2101,7 +2125,7 @@ func TestDecodePayload_AnswerError_Invalid(t *testing.T) { t.Errorf("unexpected error when decoding header: %v", err) } - if err = DecodePayload(&dm, &header, pkgconfig.GetFakeConfig()); err == nil { + if err = DecodePayload(&dm, &header, pkgconfig.GetDefaultConfig()); err == nil { t.Error("expected decoding to fail") } // returned error should wrap the original error @@ -2153,7 +2177,7 @@ func TestDecodePayload_AdditionalRRAndEDNS(t *testing.T) { t.Errorf("error when deocoding header: %v", err) } - if err := DecodePayload(&dm, &header, pkgconfig.GetFakeConfig()); err != nil { + if err := DecodePayload(&dm, &header, pkgconfig.GetDefaultConfig()); err != nil { t.Errorf("unexpected error while decoding payload: %v", err) } @@ -2307,7 +2331,7 @@ func TestDecodePayload_Truncated(t *testing.T) { t.Errorf("unexpected error when decoding header: %v", err) } - if err = DecodePayload(&dm, &header, pkgconfig.GetFakeConfig()); err != nil { + if err = DecodePayload(&dm, &header, pkgconfig.GetDefaultConfig()); err != nil { t.Error("expected no error on decode") } diff --git a/dnsutils/edns_parser.go b/dnsutils/edns_parser.go index 945037b5..b7952f05 100644 --- a/dnsutils/edns_parser.go +++ b/dnsutils/edns_parser.go @@ -17,13 +17,7 @@ var ErrDecodeEdnsTooManyOpts = errors.New("edns, packet contained too many OPT R var ( OptCodes = map[int]string{ - 3: "NSID", - 8: "CSUBNET", - 9: "EXPIRE", - 10: "COOKIE", - 11: "KEEPALIVE", - 12: "PADDING", - 15: "ERRORS", + 3: "NSID", 8: "CSUBNET", 9: "EXPIRE", 10: "COOKIE", 11: "KEEPALIVE", 12: "PADDING", 15: "ERRORS", } ErrorCodeToString = map[int]string{ 0: "Other", diff --git a/dnsutils/edns_parser_test.go b/dnsutils/edns_parser_test.go index f5ba0698..a8b57fdc 100644 --- a/dnsutils/edns_parser_test.go +++ b/dnsutils/edns_parser_test.go @@ -26,7 +26,7 @@ func TestDecodeQuery_EDNS(t *testing.T) { payload, _ := dm.Pack() - _, _, offsetRR, _ := 
DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) _, _, err := DecodeEDNS(len(dm.Extra), offsetRR, payload) if err != nil { @@ -63,7 +63,7 @@ func TestDecodeReply_EDNS(t *testing.T) { m.SetRcode(dm, 42) // 32(extended rcode) + 10(rcode) payload, _ := m.Pack() - _, _, offsetRR, _ := DecodeQuestion(1, payload) + _, _, _, offsetRR, _ := DecodeQuestion(1, payload) _, offsetRR, _ = DecodeAnswer(len(m.Answer), offsetRR, payload) _, _, err := DecodeEDNS(len(m.Extra), offsetRR, payload) @@ -109,7 +109,7 @@ func TestDecodeQuery_EdnsSubnet(t *testing.T) { 0xc0, 0xa8, 0x01, } - _, _, offset, err := DecodeQuestion(1, payload) + _, _, _, offset, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } @@ -179,7 +179,7 @@ func TestDecodeQuery_EdnsSubnetV6(t *testing.T) { 0xfe, 0x80, 0x01, } - _, _, offset, err := DecodeQuestion(1, payload) + _, _, _, offset, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } @@ -250,7 +250,7 @@ func TestDecodeQuery_EdnsSubnet_invalidFam(t *testing.T) { 0xfe, 0x80, 0x01, } - _, _, offset, err := DecodeQuestion(1, payload) + _, _, _, offset, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } @@ -308,7 +308,7 @@ func TestDecodeQuery_EdnsSubnet_Short(t *testing.T) { // 0xfe, 0x80, 0x01, } - _, _, offset, err := DecodeQuestion(1, payload) + _, _, _, offset, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } @@ -366,7 +366,7 @@ func TestDecodeQuery_EdnsSubnet_NoAddr(t *testing.T) { // 0xfe, 0x80, 0x01, } - _, _, offset, err := DecodeQuestion(1, payload) + _, _, _, offset, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } @@ -429,7 +429,7 @@ func TestDecodeAnswer_EdnsError(t *testing.T) { 0x00, 0x17, } - _, _, offset, 
err := DecodeQuestion(1, payload) + _, _, _, offset, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } @@ -485,7 +485,7 @@ func TestDecodeAnswer_EdnsErrorText(t *testing.T) { 0x62, 0x30, 0x72, 0x6b, 0x65, 0x6e, } - _, _, offset, err := DecodeQuestion(1, payload) + _, _, _, offset, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } @@ -540,7 +540,7 @@ func TestDecodeAnswer_EdnsErrorShort(t *testing.T) { 0x00, } - _, _, offset, err := DecodeQuestion(1, payload) + _, _, _, offset, err := DecodeQuestion(1, payload) if err != nil { t.Errorf("unexpected error while decoding question: %v", err) } diff --git a/dnsutils/message.go b/dnsutils/message.go index 2e13814d..8c1f1f65 100644 --- a/dnsutils/message.go +++ b/dnsutils/message.go @@ -16,12 +16,12 @@ import ( "strings" "time" - "github.com/dmachard/go-dnscollector/netlib" + "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnstap-protobuf" + "github.com/dmachard/go-netutils" "github.com/google/gopacket" "github.com/google/gopacket/layers" "github.com/miekg/dns" - "github.com/nqd/flat" "google.golang.org/protobuf/proto" ) @@ -38,10 +38,16 @@ var ( ReducerDirectives = regexp.MustCompile(`^reducer-*`) MachineLearningDirectives = regexp.MustCompile(`^ml-*`) FilteringDirectives = regexp.MustCompile(`^filtering-*`) - // RawTextDirective = regexp.MustCompile(`^ *\{.*\}`) - RawTextDirective = regexp.MustCompile(`^ *\{.*`) + RawTextDirective = regexp.MustCompile(`^ *\{.*`) + ATagsDirectives = regexp.MustCompile(`^atags*`) ) +func GetFakeDNS() ([]byte, error) { + dnsmsg := new(dns.Msg) + dnsmsg.SetQuestion("dns.collector.", dns.TypeA) + return dnsmsg.Pack() +} + func GetIPPort(dm *DNSMessage) (string, int, string, int) { srcIP, srcPort := "0.0.0.0", 53 dstIP, dstPort := "0.0.0.0", 53 @@ -68,178 +74,194 @@ func 
GetIPPort(dm *DNSMessage) (string, int, string, int) { } type DNSAnswer struct { - Name string `json:"name" msgpack:"name"` - Rdatatype string `json:"rdatatype" msgpack:"rdatatype"` - Class int `json:"-" msgpack:"-"` - TTL int `json:"ttl" msgpack:"ttl"` - Rdata string `json:"rdata" msgpack:"rdata"` + Name string `json:"name"` + Rdatatype string `json:"rdatatype"` + Class string `json:"class"` + TTL int `json:"ttl"` + Rdata string `json:"rdata"` } type DNSFlags struct { - QR bool `json:"qr" msgpack:"qr"` - TC bool `json:"tc" msgpack:"tc"` - AA bool `json:"aa" msgpack:"aa"` - RA bool `json:"ra" msgpack:"ra"` - AD bool `json:"ad" msgpack:"ad"` - RD bool `json:"rd" msgpack:"rd"` - CD bool `json:"cd" msgpack:"cd"` + QR bool `json:"qr"` + TC bool `json:"tc"` + AA bool `json:"aa"` + RA bool `json:"ra"` + AD bool `json:"ad"` + RD bool `json:"rd"` + CD bool `json:"cd"` } type DNSNetInfo struct { - Family string `json:"family" msgpack:"family"` - Protocol string `json:"protocol" msgpack:"protocol"` - QueryIP string `json:"query-ip" msgpack:"query-ip"` - QueryPort string `json:"query-port" msgpack:"query-port"` - ResponseIP string `json:"response-ip" msgpack:"response-ip"` - ResponsePort string `json:"response-port" msgpack:"response-port"` - IPDefragmented bool `json:"ip-defragmented" msgpack:"ip-defragmented"` - TCPReassembled bool `json:"tcp-reassembled" msgpack:"tcp-reassembled"` + Family string `json:"family"` + Protocol string `json:"protocol"` + QueryIP string `json:"query-ip"` + QueryPort string `json:"query-port"` + ResponseIP string `json:"response-ip"` + ResponsePort string `json:"response-port"` + IPDefragmented bool `json:"ip-defragmented"` + TCPReassembled bool `json:"tcp-reassembled"` } type DNSRRs struct { - Answers []DNSAnswer `json:"an" msgpack:"an"` - Nameservers []DNSAnswer `json:"ns" msgpack:"ns"` - Records []DNSAnswer `json:"ar" msgpack:"ar"` + Answers []DNSAnswer `json:"an"` + Nameservers []DNSAnswer `json:"ns"` + Records []DNSAnswer `json:"ar"` } type 
DNS struct { - Type string `json:"-" msgpack:"-"` - Payload []byte `json:"-" msgpack:"-"` - Length int `json:"length" msgpack:"-"` - ID int `json:"id" msgpack:"id"` - Opcode int `json:"opcode" msgpack:"opcode"` - Rcode string `json:"rcode" msgpack:"rcode"` - Qname string `json:"qname" msgpack:"qname"` - - Qtype string `json:"qtype" msgpack:"qtype"` - Flags DNSFlags `json:"flags" msgpack:"flags"` - DNSRRs DNSRRs `json:"resource-records" msgpack:"resource-records"` - MalformedPacket bool `json:"malformed-packet" msgpack:"malformed-packet"` + Type string `json:"-"` + Payload []byte `json:"-"` + Length int `json:"length"` + ID int `json:"id"` + Opcode int `json:"opcode"` + Rcode string `json:"rcode"` + Qname string `json:"qname"` + Qclass string `json:"qclass"` + + Qtype string `json:"qtype"` + Flags DNSFlags `json:"flags"` + DNSRRs DNSRRs `json:"resource-records"` + MalformedPacket bool `json:"malformed-packet"` } type DNSOption struct { - Code int `json:"code" msgpack:"code"` - Name string `json:"name" msgpack:"name"` - Data string `json:"data" msgpack:"data"` + Code int `json:"code"` + Name string `json:"name"` + Data string `json:"data"` } type DNSExtended struct { - UDPSize int `json:"udp-size" msgpack:"udp-size"` - ExtendedRcode int `json:"rcode" msgpack:"rcode"` - Version int `json:"version" msgpack:"version"` - Do int `json:"dnssec-ok" msgpack:"dnssec-ok"` - Z int `json:"-" msgpack:"-"` - Options []DNSOption `json:"options" msgpack:"options"` + UDPSize int `json:"udp-size"` + ExtendedRcode int `json:"rcode"` + Version int `json:"version"` + Do int `json:"dnssec-ok"` + Z int `json:"-"` + Options []DNSOption `json:"options"` } type DNSTap struct { - Operation string `json:"operation" msgpack:"operation"` - Identity string `json:"identity" msgpack:"identity"` - Version string `json:"version" msgpack:"version"` - TimestampRFC3339 string `json:"timestamp-rfc3339ns" msgpack:"timestamp-rfc3339ns"` - Timestamp int64 `json:"-" msgpack:"-"` - TimeSec int `json:"-" 
msgpack:"-"` - TimeNsec int `json:"-" msgpack:"-"` - Latency float64 `json:"-" msgpack:"-"` - LatencySec string `json:"latency" msgpack:"latency"` - Payload []byte `json:"-" msgpack:"-"` - Extra string `json:"extra" msgpack:"extra"` - PolicyRule string `json:"policy-rule" msgpack:"policy-rule"` - PolicyType string `json:"policy-type" msgpack:"policy-type"` - PolicyMatch string `json:"policy-match" msgpack:"policy-match"` - PolicyAction string `json:"policy-action" msgpack:"policy-action"` - PolicyValue string `json:"policy-value" msgpack:"policy-value"` + Operation string `json:"operation"` + Identity string `json:"identity"` + Version string `json:"version"` + TimestampRFC3339 string `json:"timestamp-rfc3339ns"` + Timestamp int64 `json:"-"` + TimeSec int `json:"-"` + TimeNsec int `json:"-"` + Latency float64 `json:"-"` + LatencySec string `json:"latency"` + Payload []byte `json:"-"` + Extra string `json:"extra"` + PolicyRule string `json:"policy-rule"` + PolicyType string `json:"policy-type"` + PolicyMatch string `json:"policy-match"` + PolicyAction string `json:"policy-action"` + PolicyValue string `json:"policy-value"` + PeerName string `json:"peer-name"` + QueryZone string `json:"query-zone"` } type PowerDNS struct { - Tags []string `json:"tags" msgpack:"tags"` - OriginalRequestSubnet string `json:"original-request-subnet" msgpack:"original-request-subnet"` - AppliedPolicy string `json:"applied-policy" msgpack:"applied-policy"` - AppliedPolicyHit string `json:"applied-policy-hit" msgpack:"applied-policy-hit"` - AppliedPolicyKind string `json:"applied-policy-kind" msgpack:"applied-policy-kind"` - AppliedPolicyTrigger string `json:"applied-policy-trigger" msgpack:"applied-policy-trigger"` - AppliedPolicyType string `json:"applied-policy-type" msgpack:"applied-policy-type"` - Metadata map[string]string `json:"metadata" msgpack:"metadata"` + Tags []string `json:"tags"` + OriginalRequestSubnet string `json:"original-request-subnet"` + AppliedPolicy string 
`json:"applied-policy"` + AppliedPolicyHit string `json:"applied-policy-hit"` + AppliedPolicyKind string `json:"applied-policy-kind"` + AppliedPolicyTrigger string `json:"applied-policy-trigger"` + AppliedPolicyType string `json:"applied-policy-type"` + Metadata map[string]string `json:"metadata"` + HTTPVersion string `json:"http-version"` } type TransformDNSGeo struct { - City string `json:"city" msgpack:"city"` - Continent string `json:"continent" msgpack:"continent"` - CountryIsoCode string `json:"country-isocode" msgpack:"country-isocode"` - AutonomousSystemNumber string `json:"as-number" msgpack:"as-number"` - AutonomousSystemOrg string `json:"as-owner" msgpack:"as-owner"` + City string `json:"city"` + Continent string `json:"continent"` + CountryIsoCode string `json:"country-isocode"` + AutonomousSystemNumber string `json:"as-number"` + AutonomousSystemOrg string `json:"as-owner"` } type TransformSuspicious struct { - Score float64 `json:"score" msgpack:"score"` - MalformedPacket bool `json:"malformed-pkt" msgpack:"malformed-pkt"` - LargePacket bool `json:"large-pkt" msgpack:"large-pkt"` - LongDomain bool `json:"long-domain" msgpack:"long-domain"` - SlowDomain bool `json:"slow-domain" msgpack:"slow-domain"` - UnallowedChars bool `json:"unallowed-chars" msgpack:"unallowed-chars"` - UncommonQtypes bool `json:"uncommon-qtypes" msgpack:"uncommon-qtypes"` - ExcessiveNumberLabels bool `json:"excessive-number-labels" msgpack:"excessive-number-labels"` - Domain string `json:"domain,omitempty" msgpack:"-"` + Score float64 `json:"score"` + MalformedPacket bool `json:"malformed-pkt"` + LargePacket bool `json:"large-pkt"` + LongDomain bool `json:"long-domain"` + SlowDomain bool `json:"slow-domain"` + UnallowedChars bool `json:"unallowed-chars"` + UncommonQtypes bool `json:"uncommon-qtypes"` + ExcessiveNumberLabels bool `json:"excessive-number-labels"` + Domain string `json:"domain,omitempty"` } type TransformPublicSuffix struct { - QnamePublicSuffix string `json:"tld" 
msgpack:"qname-public-suffix"` - QnameEffectiveTLDPlusOne string `json:"etld+1" msgpack:"qname-effective-tld-plus-one"` + QnamePublicSuffix string `json:"tld"` + QnameEffectiveTLDPlusOne string `json:"etld+1"` + ManagedByICANN bool `json:"managed-icann"` } type TransformExtracted struct { - Base64Payload []byte `json:"dns_payload" msgpack:"dns_payload"` + Base64Payload []byte `json:"dns_payload"` } type TransformReducer struct { - Occurrences int `json:"occurrences" msgpack:"occurrences"` - CumulativeLength int `json:"cumulative-length" msgpack:"cumulative-length"` + Occurrences int `json:"occurrences"` + CumulativeLength int `json:"cumulative-length"` } type TransformFiltering struct { - SampleRate int `json:"sample-rate" msgpack:"sample-rate"` + SampleRate int `json:"sample-rate"` } type TransformML struct { - Entropy float64 `json:"entropy" msgpack:"entropy"` // Entropy of query name - Length int `json:"length" msgpack:"length"` // Length of domain - Labels int `json:"labels" msgpack:"labels"` // Number of labels in the query name separated by dots - Digits int `json:"digits" msgpack:"digits"` // Count of numerical characters - Lowers int `json:"lowers" msgpack:"lowers"` // Count of lowercase characters - Uppers int `json:"uppers" msgpack:"uppers"` // Count of uppercase characters - Specials int `json:"specials" msgpack:"specials"` // Number of special characters; special characters such as dash, underscore, equal sign,... 
- Others int `json:"others" msgpack:"others"` - RatioDigits float64 `json:"ratio-digits" msgpack:"ratio-digits"` - RatioLetters float64 `json:"ratio-letters" msgpack:"ratio-letters"` - RatioSpecials float64 `json:"ratio-specials" msgpack:"ratio-specials"` - RatioOthers float64 `json:"ratio-others" msgpack:"ratio-others"` - ConsecutiveChars int `json:"consecutive-chars" msgpack:"consecutive-chars"` - ConsecutiveVowels int `json:"consecutive-vowels" msgpack:"consecutive-vowels"` - ConsecutiveDigits int `json:"consecutive-digits" msgpack:"consecutive-digits"` - ConsecutiveConsonants int `json:"consecutive-consonants" msgpack:"consecutive-consonants"` - Size int `json:"size" msgpack:"size"` - Occurrences int `json:"occurrences" msgpack:"occurrences"` - UncommonQtypes int `json:"uncommon-qtypes" msgpack:"uncommon-qtypes"` + Entropy float64 `json:"entropy"` // Entropy of query name + Length int `json:"length"` // Length of domain + Labels int `json:"labels"` // Number of labels in the query name separated by dots + Digits int `json:"digits"` // Count of numerical characters + Lowers int `json:"lowers"` // Count of lowercase characters + Uppers int `json:"uppers"` // Count of uppercase characters + Specials int `json:"specials"` // Number of special characters; special characters such as dash, underscore, equal sign,... 
+ Others int `json:"others"` + RatioDigits float64 `json:"ratio-digits"` + RatioLetters float64 `json:"ratio-letters"` + RatioSpecials float64 `json:"ratio-specials"` + RatioOthers float64 `json:"ratio-others"` + ConsecutiveChars int `json:"consecutive-chars"` + ConsecutiveVowels int `json:"consecutive-vowels"` + ConsecutiveDigits int `json:"consecutive-digits"` + ConsecutiveConsonants int `json:"consecutive-consonants"` + Size int `json:"size"` + Occurrences int `json:"occurrences"` + UncommonQtypes int `json:"uncommon-qtypes"` } type TransformATags struct { - Tags []string `json:"tags" msgpack:"tags"` + Tags []string `json:"tags"` +} + +type RelabelingRule struct { + Regex *regexp.Regexp + Replacement string + Action string +} + +type TransformRelabeling struct { + Rules []RelabelingRule } type DNSMessage struct { - NetworkInfo DNSNetInfo `json:"network" msgpack:"network"` - DNS DNS `json:"dns" msgpack:"dns"` - EDNS DNSExtended `json:"edns" msgpack:"edns"` - DNSTap DNSTap `json:"dnstap" msgpack:"dnstap"` - Geo *TransformDNSGeo `json:"geoip,omitempty" msgpack:"geo"` - PowerDNS *PowerDNS `json:"powerdns,omitempty" msgpack:"powerdns"` - Suspicious *TransformSuspicious `json:"suspicious,omitempty" msgpack:"suspicious"` - PublicSuffix *TransformPublicSuffix `json:"publicsuffix,omitempty" msgpack:"publicsuffix"` - Extracted *TransformExtracted `json:"extracted,omitempty" msgpack:"extracted"` - Reducer *TransformReducer `json:"reducer,omitempty" msgpack:"reducer"` - MachineLearning *TransformML `json:"ml,omitempty" msgpack:"ml"` - Filtering *TransformFiltering `json:"filtering,omitempty" msgpack:"filtering"` - ATags *TransformATags `json:"atags,omitempty" msgpack:"atags"` + NetworkInfo DNSNetInfo `json:"network"` + DNS DNS `json:"dns"` + EDNS DNSExtended `json:"edns"` + DNSTap DNSTap `json:"dnstap"` + Geo *TransformDNSGeo `json:"geoip,omitempty"` + PowerDNS *PowerDNS `json:"powerdns,omitempty"` + Suspicious *TransformSuspicious `json:"suspicious,omitempty"` + 
PublicSuffix *TransformPublicSuffix `json:"publicsuffix,omitempty"` + Extracted *TransformExtracted `json:"extracted,omitempty"` + Reducer *TransformReducer `json:"reducer,omitempty"` + MachineLearning *TransformML `json:"ml,omitempty"` + Filtering *TransformFiltering `json:"filtering,omitempty"` + ATags *TransformATags `json:"atags,omitempty"` + Relabeling *TransformRelabeling `json:"-"` } func (dm *DNSMessage) Init() { @@ -266,6 +288,8 @@ func (dm *DNSMessage) Init() { PolicyMatch: "-", PolicyAction: "-", PolicyValue: "-", + PeerName: "-", + QueryZone: "-", } dm.DNS = DNS{ @@ -275,6 +299,7 @@ func (dm *DNSMessage) Init() { Rcode: "-", Qtype: "-", Qname: "-", + Qclass: "-", DNSRRs: DNSRRs{Answers: []DNSAnswer{}, Nameservers: []DNSAnswer{}, Records: []DNSAnswer{}}, } @@ -298,13 +323,14 @@ func (dm *DNSMessage) InitTransforms() { dm.Suspicious = &TransformSuspicious{} dm.PowerDNS = &PowerDNS{} dm.Geo = &TransformDNSGeo{} + dm.Relabeling = &TransformRelabeling{} } -func (dm *DNSMessage) handleGeoIPDirectives(directives []string, s *strings.Builder) error { +func (dm *DNSMessage) handleGeoIPDirectives(directive string, s *strings.Builder) error { if dm.Geo == nil { s.WriteString("-") } else { - switch directive := directives[0]; { + switch { case directive == "geoip-continent": s.WriteString(dm.Geo.Continent) case directive == "geoip-country": @@ -322,10 +348,17 @@ func (dm *DNSMessage) handleGeoIPDirectives(directives []string, s *strings.Buil return nil } -func (dm *DNSMessage) handlePdnsDirectives(directives []string, s *strings.Builder) error { +func (dm *DNSMessage) handlePdnsDirectives(directive string, s *strings.Builder) error { if dm.PowerDNS == nil { s.WriteString("-") } else { + var directives []string + if i := strings.IndexByte(directive, ':'); i == -1 { + directives = append(directives, directive) + } else { + directives = []string{directive[:i], directive[i+1:]} + } + switch directive := directives[0]; { case directive == "powerdns-tags": if 
dm.PowerDNS.Tags == nil { @@ -409,6 +442,12 @@ func (dm *DNSMessage) handlePdnsDirectives(directives []string, s *strings.Build s.WriteString("-") } } + case directive == "powerdns-http-version": + if len(dm.PowerDNS.HTTPVersion) > 0 { + s.WriteString(dm.PowerDNS.HTTPVersion) + } else { + s.WriteString("-") + } default: return errors.New(ErrorUnexpectedDirective + directive) } @@ -416,11 +455,54 @@ func (dm *DNSMessage) handlePdnsDirectives(directives []string, s *strings.Build return nil } -func (dm *DNSMessage) handleSuspiciousDirectives(directives []string, s *strings.Builder) error { - if dm.Suspicious == nil { +func (dm *DNSMessage) handleATagsDirectives(directive string, s *strings.Builder) error { + if dm.ATags == nil { s.WriteString("-") } else { + var directives []string + if i := strings.IndexByte(directive, ':'); i == -1 { + directives = append(directives, directive) + } else { + directives = []string{directive[:i], directive[i+1:]} + } + switch directive := directives[0]; { + case directive == "atags": + if len(dm.ATags.Tags) > 0 { + if len(directives) == 2 { + tagIndex, err := strconv.Atoi(directives[1]) + if err != nil { + log.Fatalf("unsupport tag index provided (integer expected): %s", directives[1]) + } + if tagIndex >= len(dm.ATags.Tags) { + s.WriteString("-") + } else { + s.WriteString(dm.ATags.Tags[tagIndex]) + } + } else { + for i, tag := range dm.ATags.Tags { + s.WriteString(tag) + // add separator + if i+1 < len(dm.ATags.Tags) { + s.WriteString(",") + } + } + } + } else { + s.WriteString("-") + } + default: + return errors.New(ErrorUnexpectedDirective + directive) + } + } + return nil +} + +func (dm *DNSMessage) handleSuspiciousDirectives(directive string, s *strings.Builder) error { + if dm.Suspicious == nil { + s.WriteString("-") + } else { + switch { case directive == "suspicious-score": s.WriteString(strconv.Itoa(int(dm.Suspicious.Score))) default: @@ -430,15 +512,21 @@ func (dm *DNSMessage) handleSuspiciousDirectives(directives []string, 
s *strings return nil } -func (dm *DNSMessage) handlePublicSuffixDirectives(directives []string, s *strings.Builder) error { +func (dm *DNSMessage) handlePublicSuffixDirectives(directive string, s *strings.Builder) error { if dm.PublicSuffix == nil { s.WriteString("-") } else { - switch directive := directives[0]; { + switch { case directive == "publixsuffix-tld": s.WriteString(dm.PublicSuffix.QnamePublicSuffix) case directive == "publixsuffix-etld+1": s.WriteString(dm.PublicSuffix.QnameEffectiveTLDPlusOne) + case directive == "publixsuffix-managed-icann": + if dm.PublicSuffix.ManagedByICANN { + s.WriteString("managed") + } else { + s.WriteString("private") + } default: return errors.New(ErrorUnexpectedDirective + directive) } @@ -446,12 +534,12 @@ func (dm *DNSMessage) handlePublicSuffixDirectives(directives []string, s *strin return nil } -func (dm *DNSMessage) handleExtractedDirectives(directives []string, s *strings.Builder) error { +func (dm *DNSMessage) handleExtractedDirectives(directive string, s *strings.Builder) error { if dm.Extracted == nil { s.WriteString("-") return nil } - switch directive := directives[0]; { + switch { case directive == "extracted-dns-payload": if len(dm.DNS.Payload) > 0 { dst := make([]byte, base64.StdEncoding.EncodedLen(len(dm.DNS.Payload))) @@ -466,11 +554,11 @@ func (dm *DNSMessage) handleExtractedDirectives(directives []string, s *strings. return nil } -func (dm *DNSMessage) handleFilteringDirectives(directives []string, s *strings.Builder) error { +func (dm *DNSMessage) handleFilteringDirectives(directive string, s *strings.Builder) error { if dm.Filtering == nil { s.WriteString("-") } else { - switch directive := directives[0]; { + switch { case directive == "filtering-sample-rate": s.WriteString(strconv.Itoa(dm.Filtering.SampleRate)) default: @@ -480,11 +568,11 @@ func (dm *DNSMessage) handleFilteringDirectives(directives []string, s *strings. 
return nil } -func (dm *DNSMessage) handleReducerDirectives(directives []string, s *strings.Builder) error { +func (dm *DNSMessage) handleReducerDirectives(directive string, s *strings.Builder) error { if dm.Reducer == nil { s.WriteString("-") } else { - switch directive := directives[0]; { + switch { case directive == "reducer-occurrences": s.WriteString(strconv.Itoa(dm.Reducer.Occurrences)) case directive == "reducer-cumulative-length": @@ -496,11 +584,11 @@ func (dm *DNSMessage) handleReducerDirectives(directives []string, s *strings.Bu return nil } -func (dm *DNSMessage) handleMachineLearningDirectives(directives []string, s *strings.Builder) error { +func (dm *DNSMessage) handleMachineLearningDirectives(directive string, s *strings.Builder) error { if dm.MachineLearning == nil { s.WriteString("-") } else { - switch directive := directives[0]; { + switch { case directive == "ml-entropy": s.WriteString(strconv.FormatFloat(dm.MachineLearning.Entropy, 'f', -1, 64)) case directive == "ml-length": @@ -561,41 +649,12 @@ func (dm *DNSMessage) String(format []string, fieldDelimiter string, fieldBounda func (dm *DNSMessage) ToTextLine(format []string, fieldDelimiter string, fieldBoundary string) ([]byte, error) { var s strings.Builder - for i, word := range format { - directives := strings.SplitN(word, ":", 2) - if RawTextDirective.MatchString(word) { - directives[0]=word - } - // fmt.Printf("ToTextLine: directive >%v<\n",directives[0]) - switch directive := directives[0]; { - // default directives - case directive == "ttl": - if len(dm.DNS.DNSRRs.Answers) > 0 { - s.WriteString(strconv.Itoa(dm.DNS.DNSRRs.Answers[0].TTL)) - } else { - s.WriteByte('-') - } - case directive == "answer": - if len(dm.DNS.DNSRRs.Answers) > 0 { - s.WriteString(dm.DNS.DNSRRs.Answers[0].Rdata) - } else { - s.WriteByte('-') - } - case directive == "edns-csubnet": - if len(dm.EDNS.Options) > 0 { - for _, opt := range dm.EDNS.Options { - if opt.Name == "CSUBNET" { - s.WriteString(opt.Data) - break 
- } - } - } else { - s.WriteByte('-') - } - case directive == "answercount": - s.WriteString(strconv.Itoa(len(dm.DNS.DNSRRs.Answers))) - case directive == "id": - s.WriteString(strconv.Itoa(dm.DNS.ID)) + answers := dm.DNS.DNSRRs.Answers + qname := dm.DNS.Qname + flags := dm.DNS.Flags + + for i, directive := range format { + switch { case directive == "timestamp-rfc3339ns", directive == "timestamp": s.WriteString(dm.DNSTap.TimestampRFC3339) case directive == "timestamp-unixms": @@ -607,8 +666,28 @@ func (dm *DNSMessage) ToTextLine(format []string, fieldDelimiter string, fieldBo case directive == "localtime": ts := time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec)) s.WriteString(ts.Format("2006-01-02 15:04:05.999999999")) + case directive == "qname": + if len(qname) == 0 { + s.WriteString(".") + } else { + if len(fieldDelimiter) > 0 { + if strings.Contains(qname, fieldDelimiter) { + qnameEscaped := qname + if strings.Contains(qname, fieldBoundary) { + qnameEscaped = strings.ReplaceAll(qnameEscaped, fieldBoundary, "\\"+fieldBoundary) + } + s.WriteString(fmt.Sprintf(fieldBoundary+"%s"+fieldBoundary, qnameEscaped)) + } else { + s.WriteString(qname) + } + } else { + s.WriteString(dm.DNS.Qname) + } + } case directive == "identity": s.WriteString(dm.DNSTap.Identity) + case directive == "peer-name": + s.WriteString(dm.DNSTap.PeerName) case directive == "version": s.WriteString(dm.DNSTap.Version) case directive == "extra": @@ -623,10 +702,14 @@ func (dm *DNSMessage) ToTextLine(format []string, fieldDelimiter string, fieldBo s.WriteString(dm.DNSTap.PolicyMatch) case directive == "policy-value": s.WriteString(dm.DNSTap.PolicyValue) + case directive == "query-zone": + s.WriteString(dm.DNSTap.QueryZone) case directive == "operation": s.WriteString(dm.DNSTap.Operation) case directive == "rcode": s.WriteString(dm.DNS.Rcode) + case directive == "id": + s.WriteString(strconv.Itoa(dm.DNS.ID)) case directive == "queryip": s.WriteString(dm.NetworkInfo.QueryIP) case 
directive == "queryport": @@ -643,26 +726,10 @@ func (dm *DNSMessage) ToTextLine(format []string, fieldDelimiter string, fieldBo s.WriteString(strconv.Itoa(dm.DNS.Length) + "b") case directive == "length": s.WriteString(strconv.Itoa(dm.DNS.Length)) - case directive == "qname": - if len(dm.DNS.Qname) == 0 { - s.WriteString(".") - } else { - if len(fieldDelimiter) > 0 { - if strings.Contains(dm.DNS.Qname, fieldDelimiter) { - qname := dm.DNS.Qname - if strings.Contains(qname, fieldBoundary) { - qname = strings.ReplaceAll(qname, fieldBoundary, "\\"+fieldBoundary) - } - s.WriteString(fmt.Sprintf(fieldBoundary+"%s"+fieldBoundary, qname)) - } else { - s.WriteString(dm.DNS.Qname) - } - } else { - s.WriteString(dm.DNS.Qname) - } - } case directive == "qtype": s.WriteString(dm.DNS.Qtype) + case directive == "qclass": + s.WriteString(dm.DNS.Qclass) case directive == "latency": s.WriteString(dm.DNSTap.LatencySec) case directive == "malformed": @@ -688,80 +755,112 @@ func (dm *DNSMessage) ToTextLine(format []string, fieldDelimiter string, fieldBo s.WriteByte('-') } case directive == "tc": - if dm.DNS.Flags.TC { + if flags.TC { s.WriteString("TC") } else { s.WriteByte('-') } case directive == "aa": - if dm.DNS.Flags.AA { + if flags.AA { s.WriteString("AA") } else { s.WriteByte('-') } case directive == "ra": - if dm.DNS.Flags.RA { + if flags.RA { s.WriteString("RA") } else { s.WriteByte('-') } case directive == "ad": - if dm.DNS.Flags.AD { + if flags.AD { s.WriteString("AD") } else { s.WriteByte('-') } + case directive == "ttl": + if len(answers) > 0 { + s.WriteString(strconv.Itoa(answers[0].TTL)) + } else { + s.WriteByte('-') + } + case directive == "answer": + if len(answers) > 0 { + s.WriteString(answers[0].Rdata) + } else { + s.WriteByte('-') + } + case directive == "answercount": + s.WriteString(strconv.Itoa(len(answers))) + + case directive == "edns-csubnet": + if len(dm.EDNS.Options) > 0 { + for _, opt := range dm.EDNS.Options { + if opt.Name == "CSUBNET" { + 
s.WriteString(opt.Data) + break + } + } + } else { + s.WriteByte('-') + } + // more directives from collectors case PdnsDirectives.MatchString(directive): - err := dm.handlePdnsDirectives(directives, &s) + err := dm.handlePdnsDirectives(directive, &s) if err != nil { return nil, err } // more directives from transformers case ReducerDirectives.MatchString(directive): - err := dm.handleReducerDirectives(directives, &s) + err := dm.handleReducerDirectives(directive, &s) if err != nil { return nil, err } case GeoIPDirectives.MatchString(directive): - err := dm.handleGeoIPDirectives(directives, &s) + err := dm.handleGeoIPDirectives(directive, &s) if err != nil { return nil, err } case SuspiciousDirectives.MatchString(directive): - err := dm.handleSuspiciousDirectives(directives, &s) + err := dm.handleSuspiciousDirectives(directive, &s) if err != nil { return nil, err } case PublicSuffixDirectives.MatchString(directive): - err := dm.handlePublicSuffixDirectives(directives, &s) + err := dm.handlePublicSuffixDirectives(directive, &s) if err != nil { return nil, err } case ExtractedDirectives.MatchString(directive): - err := dm.handleExtractedDirectives(directives, &s) + err := dm.handleExtractedDirectives(directive, &s) if err != nil { return nil, err } case MachineLearningDirectives.MatchString(directive): - err := dm.handleMachineLearningDirectives(directives, &s) + err := dm.handleMachineLearningDirectives(directive, &s) if err != nil { return nil, err } case FilteringDirectives.MatchString(directive): - err := dm.handleFilteringDirectives(directives, &s) + err := dm.handleFilteringDirectives(directive, &s) + if err != nil { + return nil, err + } + case ATagsDirectives.MatchString(directive): + err := dm.handleATagsDirectives(directive, &s) if err != nil { return nil, err } case RawTextDirective.MatchString(directive): - // fmt.Printf("directive (%v) is RawTextDirective\n", directive) - // error unsupport directive for text format directive = strings.Replace(directive, 
"{", "", -1) directive = strings.Replace(directive, "}", "", -1) s.WriteString(directive) + + // handle invalid directive default: - return nil, errors.New(ErrorUnexpectedDirective + word) + return nil, errors.New(ErrorUnexpectedDirective + directive) } if i < len(format)-1 { @@ -770,7 +869,6 @@ func (dm *DNSMessage) ToTextLine(format []string, fieldDelimiter string, fieldBo } } } - return []byte(s.String()), nil } @@ -804,7 +902,7 @@ func (dm *DNSMessage) ToDNSTap(extended bool) ([]byte, error) { mt := dnstap.Message_Type(dnstap.Message_Type_value[dm.DNSTap.Operation]) var sf dnstap.SocketFamily - if ipNet, valid := netlib.IPToInet[dm.NetworkInfo.Family]; valid { + if ipNet, valid := netutils.IPToInet[dm.NetworkInfo.Family]; valid { sf = dnstap.SocketFamily(dnstap.SocketFamily_value[ipNet]) } sp := dnstap.SocketProtocol(dnstap.SocketProtocol_value[dm.NetworkInfo.Protocol]) @@ -816,7 +914,7 @@ func (dm *DNSMessage) ToDNSTap(extended bool) ([]byte, error) { if dm.NetworkInfo.ResponsePort != "-" { if port, err := strconv.Atoi(dm.NetworkInfo.ResponsePort); err != nil { return nil, err - } else if port < 0 || port > math.MaxUint32 { + } else if port < 0 || port > 65535 { return nil, errors.New("invalid response port value") } else { rport = uint32(port) @@ -826,7 +924,7 @@ func (dm *DNSMessage) ToDNSTap(extended bool) ([]byte, error) { if dm.NetworkInfo.QueryPort != "-" { if port, err := strconv.Atoi(dm.NetworkInfo.QueryPort); err != nil { return nil, err - } else if port < 0 || port > math.MaxUint32 { + } else if port < 0 || port > 65535 { return nil, errors.New("invalid query port value") } else { qport = uint32(port) @@ -839,7 +937,7 @@ func (dm *DNSMessage) ToDNSTap(extended bool) ([]byte, error) { msg.SocketProtocol = &sp reqIP := net.ParseIP(dm.NetworkInfo.QueryIP) - if dm.NetworkInfo.Family == netlib.ProtoIPv4 { + if dm.NetworkInfo.Family == netutils.ProtoIPv4 { msg.QueryAddress = reqIP.To4() } else { msg.QueryAddress = reqIP.To16() @@ -847,7 +945,7 @@ func (dm 
*DNSMessage) ToDNSTap(extended bool) ([]byte, error) { msg.QueryPort = &qport rspIP := net.ParseIP(dm.NetworkInfo.ResponseIP) - if dm.NetworkInfo.Family == netlib.ProtoIPv4 { + if dm.NetworkInfo.Family == netutils.ProtoIPv4 { msg.ResponseAddress = rspIP.To4() } else { msg.ResponseAddress = rspIP.To16() @@ -955,11 +1053,11 @@ func (dm *DNSMessage) ToPacketLayer() ([]gopacket.SerializableLayer, error) { // set source and destination IP switch dm.NetworkInfo.Family { - case netlib.ProtoIPv4: + case netutils.ProtoIPv4: eth.EthernetType = layers.EthernetTypeIPv4 ip4.SrcIP = net.ParseIP(srcIP) ip4.DstIP = net.ParseIP(dstIP) - case netlib.ProtoIPv6: + case netutils.ProtoIPv6: eth.EthernetType = layers.EthernetTypeIPv6 ip6.SrcIP = net.ParseIP(srcIP) ip6.DstIP = net.ParseIP(dstIP) @@ -971,24 +1069,24 @@ func (dm *DNSMessage) ToPacketLayer() ([]gopacket.SerializableLayer, error) { switch dm.NetworkInfo.Protocol { // DNS over UDP - case netlib.ProtoUDP: + case netutils.ProtoUDP: udp.SrcPort = layers.UDPPort(srcPort) udp.DstPort = layers.UDPPort(dstPort) // update iplayer switch dm.NetworkInfo.Family { - case netlib.ProtoIPv4: + case netutils.ProtoIPv4: ip4.Protocol = layers.IPProtocolUDP udp.SetNetworkLayerForChecksum(ip4) pkt = append(pkt, gopacket.Payload(dm.DNS.Payload), udp, ip4) - case netlib.ProtoIPv6: + case netutils.ProtoIPv6: ip6.NextHeader = layers.IPProtocolUDP udp.SetNetworkLayerForChecksum(ip6) pkt = append(pkt, gopacket.Payload(dm.DNS.Payload), udp, ip6) } // DNS over TCP - case netlib.ProtoTCP: + case netutils.ProtoTCP: tcp.SrcPort = layers.TCPPort(srcPort) tcp.DstPort = layers.TCPPort(dstPort) tcp.PSH = true @@ -1000,11 +1098,11 @@ func (dm *DNSMessage) ToPacketLayer() ([]gopacket.SerializableLayer, error) { // update iplayer switch dm.NetworkInfo.Family { - case netlib.ProtoIPv4: + case netutils.ProtoIPv4: ip4.Protocol = layers.IPProtocolTCP tcp.SetNetworkLayerForChecksum(ip4) pkt = append(pkt, gopacket.Payload(append(dnsLengthField, dm.DNS.Payload...)), tcp, 
ip4) - case netlib.ProtoIPv6: + case netutils.ProtoIPv6: ip6.NextHeader = layers.IPProtocolTCP tcp.SetNetworkLayerForChecksum(ip6) pkt = append(pkt, gopacket.Payload(append(dnsLengthField, dm.DNS.Payload...)), tcp, ip6) @@ -1018,11 +1116,11 @@ func (dm *DNSMessage) ToPacketLayer() ([]gopacket.SerializableLayer, error) { // update iplayer switch dm.NetworkInfo.Family { - case netlib.ProtoIPv4: + case netutils.ProtoIPv4: ip4.Protocol = layers.IPProtocolUDP udp.SetNetworkLayerForChecksum(ip4) pkt = append(pkt, gopacket.Payload(dm.DNS.Payload), udp, ip4) - case netlib.ProtoIPv6: + case netutils.ProtoIPv6: ip6.NextHeader = layers.IPProtocolUDP udp.SetNetworkLayerForChecksum(ip6) pkt = append(pkt, gopacket.Payload(dm.DNS.Payload), udp, ip6) @@ -1037,15 +1135,235 @@ func (dm *DNSMessage) ToPacketLayer() ([]gopacket.SerializableLayer, error) { return pkt, nil } -func (dm *DNSMessage) Flatten() (ret map[string]interface{}, err error) { - // TODO perhaps panic when flattening fails, as it should always work. 
- var tmp []byte - if tmp, err = json.Marshal(dm); err != nil { - return +func (dm *DNSMessage) Flatten() (map[string]interface{}, error) { + dnsFields := map[string]interface{}{ + "dns.flags.aa": dm.DNS.Flags.AA, + "dns.flags.ad": dm.DNS.Flags.AD, + "dns.flags.qr": dm.DNS.Flags.QR, + "dns.flags.ra": dm.DNS.Flags.RA, + "dns.flags.tc": dm.DNS.Flags.TC, + "dns.flags.rd": dm.DNS.Flags.RD, + "dns.flags.cd": dm.DNS.Flags.CD, + "dns.length": dm.DNS.Length, + "dns.malformed-packet": dm.DNS.MalformedPacket, + "dns.id": dm.DNS.ID, + "dns.opcode": dm.DNS.Opcode, + "dns.qname": dm.DNS.Qname, + "dns.qtype": dm.DNS.Qtype, + "dns.qclass": dm.DNS.Qclass, + "dns.rcode": dm.DNS.Rcode, + "dnstap.identity": dm.DNSTap.Identity, + "dnstap.latency": dm.DNSTap.LatencySec, + "dnstap.operation": dm.DNSTap.Operation, + "dnstap.timestamp-rfc3339ns": dm.DNSTap.TimestampRFC3339, + "dnstap.version": dm.DNSTap.Version, + "dnstap.extra": dm.DNSTap.Extra, + "dnstap.policy-rule": dm.DNSTap.PolicyRule, + "dnstap.policy-type": dm.DNSTap.PolicyType, + "dnstap.policy-action": dm.DNSTap.PolicyAction, + "dnstap.policy-match": dm.DNSTap.PolicyMatch, + "dnstap.policy-value": dm.DNSTap.PolicyValue, + "dnstap.peer-name": dm.DNSTap.PeerName, + "dnstap.query-zone": dm.DNSTap.QueryZone, + "edns.dnssec-ok": dm.EDNS.Do, + "edns.rcode": dm.EDNS.ExtendedRcode, + "edns.udp-size": dm.EDNS.UDPSize, + "edns.version": dm.EDNS.Version, + "network.family": dm.NetworkInfo.Family, + "network.ip-defragmented": dm.NetworkInfo.IPDefragmented, + "network.protocol": dm.NetworkInfo.Protocol, + "network.query-ip": dm.NetworkInfo.QueryIP, + "network.query-port": dm.NetworkInfo.QueryPort, + "network.response-ip": dm.NetworkInfo.ResponseIP, + "network.response-port": dm.NetworkInfo.ResponsePort, + "network.tcp-reassembled": dm.NetworkInfo.TCPReassembled, } - json.Unmarshal(tmp, &ret) - ret, err = flat.Flatten(ret, nil) - return + + // Add empty slices + if len(dm.DNS.DNSRRs.Answers) == 0 { + dnsFields["dns.resource-records.an"] = "-" 
+ } + if len(dm.DNS.DNSRRs.Records) == 0 { + dnsFields["dns.resource-records.ar"] = "-" + } + if len(dm.DNS.DNSRRs.Nameservers) == 0 { + dnsFields["dns.resource-records.ns"] = "-" + } + if len(dm.EDNS.Options) == 0 { + dnsFields["edns.options"] = "-" + } + + // Add DNSAnswer fields: "dns.resource-records.an.0.name": "google.nl" + // nolint: goconst + for i, an := range dm.DNS.DNSRRs.Answers { + prefixAn := "dns.resource-records.an." + strconv.Itoa(i) + dnsFields[prefixAn+".name"] = an.Name + dnsFields[prefixAn+".rdata"] = an.Rdata + dnsFields[prefixAn+".rdatatype"] = an.Rdatatype + dnsFields[prefixAn+".ttl"] = an.TTL + dnsFields[prefixAn+".class"] = an.Class + } + for i, ns := range dm.DNS.DNSRRs.Nameservers { + prefixNs := "dns.resource-records.ns." + strconv.Itoa(i) + dnsFields[prefixNs+".name"] = ns.Name + dnsFields[prefixNs+".rdata"] = ns.Rdata + dnsFields[prefixNs+".rdatatype"] = ns.Rdatatype + dnsFields[prefixNs+".ttl"] = ns.TTL + dnsFields[prefixNs+".class"] = ns.Class + } + for i, ar := range dm.DNS.DNSRRs.Records { + prefixAr := "dns.resource-records.ar." + strconv.Itoa(i) + dnsFields[prefixAr+".name"] = ar.Name + dnsFields[prefixAr+".rdata"] = ar.Rdata + dnsFields[prefixAr+".rdatatype"] = ar.Rdatatype + dnsFields[prefixAr+".ttl"] = ar.TTL + dnsFields[prefixAr+".class"] = ar.Class + } + + // Add EDNSoptions fields: "edns.options.0.code": 10, + for i, opt := range dm.EDNS.Options { + prefixOpt := "edns.options." 
+ strconv.Itoa(i) + dnsFields[prefixOpt+".code"] = opt.Code + dnsFields[prefixOpt+".data"] = opt.Data + dnsFields[prefixOpt+".name"] = opt.Name + } + + // Add TransformDNSGeo fields + if dm.Geo != nil { + dnsFields["geoip.city"] = dm.Geo.City + dnsFields["geoip.continent"] = dm.Geo.Continent + dnsFields["geoip.country-isocode"] = dm.Geo.CountryIsoCode + dnsFields["geoip.as-number"] = dm.Geo.AutonomousSystemNumber + dnsFields["geoip.as-owner"] = dm.Geo.AutonomousSystemOrg + } + + // Add TransformSuspicious fields + if dm.Suspicious != nil { + dnsFields["suspicious.score"] = dm.Suspicious.Score + dnsFields["suspicious.malformed-pkt"] = dm.Suspicious.MalformedPacket + dnsFields["suspicious.large-pkt"] = dm.Suspicious.LargePacket + dnsFields["suspicious.long-domain"] = dm.Suspicious.LongDomain + dnsFields["suspicious.slow-domain"] = dm.Suspicious.SlowDomain + dnsFields["suspicious.unallowed-chars"] = dm.Suspicious.UnallowedChars + dnsFields["suspicious.uncommon-qtypes"] = dm.Suspicious.UncommonQtypes + dnsFields["suspicious.excessive-number-labels"] = dm.Suspicious.ExcessiveNumberLabels + dnsFields["suspicious.domain"] = dm.Suspicious.Domain + } + + // Add TransformPublicSuffix fields + if dm.PublicSuffix != nil { + dnsFields["publicsuffix.tld"] = dm.PublicSuffix.QnamePublicSuffix + dnsFields["publicsuffix.etld+1"] = dm.PublicSuffix.QnameEffectiveTLDPlusOne + dnsFields["publicsuffix.managed-icann"] = dm.PublicSuffix.ManagedByICANN + } + + // Add TransformExtracted fields + if dm.Extracted != nil { + dnsFields["extracted.dns_payload"] = dm.Extracted.Base64Payload + } + + // Add TransformReducer fields + if dm.Reducer != nil { + dnsFields["reducer.occurrences"] = dm.Reducer.Occurrences + dnsFields["reducer.cumulative-length"] = dm.Reducer.CumulativeLength + } + + // Add TransformFiltering fields + if dm.Filtering != nil { + dnsFields["filtering.sample-rate"] = dm.Filtering.SampleRate + } + + // Add TransformML fields + if dm.MachineLearning != nil { + 
dnsFields["ml.entropy"] = dm.MachineLearning.Entropy + dnsFields["ml.length"] = dm.MachineLearning.Length + dnsFields["ml.labels"] = dm.MachineLearning.Labels + dnsFields["ml.digits"] = dm.MachineLearning.Digits + dnsFields["ml.lowers"] = dm.MachineLearning.Lowers + dnsFields["ml.uppers"] = dm.MachineLearning.Uppers + dnsFields["ml.specials"] = dm.MachineLearning.Specials + dnsFields["ml.others"] = dm.MachineLearning.Others + dnsFields["ml.ratio-digits"] = dm.MachineLearning.RatioDigits + dnsFields["ml.ratio-letters"] = dm.MachineLearning.RatioLetters + dnsFields["ml.ratio-specials"] = dm.MachineLearning.RatioSpecials + dnsFields["ml.ratio-others"] = dm.MachineLearning.RatioOthers + dnsFields["ml.consecutive-chars"] = dm.MachineLearning.ConsecutiveChars + dnsFields["ml.consecutive-vowels"] = dm.MachineLearning.ConsecutiveVowels + dnsFields["ml.consecutive-digits"] = dm.MachineLearning.ConsecutiveDigits + dnsFields["ml.consecutive-consonants"] = dm.MachineLearning.ConsecutiveConsonants + dnsFields["ml.size"] = dm.MachineLearning.Size + dnsFields["ml.occurrences"] = dm.MachineLearning.Occurrences + dnsFields["ml.uncommon-qtypes"] = dm.MachineLearning.UncommonQtypes + } + + // Add TransformATags fields + if dm.ATags != nil { + if len(dm.ATags.Tags) == 0 { + dnsFields["atags.tags"] = "-" + } + for i, tag := range dm.ATags.Tags { + dnsFields["atags.tags."+strconv.Itoa(i)] = tag + } + } + + // Add PowerDNS collectors fields + if dm.PowerDNS != nil { + if len(dm.PowerDNS.Tags) == 0 { + dnsFields["powerdns.tags"] = "-" + } + for i, tag := range dm.PowerDNS.Tags { + dnsFields["powerdns.tags."+strconv.Itoa(i)] = tag + } + dnsFields["powerdns.original-request-subnet"] = dm.PowerDNS.OriginalRequestSubnet + dnsFields["powerdns.applied-policy"] = dm.PowerDNS.AppliedPolicy + dnsFields["powerdns.applied-policy-hit"] = dm.PowerDNS.AppliedPolicyHit + dnsFields["powerdns.applied-policy-kind"] = dm.PowerDNS.AppliedPolicyKind + dnsFields["powerdns.applied-policy-trigger"] = 
dm.PowerDNS.AppliedPolicyTrigger + dnsFields["powerdns.applied-policy-type"] = dm.PowerDNS.AppliedPolicyType + for mk, mv := range dm.PowerDNS.Metadata { + dnsFields["powerdns.metadata."+mk] = mv + } + dnsFields["powerdns.http-version"] = dm.PowerDNS.HTTPVersion + } + + // relabeling ? + if dm.Relabeling != nil { + err := dm.ApplyRelabeling(dnsFields) + if err != nil { + return nil, err + } + } + + return dnsFields, nil +} + +func (dm *DNSMessage) ApplyRelabeling(dnsFields map[string]interface{}) error { + + for _, label := range dm.Relabeling.Rules { + regex := label.Regex + for key := range dnsFields { + if regex.MatchString(key) { + if label.Action == "rename" { + replacement := label.Replacement + if value, exists := dnsFields[replacement]; exists { + switch v := value.(type) { + case []string: + dnsFields[replacement] = append(v, convertToString(dnsFields[key])) + default: + dnsFields[replacement] = []string{convertToString(v), convertToString(dnsFields[key])} + } + } else { + dnsFields[replacement] = convertToString(dnsFields[key]) + } + } + + // delete on all case + delete(dnsFields, key) + } + } + } + + return nil } func (dm *DNSMessage) Matching(matching map[string]interface{}) (error, bool) { @@ -1113,7 +1431,7 @@ func (dm *DNSMessage) Matching(matching map[string]interface{}) (error, bool) { // map can be provided by user in the config // dns.qname: -// match-source: "file://./testsdata/filtering_keep_domains_regex.txt" +// match-source: "file://./tests/testsdata/filtering_keep_domains_regex.txt" // source-kind: "regexp_list" func matchUserMap(realValue, expectedValue reflect.Value) (bool, error) { for _, opKey := range expectedValue.MapKeys() { @@ -1523,7 +1841,7 @@ func GetFakeDNSMessage() DNSMessage { dm.DNSTap.Identity = "collector" dm.DNSTap.Operation = "CLIENT_QUERY" dm.DNS.Type = DNSQuery - dm.DNS.Qname = "dns.collector" + dm.DNS.Qname = pkgconfig.ProgQname dm.NetworkInfo.QueryIP = "1.2.3.4" dm.NetworkInfo.QueryPort = "1234" 
dm.NetworkInfo.ResponseIP = "4.3.2.1" @@ -1540,8 +1858,8 @@ func GetFakeDNSMessageWithPayload() DNSMessage { dnsquestion, _ := dnsmsg.Pack() dm := GetFakeDNSMessage() - dm.NetworkInfo.Family = netlib.ProtoIPv4 - dm.NetworkInfo.Protocol = netlib.ProtoUDP + dm.NetworkInfo.Family = netutils.ProtoIPv4 + dm.NetworkInfo.Protocol = netutils.ProtoUDP dm.DNS.Payload = dnsquestion dm.DNS.Length = len(dnsquestion) return dm @@ -1561,3 +1879,18 @@ func GetReferenceDNSMessage() DNSMessage { dm.InitTransforms() return dm } + +func convertToString(value interface{}) string { + switch v := value.(type) { + case int: + return strconv.Itoa(v) + case bool: + return strconv.FormatBool(v) + case float64: + return strconv.FormatFloat(v, 'f', -1, 64) + case string: + return v + default: + return fmt.Sprintf("%v", v) + } +} diff --git a/dnsutils/message_test.go b/dnsutils/message_test.go index 381fb3e0..9fbf9d4b 100644 --- a/dnsutils/message_test.go +++ b/dnsutils/message_test.go @@ -3,14 +3,27 @@ package dnsutils import ( "encoding/json" "reflect" + "regexp" "strings" "testing" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-dnstap-protobuf" + "github.com/dmachard/go-netutils" + "github.com/miekg/dns" "google.golang.org/protobuf/proto" ) +// Bench to init DNS message +func BenchmarkDnsMessage_Init(b *testing.B) { + b.ResetTimer() + for i := 0; i < b.N; i++ { + dm := DNSMessage{} + dm.Init() + dm.InitTransforms() + } +} + // Tests for DNSTap format func encodeToDNSTap(dm DNSMessage, t *testing.T) *ExtendedDnstap { // encode to extended dnstap @@ -35,6 +48,47 @@ func encodeToDNSTap(dm DNSMessage, t *testing.T) *ExtendedDnstap { return edt } +func TestDnsMessage_ToDNSTap(t *testing.T) { + dm := GetFakeDNSMessageWithPayload() + dm.DNSTap.Extra = "extra:value" + + // encode to dnstap + tapMsg, err := dm.ToDNSTap(false) + if err != nil { + t.Fatalf("could not encode to dnstap: %v\n", err) + } + + // decode dnstap message + dt := 
&dnstap.Dnstap{} + err = proto.Unmarshal(tapMsg, dt) + if err != nil { + t.Fatalf("error to decode dnstap: %v", err) + } + + if string(dt.GetIdentity()) != dm.DNSTap.Identity { + t.Errorf("identify field should be equal got=%s", string(dt.GetIdentity())) + } + + if string(dt.GetExtra()) != dm.DNSTap.Extra { + t.Errorf("extra field should be equal got=%s", string(dt.GetExtra())) + } +} + +func BenchmarkDnsMessage_ToDNSTap(b *testing.B) { + dm := DNSMessage{} + dm.Init() + dm.InitTransforms() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := dm.ToDNSTap(false) + if err != nil { + b.Fatalf("could not encode to dnstap: %v\n", err) + } + } +} + +// Tests for Extended DNSTap format func TestDnsMessage_ToExtendedDNSTap_GetOriginalDnstapExtra(t *testing.T) { dm := GetFakeDNSMessageWithPayload() dm.DNSTap.Extra = "tag0:value0" @@ -120,29 +174,17 @@ func TestDnsMessage_ToExtendedDNSTap_TransformGeo(t *testing.T) { } } -func TestDnsMessage_ToDNSTap(t *testing.T) { - dm := GetFakeDNSMessageWithPayload() - dm.DNSTap.Extra = "extra:value" - - // encode to dnstap - tapMsg, err := dm.ToDNSTap(false) - if err != nil { - t.Fatalf("could not encode to dnstap: %v\n", err) - } - - // decode dnstap message - dt := &dnstap.Dnstap{} - err = proto.Unmarshal(tapMsg, dt) - if err != nil { - t.Fatalf("error to decode dnstap: %v", err) - } - - if string(dt.GetIdentity()) != dm.DNSTap.Identity { - t.Errorf("identify field should be equal got=%s", string(dt.GetIdentity())) - } - - if string(dt.GetExtra()) != dm.DNSTap.Extra { - t.Errorf("extra field should be equal got=%s", string(dt.GetExtra())) +func BenchmarkDnsMessage_ToExtendedDNSTap(b *testing.B) { + dm := DNSMessage{} + dm.Init() + dm.InitTransforms() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := dm.ToDNSTap(true) + if err != nil { + b.Fatalf("could not encode to extended dnstap: %v\n", err) + } } } @@ -170,6 +212,7 @@ func TestDnsMessage_Json_Reference(t *testing.T) { "rcode": "-", "qname": "-", "qtype": "-", + 
"qclass": "-", "flags": { "qr": false, "tc": false, @@ -204,7 +247,9 @@ func TestDnsMessage_Json_Reference(t *testing.T) { "policy-action": "-", "policy-match": "-", "policy-value": "-", - "policy-rule": "-" + "policy-rule": "-", + "peer-name": "-", + "query-zone": "-" } } ` @@ -222,73 +267,7 @@ func TestDnsMessage_Json_Reference(t *testing.T) { } if !reflect.DeepEqual(dmMap, refMap) { - t.Errorf("json format different from reference") - } -} - -func TestDnsMessage_JsonFlatten_Reference(t *testing.T) { - dm := DNSMessage{} - dm.Init() - - refJSON := ` - { - "dns.flags.aa": false, - "dns.flags.ad": false, - "dns.flags.qr": false, - "dns.flags.ra": false, - "dns.flags.tc": false, - "dns.flags.rd": false, - "dns.flags.cd": false, - "dns.length": 0, - "dns.malformed-packet": false, - "dns.id": 0, - "dns.opcode": 0, - "dns.qname": "-", - "dns.qtype": "-", - "dns.rcode": "-", - "dns.resource-records.an": [], - "dns.resource-records.ar": [], - "dns.resource-records.ns": [], - "dnstap.identity": "-", - "dnstap.latency": "-", - "dnstap.operation": "-", - "dnstap.timestamp-rfc3339ns": "-", - "dnstap.version": "-", - "dnstap.extra": "-", - "dnstap.policy-rule": "-", - "dnstap.policy-type": "-", - "dnstap.policy-action": "-", - "dnstap.policy-match": "-", - "dnstap.policy-value": "-", - "edns.dnssec-ok": 0, - "edns.options": [], - "edns.rcode": 0, - "edns.udp-size": 0, - "edns.version": 0, - "network.family": "-", - "network.ip-defragmented": false, - "network.protocol": "-", - "network.query-ip": "-", - "network.query-port": "-", - "network.response-ip": "-", - "network.response-port": "-", - "network.tcp-reassembled": false - } - ` - - dmFlat, err := dm.Flatten() - if err != nil { - t.Fatalf("could not flat json: %s\n", err) - } - - var refMap map[string]interface{} - err = json.Unmarshal([]byte(refJSON), &refMap) - if err != nil { - t.Fatalf("could not unmarshal ref json: %s\n", err) - } - - if !reflect.DeepEqual(dmFlat, refMap) { - t.Errorf("flatten json format different 
from reference") + t.Errorf("json format different from reference %v", dmMap) } } @@ -309,6 +288,7 @@ func TestDnsMessage_Json_Collectors_Reference(t *testing.T) { AppliedPolicyType: "type", Tags: []string{"tag1"}, Metadata: map[string]string{"stream_id": "collector"}, + HTTPVersion: "http3", }}, jsonRef: `{ @@ -322,7 +302,8 @@ func TestDnsMessage_Json_Collectors_Reference(t *testing.T) { "tags": ["tag1"], "metadata": { "stream_id": "collector" - } + }, + "http-version": "http3" } }`, }, @@ -383,12 +364,14 @@ func TestDnsMessage_Json_Transforms_Reference(t *testing.T) { PublicSuffix: &TransformPublicSuffix{ QnamePublicSuffix: "com", QnameEffectiveTLDPlusOne: "hello.com", + ManagedByICANN: true, }, }, jsonRef: `{ "publicsuffix": { "tld": "com", - "etld+1": "hello.com" + "etld+1": "hello.com", + "managed-icann": true } }`, }, @@ -413,6 +396,15 @@ func TestDnsMessage_Json_Transforms_Reference(t *testing.T) { } }`, }, + { + transform: "atags", + dmRef: DNSMessage{ATags: &TransformATags{Tags: []string{"test0", "test1"}}}, + jsonRef: `{ + "atags": { + "tags": [ "test0", "test1" ] + } + }`, + }, } for _, tc := range testcases { @@ -439,10 +431,374 @@ func TestDnsMessage_Json_Transforms_Reference(t *testing.T) { } } +func BenchmarkDnsMessage_ToJSON(b *testing.B) { + dm := DNSMessage{} + dm.Init() + dm.InitTransforms() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + dm.ToJSON() + } +} + +// Tests for Flat JSON format +func TestDnsMessage_JsonFlatten_Reference(t *testing.T) { + dm := DNSMessage{} + dm.Init() + + // add some items in slices field + dm.DNS.DNSRRs.Answers = append(dm.DNS.DNSRRs.Answers, DNSAnswer{Name: "google.nl", Rdata: "142.251.39.99", Rdatatype: "A", TTL: 300, Class: "IN"}) + dm.EDNS.Options = append(dm.EDNS.Options, DNSOption{Code: 10, Data: "aaaabbbbcccc", Name: "COOKIE"}) + + refJSON := ` + { + "dns.flags.aa": false, + "dns.flags.ad": false, + "dns.flags.qr": false, + "dns.flags.ra": false, + "dns.flags.tc": false, + "dns.flags.rd": false, + 
"dns.flags.cd": false, + "dns.length": 0, + "dns.malformed-packet": false, + "dns.id": 0, + "dns.opcode": 0, + "dns.qname": "-", + "dns.qtype": "-", + "dns.rcode": "-", + "dns.qclass": "-", + "dns.resource-records.an.0.name": "google.nl", + "dns.resource-records.an.0.rdata": "142.251.39.99", + "dns.resource-records.an.0.rdatatype": "A", + "dns.resource-records.an.0.ttl": 300, + "dns.resource-records.an.0.class": "IN", + "dns.resource-records.ar": "-", + "dns.resource-records.ns": "-", + "dnstap.identity": "-", + "dnstap.latency": "-", + "dnstap.operation": "-", + "dnstap.timestamp-rfc3339ns": "-", + "dnstap.version": "-", + "dnstap.extra": "-", + "dnstap.policy-rule": "-", + "dnstap.policy-type": "-", + "dnstap.policy-action": "-", + "dnstap.policy-match": "-", + "dnstap.policy-value": "-", + "dnstap.peer-name": "-", + "dnstap.query-zone": "-", + "edns.dnssec-ok": 0, + "edns.options.0.code": 10, + "edns.options.0.data": "aaaabbbbcccc", + "edns.options.0.name": "COOKIE", + "edns.rcode": 0, + "edns.udp-size": 0, + "edns.version": 0, + "network.family": "-", + "network.ip-defragmented": false, + "network.protocol": "-", + "network.query-ip": "-", + "network.query-port": "-", + "network.response-ip": "-", + "network.response-port": "-", + "network.tcp-reassembled": false + } + ` + + var dmFlat map[string]interface{} + dmJSON, err := dm.ToFlatJSON() + if err != nil { + t.Fatalf("could not convert dm to flat json: %s\n", err) + } + err = json.Unmarshal([]byte(dmJSON), &dmFlat) + if err != nil { + t.Fatalf("could not unmarshal dm json: %s\n", err) + } + + var refMap map[string]interface{} + err = json.Unmarshal([]byte(refJSON), &refMap) + if err != nil { + t.Fatalf("could not unmarshal ref json: %s\n", err) + } + + for k, vRef := range refMap { + vFlat, ok := dmFlat[k] + if !ok { + t.Fatalf("Missing key %s in flatten message according to reference", k) + } + if vRef != vFlat { + t.Errorf("Invalid value for key=%s get=%v expected=%v", k, vFlat, vRef) + } + } + + for k := 
range dmFlat { + _, ok := refMap[k] + if !ok { + t.Errorf("This key %s should not be in the flat message", k) + } + } +} + +func TestDnsMessage_JsonFlatten_Transforms_Reference(t *testing.T) { + + testcases := []struct { + transform string + dm DNSMessage + jsonRef string + }{ + { + transform: "filtering", + dm: DNSMessage{Filtering: &TransformFiltering{SampleRate: 22}}, + jsonRef: `{ + "filtering.sample-rate": 22 + }`, + }, + { + transform: "reducer", + dm: DNSMessage{Reducer: &TransformReducer{Occurrences: 10, CumulativeLength: 47}}, + jsonRef: `{ + "reducer.occurrences": 10, + "reducer.cumulative-length": 47 + }`, + }, + { + transform: "publixsuffix", + dm: DNSMessage{ + PublicSuffix: &TransformPublicSuffix{ + QnamePublicSuffix: "com", + QnameEffectiveTLDPlusOne: "hello.com", + }, + }, + jsonRef: `{ + "publicsuffix.tld": "com", + "publicsuffix.etld+1": "hello.com" + }`, + }, + { + transform: "geoip", + dm: DNSMessage{ + Geo: &TransformDNSGeo{ + City: "Paris", + Continent: "Europe", + CountryIsoCode: "FR", + AutonomousSystemNumber: "1234", + AutonomousSystemOrg: "Internet", + }, + }, + jsonRef: `{ + "geoip.city": "Paris", + "geoip.continent": "Europe", + "geoip.country-isocode": "FR", + "geoip.as-number": "1234", + "geoip.as-owner": "Internet" + }`, + }, + { + transform: "suspicious", + dm: DNSMessage{Suspicious: &TransformSuspicious{Score: 1.0, + MalformedPacket: false, + LargePacket: true, + LongDomain: true, + SlowDomain: false, + UnallowedChars: true, + UncommonQtypes: false, + ExcessiveNumberLabels: true, + Domain: "gogle.co", + }}, + jsonRef: `{ + "suspicious.score": 1.0, + "suspicious.malformed-pkt": false, + "suspicious.large-pkt": true, + "suspicious.long-domain": true, + "suspicious.slow-domain": false, + "suspicious.unallowed-chars": true, + "suspicious.uncommon-qtypes": false, + "suspicious.excessive-number-labels": true, + "suspicious.domain": "gogle.co" + }`, + }, + { + transform: "extracted", + dm: DNSMessage{Extracted: 
&TransformExtracted{Base64Payload: []byte{}}}, + jsonRef: `{ + "extracted.dns_payload": "" + }`, + }, + { + transform: "machinelearning", + dm: DNSMessage{MachineLearning: &TransformML{ + Entropy: 10.0, + Length: 2, + Labels: 2, + Digits: 1, + Lowers: 35, + Uppers: 23, + Specials: 2, + Others: 1, + RatioDigits: 1.0, + RatioLetters: 1.0, + RatioSpecials: 1.0, + RatioOthers: 1.0, + ConsecutiveChars: 10, + ConsecutiveVowels: 10, + ConsecutiveDigits: 10, + ConsecutiveConsonants: 10, + Size: 11, + Occurrences: 10, + UncommonQtypes: 1, + }}, + jsonRef: `{ + "ml.entropy": 10.0, + "ml.length": 2, + "ml.labels": 2, + "ml.digits": 1, + "ml.lowers": 35, + "ml.uppers": 23, + "ml.specials": 2, + "ml.others": 1, + "ml.ratio-digits": 1.0, + "ml.ratio-letters": 1.0, + "ml.ratio-specials": 1.0, + "ml.ratio-others": 1.0, + "ml.consecutive-chars": 10, + "ml.consecutive-vowels": 10, + "ml.consecutive-digits": 10, + "ml.consecutive-consonants": 10, + "ml.size": 11, + "ml.occurrences": 10, + "ml.uncommon-qtypes": 1 + }`, + }, + { + transform: "atags", + dm: DNSMessage{ATags: &TransformATags{Tags: []string{"test0", "test1"}}}, + jsonRef: `{ + "atags.tags.0": "test0", + "atags.tags.1": "test1" + }`, + }, + } + + for _, tc := range testcases { + t.Run(tc.transform, func(t *testing.T) { + + tc.dm.Init() + + var dmFlat map[string]interface{} + dmJSON, err := tc.dm.ToFlatJSON() + if err != nil { + t.Fatalf("could not convert dm to flat json: %s\n", err) + } + err = json.Unmarshal([]byte(dmJSON), &dmFlat) + if err != nil { + t.Fatalf("could not unmarshal dm json: %s\n", err) + } + + var refMap map[string]interface{} + err = json.Unmarshal([]byte(tc.jsonRef), &refMap) + if err != nil { + t.Fatalf("could not unmarshal ref json: %s\n", err) + } + + for k, vRef := range refMap { + vFlat, ok := dmFlat[k] + if !ok { + t.Fatalf("Missing key %s in flatten message according to reference", k) + } + if vRef != vFlat { + t.Errorf("Invalid value for key=%s get=%v expected=%v", k, vFlat, vRef) + } + } + }) 
+ } +} + +func TestDnsMessage_JsonFlatten_Collectors_Reference(t *testing.T) { + testcases := []struct { + collector string + dm DNSMessage + jsonRef string + }{ + { + collector: "powerdns", + dm: DNSMessage{PowerDNS: &PowerDNS{ + OriginalRequestSubnet: "subnet", + AppliedPolicy: "basicrpz", + AppliedPolicyHit: "hit", + AppliedPolicyKind: "kind", + AppliedPolicyTrigger: "trigger", + AppliedPolicyType: "type", + Tags: []string{"tag1"}, + Metadata: map[string]string{"stream_id": "collector"}, + HTTPVersion: "http3", + }}, + + jsonRef: `{ + "powerdns.original-request-subnet": "subnet", + "powerdns.applied-policy": "basicrpz", + "powerdns.applied-policy-hit": "hit", + "powerdns.applied-policy-kind": "kind", + "powerdns.applied-policy-trigger": "trigger", + "powerdns.applied-policy-type": "type", + "powerdns.tags.0": "tag1", + "powerdns.metadata.stream_id": "collector", + "powerdns.http-version": "http3" + }`, + }, + } + for _, tc := range testcases { + t.Run(tc.collector, func(t *testing.T) { + + tc.dm.Init() + + var dmFlat map[string]interface{} + dmJSON, err := tc.dm.ToFlatJSON() + if err != nil { + t.Fatalf("could not convert dm to flat json: %s\n", err) + } + err = json.Unmarshal([]byte(dmJSON), &dmFlat) + if err != nil { + t.Fatalf("could not unmarshal dm json: %s\n", err) + } + + var refMap map[string]interface{} + err = json.Unmarshal([]byte(tc.jsonRef), &refMap) + if err != nil { + t.Fatalf("could not unmarshal ref json: %s\n", err) + } + + for k, vRef := range refMap { + vFlat, ok := dmFlat[k] + if !ok { + t.Fatalf("Missing key %s in flatten message according to reference", k) + } + if vRef != vFlat { + t.Errorf("Invalid value for key=%s get=%v expected=%v", k, vFlat, vRef) + } + } + }) + } +} + +func BenchmarkDnsMessage_ToFlatJSON(b *testing.B) { + dm := DNSMessage{} + dm.Init() + dm.InitTransforms() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := dm.ToFlatJSON() + if err != nil { + b.Fatalf("could not encode to flat json: %v\n", err) + } + } +} + 
// Tests for TEXT format func TestDnsMessage_TextFormat_ToString(t *testing.T) { - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() testcases := []struct { name string @@ -510,7 +866,7 @@ func TestDnsMessage_TextFormat_ToString(t *testing.T) { } func TestDnsMessage_TextFormat_DefaultDirectives(t *testing.T) { - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() testcases := []struct { name string @@ -538,6 +894,11 @@ func TestDnsMessage_TextFormat_DefaultDirectives(t *testing.T) { dm: DNSMessage{DNS: DNS{Qname: "dnscollector.fr", Qtype: "AAAA", Opcode: 42}}, expected: "dnscollector.fr AAAA 42", }, + { + format: "qclass", + dm: DNSMessage{DNS: DNS{Qclass: "CH"}}, + expected: "CH", + }, { format: "operation", dm: DNSMessage{DNSTap: DNSTap{Operation: "CLIENT_QUERY"}}, @@ -590,6 +951,16 @@ func TestDnsMessage_TextFormat_DefaultDirectives(t *testing.T) { PolicyValue: "value"}}, expected: "rule type action match value", }, + { + format: "peer-name", + dm: DNSMessage{DNSTap: DNSTap{PeerName: "testpeer"}}, + expected: "testpeer", + }, + { + format: "query-zone", + dm: DNSMessage{DNSTap: DNSTap{QueryZone: "queryzone.test"}}, + expected: "queryzone.test", + }, } for _, tc := range testcases { @@ -667,7 +1038,7 @@ func TestDnsMessage_TextFormat_InvalidDirectives(t *testing.T) { } func TestDnsMessage_TextFormat_Directives_PublicSuffix(t *testing.T) { - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() testcases := []struct { name string @@ -704,7 +1075,7 @@ func TestDnsMessage_TextFormat_Directives_PublicSuffix(t *testing.T) { } func TestDnsMessage_TextFormat_Directives_Geo(t *testing.T) { - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() testcases := []struct { name string @@ -742,7 +1113,7 @@ func TestDnsMessage_TextFormat_Directives_Geo(t *testing.T) { } func TestDnsMessage_TextFormat_Directives_Pdns(t *testing.T) { - config := pkgconfig.GetFakeConfig() + config := 
pkgconfig.GetDefaultConfig() testcases := []struct { name string @@ -816,6 +1187,67 @@ func TestDnsMessage_TextFormat_Directives_Pdns(t *testing.T) { dm: DNSMessage{PowerDNS: &PowerDNS{Tags: []string{"tag1", "tag2"}}}, expected: "-", }, + { + name: "http_version", + format: "powerdns-http-version", + dm: DNSMessage{PowerDNS: &PowerDNS{HTTPVersion: "HTTP2"}}, + expected: "HTTP2", + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + line := tc.dm.String( + strings.Fields(tc.format), + config.Global.TextFormatDelimiter, + config.Global.TextFormatBoundary, + ) + if line != tc.expected { + t.Errorf("Want: %s, got: %s", tc.expected, line) + } + }) + } +} + +func TestDnsMessage_TextFormat_Directives_ATags(t *testing.T) { + config := pkgconfig.GetDefaultConfig() + + testcases := []struct { + name string + format string + dm DNSMessage + expected string + }{ + { + name: "undefined", + format: "atags", + dm: DNSMessage{}, + expected: "-", + }, + { + name: "empty_attributes", + format: "atags", + dm: DNSMessage{ATags: &TransformATags{}}, + expected: "-", + }, + { + name: "tags_all", + format: "atags", + dm: DNSMessage{ATags: &TransformATags{Tags: []string{"tag1", "tag2"}}}, + expected: "tag1,tag2", + }, + { + name: "tags_index", + format: "atags:1", + dm: DNSMessage{ATags: &TransformATags{Tags: []string{"tag1", "tag2"}}}, + expected: "tag2", + }, + { + name: "tags_invalid_index", + format: "atags:3", + dm: DNSMessage{ATags: &TransformATags{Tags: []string{"tag1", "tag2"}}}, + expected: "-", + }, } for _, tc := range testcases { @@ -833,7 +1265,7 @@ func TestDnsMessage_TextFormat_Directives_Pdns(t *testing.T) { } func TestDnsMessage_TextFormat_Directives_Suspicious(t *testing.T) { - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() testcases := []struct { name string @@ -870,7 +1302,7 @@ func TestDnsMessage_TextFormat_Directives_Suspicious(t *testing.T) { } func TestDnsMessage_TextFormat_Directives_Reducer(t *testing.T) 
{ - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() testcases := []struct { name string @@ -907,7 +1339,7 @@ func TestDnsMessage_TextFormat_Directives_Reducer(t *testing.T) { } func TestDnsMessage_TextFormat_Directives_Extracted(t *testing.T) { - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() testcases := []struct { name string @@ -959,7 +1391,7 @@ func TestDnsMessage_TextFormat_Directives_Extracted(t *testing.T) { } func TestDnsMessage_TextFormat_Directives_Filtering(t *testing.T) { - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() testcases := []struct { name string @@ -994,3 +1426,117 @@ func TestDnsMessage_TextFormat_Directives_Filtering(t *testing.T) { }) } } + +func BenchmarkDnsMessage_ToTextFormat(b *testing.B) { + dm := DNSMessage{} + dm.Init() + dm.InitTransforms() + + textFormat := []string{"timestamp-rfc3339ns", "identity", + "operation", "rcode", "queryip", "queryport", "family", + "protocol", "length-unit", "qname", "qtype", "latency"} + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := dm.ToTextLine(textFormat, " ", "\"") + if err != nil { + b.Fatalf("could not encode to text format: %v\n", err) + } + } +} + +// Tests for PCAP serialization +func BenchmarkDnsMessage_ToPacketLayer(b *testing.B) { + dm := DNSMessage{} + dm.Init() + dm.InitTransforms() + + dnsmsg := new(dns.Msg) + dnsmsg.SetQuestion("dnscollector.dev.", dns.TypeAAAA) + dnsquestion, _ := dnsmsg.Pack() + + dm.NetworkInfo.Family = netutils.ProtoIPv4 + dm.NetworkInfo.Protocol = netutils.ProtoUDP + dm.DNS.Payload = dnsquestion + dm.DNS.Length = len(dnsquestion) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := dm.ToPacketLayer() + if err != nil { + b.Fatalf("could not encode to pcap: %v\n", err) + } + } +} + +// Flatten and relabeling +func TestDnsMessage_ApplyRelabeling(t *testing.T) { + // Créer un DNSMessage avec des règles de relabeling pour le test + dm := &DNSMessage{ + Relabeling: 
&TransformRelabeling{ + Rules: []RelabelingRule{ + {Regex: regexp.MustCompile("^old_"), Replacement: "new_field", Action: "rename"}, + {Regex: regexp.MustCompile("^foo_"), Action: "remove"}, + }, + }, + } + + // test map + dnsFields := map[string]interface{}{ + "old_field": "value1", + "foo_field": "value2", + "other_field": "value3", + } + + // apply relabeling + err := dm.ApplyRelabeling(dnsFields) + if err != nil { + t.Errorf("ApplyRelabeling() return an error: %v", err) + } + + // check + expectedDNSFields := map[string]interface{}{ + "new_field": "value1", + "other_field": "value3", + } + if !reflect.DeepEqual(dnsFields, expectedDNSFields) { + t.Errorf("Want: %v, Get: %v", expectedDNSFields, dnsFields) + } +} + +func BenchmarkDnsMessage_ToFlatten_Relabelling(b *testing.B) { + dm := DNSMessage{} + dm.Init() + dm.InitTransforms() + + dm.Relabeling.Rules = append(dm.Relabeling.Rules, RelabelingRule{ + Regex: regexp.MustCompile(`dns.qname`), + Action: "remove", + }) + dm.Relabeling.Rules = append(dm.Relabeling.Rules, RelabelingRule{ + Regex: regexp.MustCompile(`dns.qtype`), + Replacement: "qtype", + Action: "rename", + }) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := dm.Flatten() + if err != nil { + b.Fatalf("could not flat: %v\n", err) + } + } +} + +func BenchmarkDnsMessage_ToFlatten(b *testing.B) { + dm := DNSMessage{} + dm.Init() + dm.InitTransforms() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := dm.Flatten() + if err != nil { + b.Fatalf("could not flat: %v\n", err) + } + } +} diff --git a/docker-compose.yml b/docker-compose.yml index cd2784b7..e388b967 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -12,4 +12,5 @@ services: ports: - "6000:6000/tcp" - "8080:8080/tcp" + - "9165:9165/tcp" restart: always \ No newline at end of file diff --git a/docs/_examples/use-case-1.deprecated.yml b/docs/_examples/use-case-1.deprecated.yml new file mode 100644 index 00000000..7d455750 --- /dev/null +++ 
b/docs/_examples/use-case-1.deprecated.yml @@ -0,0 +1,39 @@ +# Example 1: Capture DNSTap stream and backup-it to text and pcap files +# +# As prerequisites, we assume you have a DNS server which supports DNSTap (unbound, bind, powerdns, etc) +# For more informations about dnstap, read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ +# + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + + # Write DNS logs to log file in text format and pcap + # with a maximum size of 100Mb for each files + # A rotation mechanism is implemented with 10 files maximum + # more detail about the text format: doc/configuration.md#custom-text-format + loggers: + - name: text + logfile: + file-path: "/var/log/dnstap.log" + max-size: 100 + max-files: 10 + mode: text + - name: pcap + logfile: + file-path: "/tmp/dns.pcap" + mode: pcap + + # Routes DNS messages from the tap collector to the file logger + routes: + - from: [ tap ] + to: [ text, pcap ] \ No newline at end of file diff --git a/docs/_examples/use-case-1.pipeline.yml b/docs/_examples/use-case-1.pipeline.yml deleted file mode 100644 index 5de00f66..00000000 --- a/docs/_examples/use-case-1.pipeline.yml +++ /dev/null @@ -1,26 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000 -# and logging in both text and pcap formats. 
- -global: - trace: - verbose: true - -pipelines: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - routing-policy: - default: [ text, pcap ] - - - name: text - logfile: - file-path: "/tmp/dnstap.log" - max-size: 100 - max-files: 10 - mode: text - - - name: pcap - logfile: - file-path: "/tmp/dns.pcap" - mode: pcap \ No newline at end of file diff --git a/docs/_examples/use-case-1.yml b/docs/_examples/use-case-1.yml index 7d455750..6bb42c0b 100644 --- a/docs/_examples/use-case-1.yml +++ b/docs/_examples/use-case-1.yml @@ -1,39 +1,26 @@ -# Example 1: Capture DNSTap stream and backup-it to text and pcap files -# -# As prerequisites, we assume you have a DNS server which supports DNSTap (unbound, bind, powerdns, etc) -# For more informations about dnstap, read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ -# +# This configuration sets up DNS traffic monitoring through DNStap on port 6000 +# and logging in both text and pcap formats. -# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ text, pcap ] - # Write DNS logs to log file in text format and pcap - # with a maximum size of 100Mb for each files - # A rotation mechanism is implemented with 10 files maximum - # more detail about the text format: doc/configuration.md#custom-text-format - loggers: - - name: text - logfile: - file-path: "/var/log/dnstap.log" - max-size: 100 - max-files: 10 - mode: text - - name: pcap - logfile: - file-path: "/tmp/dns.pcap" - mode: pcap + - name: text + logfile: + file-path: "/tmp/dnstap.log" + max-size: 100 + max-files: 10 + mode: text - # Routes DNS messages from the tap collector to the file logger - routes: - - from: [ tap 
] - to: [ text, pcap ] \ No newline at end of file + - name: pcap + logfile: + file-path: "/tmp/dns.pcap" + mode: pcap \ No newline at end of file diff --git a/docs/_examples/use-case-10.deprecated.yml b/docs/_examples/use-case-10.deprecated.yml new file mode 100644 index 00000000..e7a7a6b6 --- /dev/null +++ b/docs/_examples/use-case-10.deprecated.yml @@ -0,0 +1,34 @@ +# Example 10: Transform all domains to lowercase +# +# As prerequisites, we assume you have +# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, +# read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + + # Print DNS messages on standard output with TEXT format + # with on tranformation to reduce qname to lowercase + # For example: Wwww.GooGlE.com will be equal to www.google.com + loggers: + - name: console + stdout: + mode: text + transforms: + normalize: + qname-lowercase: true + + # Routes DNS messages from the tap collector to standard output + routes: + - from: [tap] + to: [console] \ No newline at end of file diff --git a/docs/_examples/use-case-10.pipeline.yml b/docs/_examples/use-case-10.pipeline.yml deleted file mode 100644 index 5c45a262..00000000 --- a/docs/_examples/use-case-10.pipeline.yml +++ /dev/null @@ -1,21 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000; -# and applies tranformation to reduce qname to lowercase - -global: - trace: - verbose: true - -pipelines: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - routing-policy: - default: [ console ] - - - name: console - stdout: - mode: text - transforms: - normalize: - qname-lowercase: true \ No newline at end of file diff 
--git a/docs/_examples/use-case-10.yml b/docs/_examples/use-case-10.yml index e7a7a6b6..7a0bdb01 100644 --- a/docs/_examples/use-case-10.yml +++ b/docs/_examples/use-case-10.yml @@ -1,34 +1,21 @@ -# Example 10: Transform all domains to lowercase -# -# As prerequisites, we assume you have -# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, -# read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ +# This configuration sets up DNS traffic monitoring through DNStap on port 6000; +# and applies tranformation to reduce qname to lowercase -# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ console ] - # Print DNS messages on standard output with TEXT format - # with on tranformation to reduce qname to lowercase - # For example: Wwww.GooGlE.com will be equal to www.google.com - loggers: - - name: console - stdout: - mode: text - transforms: - normalize: - qname-lowercase: true - - # Routes DNS messages from the tap collector to standard output - routes: - - from: [tap] - to: [console] \ No newline at end of file + - name: console + stdout: + mode: text + transforms: + normalize: + qname-lowercase: true \ No newline at end of file diff --git a/docs/_examples/use-case-11.deprecated.yml b/docs/_examples/use-case-11.deprecated.yml new file mode 100644 index 00000000..9a3ae378 --- /dev/null +++ b/docs/_examples/use-case-11.deprecated.yml @@ -0,0 +1,35 @@ +# Example 11: Add geographical metadata with GeoIP +# +# As prerequisites, we assume you have +# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, +# read the following page: 
https://dmachard.github.io/posts/0001-dnstap-testing/ + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers + # and try to add country name in metadata + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + geoip: + mmdb-country-file: "/tmp/GeoIP/GeoLite2-Country.mmdb" + + # Print DNS messages on standard output with TEXT format + # Configure a custom text format to display the country name + loggers: + - name: console + stdout: + mode: text + text-format: "localtime identity queryip qname qtype geoip-country rcode" + + # Routes DNS messages from the tap collector to standard output + routes: + - from: [tap] + to: [console] \ No newline at end of file diff --git a/docs/_examples/use-case-11.pipeline.yml b/docs/_examples/use-case-11.pipeline.yml deleted file mode 100644 index 936f772d..00000000 --- a/docs/_examples/use-case-11.pipeline.yml +++ /dev/null @@ -1,22 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000; -# and add geographical metadata with GeoIP database - -global: - trace: - verbose: true - -pipelines: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - geoip: - mmdb-country-file: "./testsdata/GeoLite2-Country.mmdb" - routing-policy: - default: [ console ] - - - name: console - stdout: - mode: text - text-format: "localtime identity queryip qname qtype geoip-country rcode" diff --git a/docs/_examples/use-case-11.yml b/docs/_examples/use-case-11.yml index 9a3ae378..9f6bee1b 100644 --- a/docs/_examples/use-case-11.yml +++ b/docs/_examples/use-case-11.yml @@ -1,35 +1,22 @@ -# Example 11: Add geographical metadata with GeoIP -# -# As prerequisites, we assume you have -# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, -# read the following page: 
https://dmachard.github.io/posts/0001-dnstap-testing/ +# This configuration sets up DNS traffic monitoring through DNStap on port 6000; +# and add geographical metadata with GeoIP database -# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers - # and try to add country name in metadata - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - geoip: - mmdb-country-file: "/tmp/GeoIP/GeoLite2-Country.mmdb" +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + geoip: + mmdb-country-file: "./tests/testsdata/GeoLite2-Country.mmdb" + routing-policy: + forward: [ console ] - # Print DNS messages on standard output with TEXT format - # Configure a custom text format to display the country name - loggers: - - name: console - stdout: - mode: text - text-format: "localtime identity queryip qname qtype geoip-country rcode" - - # Routes DNS messages from the tap collector to standard output - routes: - - from: [tap] - to: [console] \ No newline at end of file + - name: console + stdout: + mode: text + text-format: "localtime identity queryip qname qtype geoip-country rcode" diff --git a/docs/_examples/use-case-12.deprecated.yml b/docs/_examples/use-case-12.deprecated.yml new file mode 100644 index 00000000..e8de3220 --- /dev/null +++ b/docs/_examples/use-case-12.deprecated.yml @@ -0,0 +1,33 @@ +# Example 12: Relays DNStap stream to multiple listeners without decoding it. 
+# +# As prerequisites, we assume you have +# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, +# read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers + collectors: + - name: relay-in + dnstap-relay: + listen-ip: 0.0.0.0 + listen-port: 6000 + + # Redirect DNSTap to two destinations + loggers: + - name: relay-out1 + dnstapclient: + remote-address: 127.0.0.1 + remote-port: 6001 + - name: relay-out2 + dnstapclient: + remote-address: 127.0.0.1 + remote-port: 6002 + + routes: + - from: [ relay-in ] + to: [ relay-out1, relay-out2 ] diff --git a/docs/_examples/use-case-12.pipeline.yml b/docs/_examples/use-case-12.pipeline.yml deleted file mode 100644 index 009cb5a0..00000000 --- a/docs/_examples/use-case-12.pipeline.yml +++ /dev/null @@ -1,24 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000; -# and duplicate the flow to two dnstap receiver - -global: - trace: - verbose: true - -pipelines: - - name: relay-in - dnstap-relay: - listen-ip: 0.0.0.0 - listen-port: 6000 - routing-policy: - default: [ relay-out1, relay-out2 ] - - - name: relay-out1 - dnstapclient: - remote-address: 127.0.0.1 - remote-port: 6001 - - - name: relay-out2 - dnstapclient: - remote-address: 127.0.0.1 - remote-port: 6002 diff --git a/docs/_examples/use-case-12.yml b/docs/_examples/use-case-12.yml index e8de3220..9002ebd9 100644 --- a/docs/_examples/use-case-12.yml +++ b/docs/_examples/use-case-12.yml @@ -1,33 +1,24 @@ -# Example 12: Relays DNStap stream to multiple listeners without decoding it. 
-# -# As prerequisites, we assume you have -# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, -# read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ +# This configuration sets up DNS traffic monitoring through DNStap on port 6000; +# and duplicate the flow to two dnstap receiver -# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers - collectors: - - name: relay-in - dnstap-relay: - listen-ip: 0.0.0.0 - listen-port: 6000 +pipelines: + - name: relay-in + dnstap-relay: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ relay-out1, relay-out2 ] - # Redirect DNSTap to two destinations - loggers: - - name: relay-out1 - dnstapclient: - remote-address: 127.0.0.1 - remote-port: 6001 - - name: relay-out2 - dnstapclient: - remote-address: 127.0.0.1 - remote-port: 6002 + - name: relay-out1 + dnstapclient: + remote-address: 127.0.0.1 + remote-port: 6001 - routes: - - from: [ relay-in ] - to: [ relay-out1, relay-out2 ] + - name: relay-out2 + dnstapclient: + remote-address: 127.0.0.1 + remote-port: 6002 diff --git a/docs/_examples/use-case-13.deprecated.yml b/docs/_examples/use-case-13.deprecated.yml new file mode 100644 index 00000000..608f3eba --- /dev/null +++ b/docs/_examples/use-case-13.deprecated.yml @@ -0,0 +1,26 @@ +# Example 13: Save DNStap stream to file. 
+ +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + + # Save the dnstap stream to file + loggers: + - name: dnstap + logfile: + file-path: /tmp/dnstap.fstrm + flush-interval: 10 + mode: dnstap + + routes: + - from: [ tap ] + to: [ dnstap ] diff --git a/docs/_examples/use-case-13.pipeline.yml b/docs/_examples/use-case-13.pipeline.yml deleted file mode 100644 index 264a5b33..00000000 --- a/docs/_examples/use-case-13.pipeline.yml +++ /dev/null @@ -1,20 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000; -# and save to a file as DNStap - -global: - trace: - verbose: true - -pipelines: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - routing-policy: - default: [ dnstap ] - - - name: dnstap - logfile: - file-path: /tmp/dnstap.fstrm - flush-interval: 10 - mode: dnstap diff --git a/docs/_examples/use-case-13.yml b/docs/_examples/use-case-13.yml index 608f3eba..1170008f 100644 --- a/docs/_examples/use-case-13.yml +++ b/docs/_examples/use-case-13.yml @@ -1,26 +1,20 @@ -# Example 13: Save DNStap stream to file. 
+# This configuration sets up DNS traffic monitoring through DNStap on port 6000; +# and save to a file as DNStap -# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ dnstap ] - # Save the dnstap stream to file - loggers: - - name: dnstap - logfile: - file-path: /tmp/dnstap.fstrm - flush-interval: 10 - mode: dnstap - - routes: - - from: [ tap ] - to: [ dnstap ] + - name: dnstap + logfile: + file-path: /tmp/dnstap.fstrm + flush-interval: 10 + mode: dnstap diff --git a/docs/_examples/use-case-14.deprecated.yml b/docs/_examples/use-case-14.deprecated.yml new file mode 100644 index 00000000..7e3a7a94 --- /dev/null +++ b/docs/_examples/use-case-14.deprecated.yml @@ -0,0 +1,24 @@ +# Example 14: Watch for DNStap files as input. + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + # Watch in /tmp folder to find dnstap files with fstrm extension + collectors: + - name: dnstap + file-ingestor: + watch-dir: /tmp + watch-mode: dnstap + + # Redirect output to the console + loggers: + - name: console + stdout: + mode: text + + routes: + - from: [ dnstap ] + to: [ console ] diff --git a/docs/_examples/use-case-14.pipeline.yml b/docs/_examples/use-case-14.pipeline.yml deleted file mode 100644 index 4f2ac08a..00000000 --- a/docs/_examples/use-case-14.pipeline.yml +++ /dev/null @@ -1,31 +0,0 @@ -# This configuration sets up watch and read DNStap files; -# and logging to the console in text format. 
- -global: - trace: - verbose: true - -pipelines: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - routing-policy: - default: [ out-dnstap ] - - - name: out-dnstap - logfile: - file-path: /tmp/dnstap.fstrm - flush-interval: 1 - mode: dnstap - - - name: file-dnstap - file-ingestor: - watch-dir: /tmp - watch-mode: dnstap - routing-policy: - default: [ console ] - - - name: console - stdout: - mode: text \ No newline at end of file diff --git a/docs/_examples/use-case-14.yml b/docs/_examples/use-case-14.yml index 7e3a7a94..3f352159 100644 --- a/docs/_examples/use-case-14.yml +++ b/docs/_examples/use-case-14.yml @@ -1,24 +1,31 @@ -# Example 14: Watch for DNStap files as input. +# This configuration sets up watch and read DNStap files; +# and logging to the console in text format. -# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - # Watch in /tmp folder to find dnstap files with fstrm extension - collectors: - - name: dnstap - file-ingestor: - watch-dir: /tmp - watch-mode: dnstap +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ out-dnstap ] - # Redirect output to the console - loggers: - - name: console - stdout: - mode: text + - name: out-dnstap + logfile: + file-path: /tmp/dnstap.fstrm + flush-interval: 1 + mode: dnstap - routes: - - from: [ dnstap ] - to: [ console ] + - name: file-dnstap + file-ingestor: + watch-dir: /tmp + watch-mode: dnstap + routing-policy: + forward: [ console ] + + - name: console + stdout: + mode: text \ No newline at end of file diff --git a/docs/_examples/use-case-15.deprecated.yml b/docs/_examples/use-case-15.deprecated.yml new file mode 100644 index 00000000..5533ee7a --- /dev/null +++ b/docs/_examples/use-case-15.deprecated.yml @@ -0,0 +1,27 @@ +# Example 15: Watch for PCAP files as input and convert to JSON + +# If turned on, debug messages are printed in the standard output +global: + trace: + 
verbose: true + +multiplexer: + # Watch in /tmp folder to find pcap files with pcap or pcap.gz extension + collectors: + - name: pcap + file-ingestor: + watch-dir: /home/pcap/ + watch-mode: pcap + transforms: + normalize: + qname-lowercase: true + + # Redirect output to the console + loggers: + - name: console + stdout: + mode: json + + routes: + - from: [ pcap ] + to: [ console ] \ No newline at end of file diff --git a/docs/_examples/use-case-15.pipeline.yml b/docs/_examples/use-case-15.pipeline.yml deleted file mode 100644 index c88ccc07..00000000 --- a/docs/_examples/use-case-15.pipeline.yml +++ /dev/null @@ -1,21 +0,0 @@ -# This configuration sets up watch and read PCAP files; -# and logging to the console in JSON format. - -global: - trace: - verbose: true - -pipelines: - - name: pcap - file-ingestor: - watch-dir: /tmp/ - watch-mode: pcap - transforms: - normalize: - qname-lowercase: true - routing-policy: - default: [ console ] - - - name: console - stdout: - mode: json diff --git a/docs/_examples/use-case-15.yml b/docs/_examples/use-case-15.yml index 5533ee7a..885e4b02 100644 --- a/docs/_examples/use-case-15.yml +++ b/docs/_examples/use-case-15.yml @@ -1,27 +1,21 @@ -# Example 15: Watch for PCAP files as input and convert to JSON +# This configuration sets up watch and read PCAP files; +# and logging to the console in JSON format. 
-# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - # Watch in /tmp folder to find pcap files with pcap or pcap.gz extension - collectors: - - name: pcap - file-ingestor: - watch-dir: /home/pcap/ - watch-mode: pcap - transforms: - normalize: - qname-lowercase: true +pipelines: + - name: pcap + file-ingestor: + watch-dir: /tmp/ + watch-mode: pcap + transforms: + normalize: + qname-lowercase: true + routing-policy: + forward: [ console ] - # Redirect output to the console - loggers: - - name: console - stdout: - mode: json - - routes: - - from: [ pcap ] - to: [ console ] \ No newline at end of file + - name: console + stdout: + mode: json diff --git a/docs/_examples/use-case-16.deprecated.yml b/docs/_examples/use-case-16.deprecated.yml new file mode 100644 index 00000000..a3a1c5c0 --- /dev/null +++ b/docs/_examples/use-case-16.deprecated.yml @@ -0,0 +1,25 @@ +# Example 16: Listen for PowerDNS protobuf and convert them to DNStap stream + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + # Listen for Protobuf PowerDNS + collectors: + - name: pdns + powerdns: + listen-ip: 0.0.0.0 + listen-port: 6001 + + # Redirect output to a remote DNStap collector + loggers: + - name: tap + dnstapclient: + remote-address: 127.0.0.1 + remote-port: 6002 + + routes: + - from: [ pdns ] + to: [ tap ] diff --git a/docs/_examples/use-case-16.pipeline.yml b/docs/_examples/use-case-16.pipeline.yml deleted file mode 100644 index 6e5cd2a1..00000000 --- a/docs/_examples/use-case-16.pipeline.yml +++ /dev/null @@ -1,19 +0,0 @@ -# This configuration sets up DNS traffic monitoring through PowerDNS protobuf on port 6001; -# and transforms it to DNStap on port 6002. 
- -global: - trace: - verbose: true - -pipelines: - - name: pdns - powerdns: - listen-ip: 0.0.0.0 - listen-port: 6001 - routing-policy: - default: [ tap ] - - - name: tap - dnstapclient: - remote-address: 127.0.0.1 - remote-port: 6002 \ No newline at end of file diff --git a/docs/_examples/use-case-16.yml b/docs/_examples/use-case-16.yml index a3a1c5c0..5d294e2c 100644 --- a/docs/_examples/use-case-16.yml +++ b/docs/_examples/use-case-16.yml @@ -1,25 +1,19 @@ -# Example 16: Listen for PowerDNS protobuf and convert them to DNStap stream +# This configuration sets up DNS traffic monitoring through PowerDNS protobuf on port 6001; +# and transforms it to DNStap on port 6002. -# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - # Listen for Protobuf PowerDNS - collectors: - - name: pdns - powerdns: - listen-ip: 0.0.0.0 - listen-port: 6001 +pipelines: + - name: pdns + powerdns: + listen-ip: 0.0.0.0 + listen-port: 6001 + routing-policy: + forward: [ tap ] - # Redirect output to a remote DNStap collector - loggers: - - name: tap - dnstapclient: - remote-address: 127.0.0.1 - remote-port: 6002 - - routes: - - from: [ pdns ] - to: [ tap ] + - name: tap + dnstapclient: + remote-address: 127.0.0.1 + remote-port: 6002 \ No newline at end of file diff --git a/docs/_examples/use-case-17.deprecated.yml b/docs/_examples/use-case-17.deprecated.yml new file mode 100644 index 00000000..dcf6b8eb --- /dev/null +++ b/docs/_examples/use-case-17.deprecated.yml @@ -0,0 +1,29 @@ +# Example 17: Capture TZSP packets containing DNS packets and process them as json +# +# As prerequisites, we assume you have +# - a Mikrotik brand device that routes DNS packets and can use the "tzsp" Action in the firewall +# or firewall6 mangle table. 
+ + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + collectors: + - name: tzsp + tzsp: + enable: true + listen-ip: "0.0.0.0" + listen-port: 10000 + + # Redirect output to the console + loggers: + - name: console + stdout: + mode: json + + routes: + - from: [ tzsp ] + to: [ console ] diff --git a/docs/_examples/use-case-17.pipeline.yml b/docs/_examples/use-case-17.pipeline.yml deleted file mode 100644 index cc094fea..00000000 --- a/docs/_examples/use-case-17.pipeline.yml +++ /dev/null @@ -1,19 +0,0 @@ -# This configuration sets up DNS traffic monitoring through TZSP protocol on port 1000; -# and logging to the console in JSON format. - -global: - trace: - verbose: true - -pipelines: - - name: tzsp - tzsp: - enable: true - listen-ip: "0.0.0.0" - listen-port: 10000 - routing-policy: - default: [ console ] - - - name: console - stdout: - mode: json \ No newline at end of file diff --git a/docs/_examples/use-case-17.yml b/docs/_examples/use-case-17.yml index dcf6b8eb..ce4a5720 100644 --- a/docs/_examples/use-case-17.yml +++ b/docs/_examples/use-case-17.yml @@ -1,29 +1,19 @@ -# Example 17: Capture TZSP packets containing DNS packets and process them as json -# -# As prerequisites, we assume you have -# - a Mikrotik brand device that routes DNS packets and can use the "tzsp" Action in the firewall -# or firewall6 mangle table. +# This configuration sets up DNS traffic monitoring through TZSP protocol on port 1000; +# and logging to the console in JSON format. 
- -# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - collectors: - - name: tzsp - tzsp: - enable: true - listen-ip: "0.0.0.0" - listen-port: 10000 - - # Redirect output to the console - loggers: - - name: console - stdout: - mode: json +pipelines: + - name: tzsp + tzsp: + enable: true + listen-ip: "0.0.0.0" + listen-port: 10000 + routing-policy: + forward: [ console ] - routes: - - from: [ tzsp ] - to: [ console ] + - name: console + stdout: + mode: json \ No newline at end of file diff --git a/docs/_examples/use-case-18.deprecated.yml b/docs/_examples/use-case-18.deprecated.yml new file mode 100644 index 00000000..ba0b964b --- /dev/null +++ b/docs/_examples/use-case-18.deprecated.yml @@ -0,0 +1,32 @@ +# Example 18: Count the number of evicted queries +# + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + latency: + measure-latency: false + unanswered-queries: true + queries-timeout: 2 + + loggers: + - name: console + stdout: + mode: json + - name: prom + prometheus: + listen-ip: 0.0.0.0 + listen-port: 8080 + + routes: + - from: [ tap ] + to: [ console, prom ] diff --git a/docs/_examples/use-case-18.pipeline.yml b/docs/_examples/use-case-18.pipeline.yml deleted file mode 100644 index 22f388ee..00000000 --- a/docs/_examples/use-case-18.pipeline.yml +++ /dev/null @@ -1,28 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000; -# applies tranformations on it and send to the console and prometheus - -global: - trace: - verbose: true - -pipelines: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - latency: - measure-latency: false - unanswered-queries: true - queries-timeout: 2 - routing-policy: - default: [ console, prom ] - - - name: console - stdout: - mode: text - - - name: prom - 
prometheus: - listen-ip: 0.0.0.0 - listen-port: 8080 \ No newline at end of file diff --git a/docs/_examples/use-case-18.yml b/docs/_examples/use-case-18.yml index ba0b964b..222edaca 100644 --- a/docs/_examples/use-case-18.yml +++ b/docs/_examples/use-case-18.yml @@ -1,32 +1,28 @@ -# Example 18: Count the number of evicted queries -# +# This configuration sets up DNS traffic monitoring through DNStap on port 6000; +# applies tranformations on it and send to the console and prometheus -# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - latency: - measure-latency: false - unanswered-queries: true - queries-timeout: 2 +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + latency: + measure-latency: false + unanswered-queries: true + queries-timeout: 2 + routing-policy: + forward: [ console, prom ] - loggers: - - name: console - stdout: - mode: json - - name: prom - prometheus: - listen-ip: 0.0.0.0 - listen-port: 8080 + - name: console + stdout: + mode: text - routes: - - from: [ tap ] - to: [ console, prom ] + - name: prom + prometheus: + listen-ip: 0.0.0.0 + listen-port: 8080 \ No newline at end of file diff --git a/docs/_examples/use-case-19.deprecated.yml b/docs/_examples/use-case-19.deprecated.yml new file mode 100644 index 00000000..c4476a66 --- /dev/null +++ b/docs/_examples/use-case-19.deprecated.yml @@ -0,0 +1,33 @@ +# Example 19: Suspicious traffic detector + +global: + trace: + verbose: true + +multiplexer: + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + normalize: + qname-lowercase: true + latency: + measure-latency: true + queries-timeout: 2 + + loggers: + - name: console + stdout: + mode: text + text-format: timestamp-rfc3339ns identity operation rcode qname qtype latency suspicious-score + transforms: + filtering: + 
log-queries : false + suspicious: + threshold-qname-len: 4 + + routes: + - from: [ tap ] + to: [ console ] \ No newline at end of file diff --git a/docs/_examples/use-case-19.pipeline.yml b/docs/_examples/use-case-19.pipeline.yml deleted file mode 100644 index f47656f7..00000000 --- a/docs/_examples/use-case-19.pipeline.yml +++ /dev/null @@ -1,30 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000; -# applies tranformations on dnstap collector and the console logger - -global: - trace: - verbose: true - -pipelines: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - normalize: - qname-lowercase: true - latency: - measure-latency: true - queries-timeout: 2 - routing-policy: - default: [ console ] - - - name: console - stdout: - mode: text - text-format: timestamp-rfc3339ns identity operation rcode qname qtype latency suspicious-score - transforms: - filtering: - log-queries : false - suspicious: - threshold-qname-len: 4 \ No newline at end of file diff --git a/docs/_examples/use-case-19.yml b/docs/_examples/use-case-19.yml index c4476a66..b7cc8530 100644 --- a/docs/_examples/use-case-19.yml +++ b/docs/_examples/use-case-19.yml @@ -1,33 +1,30 @@ -# Example 19: Suspicious traffic detector +# This configuration sets up DNS traffic monitoring through DNStap on port 6000; +# applies tranformations on dnstap collector and the console logger global: trace: verbose: true -multiplexer: - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - normalize: - qname-lowercase: true - latency: - measure-latency: true - queries-timeout: 2 +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + normalize: + qname-lowercase: true + latency: + measure-latency: true + queries-timeout: 2 + routing-policy: + forward: [ console ] - loggers: - - name: console - stdout: - mode: text - text-format: timestamp-rfc3339ns identity operation rcode qname qtype 
latency suspicious-score - transforms: - filtering: - log-queries : false - suspicious: - threshold-qname-len: 4 - - routes: - - from: [ tap ] - to: [ console ] \ No newline at end of file + - name: console + stdout: + mode: text + text-format: timestamp-rfc3339ns identity operation rcode qname qtype latency suspicious-score + transforms: + filtering: + log-queries : false + suspicious: + threshold-qname-len: 4 \ No newline at end of file diff --git a/docs/_examples/use-case-2.deprecated.yml b/docs/_examples/use-case-2.deprecated.yml new file mode 100644 index 00000000..798a7374 --- /dev/null +++ b/docs/_examples/use-case-2.deprecated.yml @@ -0,0 +1,32 @@ +# Example 2: Observe DNS metrics with Prometheus and Grafana +# +# As prerequisites, we assume you have +# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, +# read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ +# - a Prometheus instance ready to scrap on tcp/8080 +# - Basic or advanced dashboards are deployed on Grafana: doc/dashboards.md + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + + # Exposes prometheus metrics + loggers: + - name: prom + prometheus: + listen-ip: 0.0.0.0 + listen-port: 8080 + + # Routes DNS messages from the tap collector to the prometheus logger + routes: + - from: [tap] + to: [prom] \ No newline at end of file diff --git a/docs/_examples/use-case-2.yml b/docs/_examples/use-case-2.yml index 798a7374..8bf4d52a 100644 --- a/docs/_examples/use-case-2.yml +++ b/docs/_examples/use-case-2.yml @@ -1,32 +1,19 @@ -# Example 2: Observe DNS metrics with Prometheus and Grafana -# -# As prerequisites, we assume you have -# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for 
more informations about dnstap, -# read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ -# - a Prometheus instance ready to scrap on tcp/8080 -# - Basic or advanced dashboards are deployed on Grafana: doc/dashboards.md +# This configuration sets up DNS traffic monitoring through DNStap on port 6000, +# and computes Prometheus metrics for analysis. -# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ prom ] - # Exposes prometheus metrics - loggers: - - name: prom - prometheus: - listen-ip: 0.0.0.0 - listen-port: 8080 - - # Routes DNS messages from the tap collector to the prometheus logger - routes: - - from: [tap] - to: [prom] \ No newline at end of file + - name: prom + prometheus: + listen-ip: 0.0.0.0 + listen-port: 8080 \ No newline at end of file diff --git a/docs/_examples/use-case-20.deprecated.yml b/docs/_examples/use-case-20.deprecated.yml new file mode 100644 index 00000000..45c7b944 --- /dev/null +++ b/docs/_examples/use-case-20.deprecated.yml @@ -0,0 +1,29 @@ +# Example 20: Detected repetitive traffic and log once +# + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + reducer: + repetitive-traffic-detector: true + qname-plus-one: false + watch-interval: 5 + + loggers: + - name: console + stdout: + mode: text + text-format: "timestamp-rfc3339ns identity operation rcode queryip qname qtype reducer-occurrences reducer-cumulative-length" + + routes: + - from: [ tap ] + to: [ console ] diff --git a/docs/_examples/use-case-20.pipeline.yml 
b/docs/_examples/use-case-20.pipeline.yml deleted file mode 100644 index 0762fae5..00000000 --- a/docs/_examples/use-case-20.pipeline.yml +++ /dev/null @@ -1,24 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000; -# removes duplicate traffic and log to the console - -global: - trace: - verbose: true - -pipelines: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - reducer: - repetitive-traffic-detector: true - qname-plus-one: false - watch-interval: 5 - routing-policy: - default: [ console ] - - - name: console - stdout: - mode: text - text-format: "timestamp-rfc3339ns identity operation rcode queryip qname qtype reducer-occurrences reducer-cumulative-length" diff --git a/docs/_examples/use-case-20.yml b/docs/_examples/use-case-20.yml index 45c7b944..dcf62f3e 100644 --- a/docs/_examples/use-case-20.yml +++ b/docs/_examples/use-case-20.yml @@ -1,29 +1,24 @@ -# Example 20: Detected repetitive traffic and log once -# +# This configuration sets up DNS traffic monitoring through DNStap on port 6000; +# removes duplicate traffic and log to the console -# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - reducer: - repetitive-traffic-detector: true - qname-plus-one: false - watch-interval: 5 +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + reducer: + repetitive-traffic-detector: true + qname-plus-one: false + watch-interval: 5 + routing-policy: + forward: [ console ] - loggers: - - name: console - stdout: - mode: text - text-format: "timestamp-rfc3339ns identity operation rcode queryip qname qtype reducer-occurrences reducer-cumulative-length" - - routes: - - from: [ tap ] - to: [ console ] + - name: console + stdout: + mode: text + text-format: "timestamp-rfc3339ns identity operation rcode queryip qname qtype 
reducer-occurrences reducer-cumulative-length" diff --git a/testsdata/config_verbose.yml b/docs/_examples/use-case-21.deprecated.yml similarity index 67% rename from testsdata/config_verbose.yml rename to docs/_examples/use-case-21.deprecated.yml index 29161ede..006b8d26 100644 --- a/testsdata/config_verbose.yml +++ b/docs/_examples/use-case-21.deprecated.yml @@ -1,6 +1,7 @@ global: trace: verbose: true + filename: /tmp/dnscollector.log multiplexer: collectors: @@ -12,8 +13,8 @@ multiplexer: loggers: - name: console stdout: - mode: text + mode: pcap routes: - - from: [tap] - to: [console] \ No newline at end of file + - from: [ tap ] + to: [ console ] \ No newline at end of file diff --git a/docs/_examples/use-case-21.yml b/docs/_examples/use-case-21.yml index 006b8d26..17822265 100644 --- a/docs/_examples/use-case-21.yml +++ b/docs/_examples/use-case-21.yml @@ -1,20 +1,18 @@ +# This configuration sets up DNS traffic monitoring through DNStap on port 6000; +# and log the console as PCAP format + global: trace: verbose: true - filename: /tmp/dnscollector.log - -multiplexer: - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - loggers: - - name: console - stdout: - mode: pcap +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ console ] - routes: - - from: [ tap ] - to: [ console ] \ No newline at end of file + - name: console + stdout: + mode: pcap \ No newline at end of file diff --git a/testsdata/config_stdout_dnstaptls.yml b/docs/_examples/use-case-22.deprecated.yml similarity index 51% rename from testsdata/config_stdout_dnstaptls.yml rename to docs/_examples/use-case-22.deprecated.yml index df933a62..46b0ac5f 100644 --- a/testsdata/config_stdout_dnstaptls.yml +++ b/docs/_examples/use-case-22.deprecated.yml @@ -8,15 +8,16 @@ multiplexer: dnstap: listen-ip: 0.0.0.0 listen-port: 6000 - tls-support: true - cert-file: "./testsdata/dnscollector.crt" - key-file: 
"./testsdata/dnscollector.key" + transforms: + machine-learning: + add-features: true loggers: - name: console stdout: mode: text - + text-format: ml-size ml-entropy ml-length ml-digits ml-lowers ml-uppers + routes: - - from: [tap] - to: [console] + - from: [ tap ] + to: [ console ] \ No newline at end of file diff --git a/docs/_examples/use-case-22.pipeline.yml b/docs/_examples/use-case-22.pipeline.yml deleted file mode 100644 index 47470288..00000000 --- a/docs/_examples/use-case-22.pipeline.yml +++ /dev/null @@ -1,23 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000; -# applies machine learning transformation on it - -global: - trace: - verbose: true - -pipelines: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - machine-learning: - add-features: true - routing-policy: - default: [ console ] - - - name: console - stdout: - mode: text - text-format: ml-size ml-entropy ml-length ml-digits ml-lowers ml-uppers - \ No newline at end of file diff --git a/docs/_examples/use-case-22.yml b/docs/_examples/use-case-22.yml index 46b0ac5f..df45cbd5 100644 --- a/docs/_examples/use-case-22.yml +++ b/docs/_examples/use-case-22.yml @@ -1,23 +1,23 @@ +# This configuration sets up DNS traffic monitoring through DNStap on port 6000; +# applies machine learning transformation on it + global: trace: verbose: true -multiplexer: - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - machine-learning: - add-features: true +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + machine-learning: + add-features: true + routing-policy: + forward: [ console ] - loggers: - - name: console - stdout: - mode: text - text-format: ml-size ml-entropy ml-length ml-digits ml-lowers ml-uppers - - routes: - - from: [ tap ] - to: [ console ] \ No newline at end of file + - name: console + stdout: + mode: text + text-format: ml-size ml-entropy ml-length 
ml-digits ml-lowers ml-uppers + \ No newline at end of file diff --git a/docs/_examples/use-case-23.deprecated.yml b/docs/_examples/use-case-23.deprecated.yml new file mode 100644 index 00000000..85d5fd38 --- /dev/null +++ b/docs/_examples/use-case-23.deprecated.yml @@ -0,0 +1,24 @@ +global: + trace: + verbose: true + +multiplexer: + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + + loggers: + - name: syslog + syslog: + transport: tcp+tls + remote-address: "127.0.0.1:6514" + mode: text + tls-insecure: true + formatter: "rfc5424" + framer: "rfc5425" + + routes: + - from: [ tap ] + to: [ syslog ] \ No newline at end of file diff --git a/docs/_examples/use-case-23.pipeline.yml b/docs/_examples/use-case-23.pipeline.yml deleted file mode 100644 index 02c13f57..00000000 --- a/docs/_examples/use-case-23.pipeline.yml +++ /dev/null @@ -1,23 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000; -# and log to a remote syslog server with TLS. - -global: - trace: - verbose: true - -pipelines: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - routing-policy: - default: [ syslog ] - - - name: syslog - syslog: - transport: tcp+tls - remote-address: "127.0.0.1:6514" - mode: text - tls-insecure: true - formatter: "rfc5424" - framer: "rfc5425" \ No newline at end of file diff --git a/docs/_examples/use-case-23.yml b/docs/_examples/use-case-23.yml index 85d5fd38..ca7016c6 100644 --- a/docs/_examples/use-case-23.yml +++ b/docs/_examples/use-case-23.yml @@ -1,24 +1,23 @@ +# This configuration sets up DNS traffic monitoring through DNStap on port 6000; +# and log to a remote syslog server with TLS. 
+ global: trace: verbose: true -multiplexer: - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - - loggers: - - name: syslog - syslog: - transport: tcp+tls - remote-address: "127.0.0.1:6514" - mode: text - tls-insecure: true - formatter: "rfc5424" - framer: "rfc5425" +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ syslog ] - routes: - - from: [ tap ] - to: [ syslog ] \ No newline at end of file + - name: syslog + syslog: + transport: tcp+tls + remote-address: "127.0.0.1:6514" + mode: text + tls-insecure: true + formatter: "rfc5424" + framer: "rfc5425" \ No newline at end of file diff --git a/docs/_examples/use-case-24.pipeline.yml b/docs/_examples/use-case-24.yml similarity index 86% rename from docs/_examples/use-case-24.pipeline.yml rename to docs/_examples/use-case-24.yml index 7e68ce76..dca37544 100644 --- a/docs/_examples/use-case-24.pipeline.yml +++ b/docs/_examples/use-case-24.yml @@ -21,7 +21,7 @@ pipelines: normalize: qname-lowercase: true routing-policy: - default: [ txt-queries, prom ] + forward: [ txt-queries, prom ] - name: txt-queries dnsmessage: @@ -31,9 +31,9 @@ pipelines: dns.qtype: "TXT" transforms: atags: - tags: [ "TAG:TXT-QUERIES" ] + add-tags: [ "TAG:TXT-QUERIES" ] routing-policy: - default: [ apple-txt, all-txt ] + forward: [ apple-txt, all-txt ] - name: all-txt dnsmessage: @@ -41,7 +41,7 @@ pipelines: include: dnstap.operation: "CLIENT_RESPONSE" routing-policy: - default: [ outputfile-justtxt ] + forward: [ outputfile-justtxt ] - name: apple-txt dnsmessage: @@ -51,9 +51,9 @@ pipelines: dns.qname: "^*.apple.com$" transforms: atags: - tags: [ "TXT:apple" ] + add-tags: [ "TXT:apple" ] routing-policy: - default: [ outputfile-apple ] + forward: [ outputfile-apple ] - name: outputfile-justtxt logfile: diff --git a/docs/_examples/use-case-3.deprecated.yml b/docs/_examples/use-case-3.deprecated.yml new file mode 100644 index 00000000..861ee7d1 --- /dev/null +++ 
b/docs/_examples/use-case-3.deprecated.yml @@ -0,0 +1,30 @@ +# Example 3: Transform DNSTap stream to JSON format +# +# As prerequisites, we assume you have a DNS server which supports DNSTap (unbound, bind, powerdns, etc) +# For more informations about dnstap, read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ +# + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: false + +multiplexer: + # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + + # Print DNS messages on standard output with JSON format + # more detail about the format: doc/dnsjson.md + loggers: + - name: console + stdout: + mode: json + + # Routes DNS messages from the tap collector to standard output + routes: + - from: [tap] + to: [console] \ No newline at end of file diff --git a/docs/_examples/use-case-3.pipeline.yml b/docs/_examples/use-case-3.pipeline.yml deleted file mode 100644 index 1c8e1c45..00000000 --- a/docs/_examples/use-case-3.pipeline.yml +++ /dev/null @@ -1,18 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000 -# and logging to the console in JSON format. 
- -global: - trace: - verbose: false - -pipelines: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - routing-policy: - default: [ console ] - - - name: console - stdout: - mode: json diff --git a/docs/_examples/use-case-3.yml b/docs/_examples/use-case-3.yml index 861ee7d1..336cd438 100644 --- a/docs/_examples/use-case-3.yml +++ b/docs/_examples/use-case-3.yml @@ -1,30 +1,18 @@ -# Example 3: Transform DNSTap stream to JSON format -# -# As prerequisites, we assume you have a DNS server which supports DNSTap (unbound, bind, powerdns, etc) -# For more informations about dnstap, read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ -# +# This configuration sets up DNS traffic monitoring through DNStap on port 6000 +# and logging to the console in JSON format. -# If turned on, debug messages are printed in the standard output global: trace: - verbose: false + verbose: true -multiplexer: - # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ console ] - # Print DNS messages on standard output with JSON format - # more detail about the format: doc/dnsjson.md - loggers: - - name: console - stdout: - mode: json - - # Routes DNS messages from the tap collector to standard output - routes: - - from: [tap] - to: [console] \ No newline at end of file + - name: console + stdout: + mode: json diff --git a/docs/_examples/use-case-4.deprecated.yml b/docs/_examples/use-case-4.deprecated.yml new file mode 100644 index 00000000..e8e46a70 --- /dev/null +++ b/docs/_examples/use-case-4.deprecated.yml @@ -0,0 +1,32 @@ +# Example 4: Follow DNS traffic with Loki and Grafana +# +# As prerequisites, we assume you have +# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, +# read the following 
page: https://dmachard.github.io/posts/0001-dnstap-testing/ +# - a Loki infrastructure: https://grafana.com/docs/loki/latest/ + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + + # Sends DNS messages to loki instance in text format + loggers: + - name: loki + lokiclient: + server-url: "http://loki:3100/loki/api/v1/push" + job-name: "dnscollector" + text-format: "localtime identity qr queryip family protocol qname qtype rcode" + + # Routes DNS messages from the tap collector to loki with some transformations + routes: + - from: [tap] + to: [loki] \ No newline at end of file diff --git a/docs/_examples/use-case-4.pipeline.yml b/docs/_examples/use-case-4.pipeline.yml deleted file mode 100644 index 804b2100..00000000 --- a/docs/_examples/use-case-4.pipeline.yml +++ /dev/null @@ -1,20 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000 -# and logging to a remote Loki server. 
- -global: - trace: - verbose: true - -pipelines: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - routing-policy: - default: [ loki ] - - - name: loki - lokiclient: - server-url: "http://loki:3100/loki/api/v1/push" - job-name: "dnscollector" - text-format: "localtime identity qr queryip family protocol qname qtype rcode" diff --git a/docs/_examples/use-case-4.yml b/docs/_examples/use-case-4.yml index e8e46a70..2c7f0d5c 100644 --- a/docs/_examples/use-case-4.yml +++ b/docs/_examples/use-case-4.yml @@ -1,32 +1,20 @@ -# Example 4: Follow DNS traffic with Loki and Grafana -# -# As prerequisites, we assume you have -# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, -# read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ -# - a Loki infrastructure: https://grafana.com/docs/loki/latest/ +# This configuration sets up DNS traffic monitoring through DNStap on port 6000 +# and logging to a remote Loki server. 
-# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ loki ] - # Sends DNS messages to loki instance in text format - loggers: - - name: loki - lokiclient: - server-url: "http://loki:3100/loki/api/v1/push" - job-name: "dnscollector" - text-format: "localtime identity qr queryip family protocol qname qtype rcode" - - # Routes DNS messages from the tap collector to loki with some transformations - routes: - - from: [tap] - to: [loki] \ No newline at end of file + - name: loki + lokiclient: + server-url: "http://loki:3100/loki/api/v1/push" + job-name: "dnscollector" + text-format: "localtime identity qr queryip family protocol qname qtype rcode" diff --git a/docs/_examples/use-case-5.deprecated.yml b/docs/_examples/use-case-5.deprecated.yml new file mode 100644 index 00000000..75614e99 --- /dev/null +++ b/docs/_examples/use-case-5.deprecated.yml @@ -0,0 +1,31 @@ +# Example 5: Read from UNIX DNSTap socket and forward it to TLS stream +# +# As prerequisites, we assume you have +# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, +# read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ +# - a remote DNSTap collector with TLS support + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + # Read DNSTap stream from a UNIX socket + collectors: + - name: tap_unix + dnstap: + sock-path: /tmp/dnstap.sock + + # Sends to another DNSTap collector with TLS + loggers: + - name: tap_tls + dnstapclient: + remote-address: 127.0.0.1 + remote-port: 6000 + tls-support: true + + # Routes DNS messages from the Unix socket to TLS tap 
destination + routes: + - from: [tap_unix] + to: [tap_tls] \ No newline at end of file diff --git a/docs/_examples/use-case-5.pipeline.yml b/docs/_examples/use-case-5.pipeline.yml deleted file mode 100644 index e999298a..00000000 --- a/docs/_examples/use-case-5.pipeline.yml +++ /dev/null @@ -1,19 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap unix socket; -# and logging to a remote DNSTap server with TLS enabled. - -global: - trace: - verbose: true - -pipelines: - - name: tap_unix - dnstap: - sock-path: /tmp/dnstap.sock - routing-policy: - default: [ tap_tls ] - - - name: tap_tls - dnstapclient: - remote-address: 127.0.0.1 - remote-port: 6000 - tls-support: true \ No newline at end of file diff --git a/docs/_examples/use-case-5.yml b/docs/_examples/use-case-5.yml index 75614e99..6c87bfec 100644 --- a/docs/_examples/use-case-5.yml +++ b/docs/_examples/use-case-5.yml @@ -1,31 +1,19 @@ -# Example 5: Read from UNIX DNSTap socket and forward it to TLS stream -# -# As prerequisites, we assume you have -# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, -# read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ -# - a remote DNSTap collector with TLS support +# This configuration sets up DNS traffic monitoring through DNStap unix socket; +# and logging to a remote DNSTap server with TLS enabled. 
-# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - # Read DNSTap stream from a UNIX socket - collectors: - - name: tap_unix - dnstap: - sock-path: /tmp/dnstap.sock - - # Sends to another DNSTap collector with TLS - loggers: - - name: tap_tls - dnstapclient: - remote-address: 127.0.0.1 - remote-port: 6000 - tls-support: true - - # Routes DNS messages from the Unix socket to TLS tap destination - routes: - - from: [tap_unix] - to: [tap_tls] \ No newline at end of file +pipelines: + - name: tap_unix + dnstap: + sock-path: /tmp/dnstap.sock + routing-policy: + forward: [ tap_tls ] + + - name: tap_tls + dnstapclient: + remote-address: 127.0.0.1 + remote-port: 6000 + tls-support: true \ No newline at end of file diff --git a/docs/_examples/use-case-6.deprecated.yml b/docs/_examples/use-case-6.deprecated.yml new file mode 100644 index 00000000..d326f043 --- /dev/null +++ b/docs/_examples/use-case-6.deprecated.yml @@ -0,0 +1,35 @@ +# Example 6: Capture DNSTap stream and apply user privacy on it +# +# As prerequisites, we assume you have +# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, +# read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers + # and apply some transformations to apply user privacy by reducing user IP + # and the requested domain: doc/configuration.md#user-privacy + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + user-privacy: + anonymize-ip: true + minimaze-qname: true + + # Print DNS messages on standard output with TEXT format + loggers: + - name: console + stdout: + mode: text + + # Routes DNS messages from the tap collector to standard output + routes: + - from: [tap] + to: 
[console] \ No newline at end of file diff --git a/docs/_examples/use-case-6.pipeline.yml b/docs/_examples/use-case-6.pipeline.yml deleted file mode 100644 index 6c435e20..00000000 --- a/docs/_examples/use-case-6.pipeline.yml +++ /dev/null @@ -1,22 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000; -# applies transformations, and logs the processed data to the console in text format. - -global: - trace: - verbose: true - -pipelines: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - user-privacy: - anonymize-ip: true - minimaze-qname: true - routing-policy: - default: [ console ] - - - name: console - stdout: - mode: text diff --git a/docs/_examples/use-case-6.yml b/docs/_examples/use-case-6.yml index d326f043..5aef56f1 100644 --- a/docs/_examples/use-case-6.yml +++ b/docs/_examples/use-case-6.yml @@ -1,35 +1,22 @@ -# Example 6: Capture DNSTap stream and apply user privacy on it -# -# As prerequisites, we assume you have -# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, -# read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ +# This configuration sets up DNS traffic monitoring through DNStap on port 6000; +# applies transformations, and logs the processed data to the console in text format. 
-# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers - # and apply some transformations to apply user privacy by reducing user IP - # and the requested domain: doc/configuration.md#user-privacy - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - user-privacy: - anonymize-ip: true - minimaze-qname: true - - # Print DNS messages on standard output with TEXT format - loggers: - - name: console - stdout: - mode: text - - # Routes DNS messages from the tap collector to standard output - routes: - - from: [tap] - to: [console] \ No newline at end of file +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + user-privacy: + anonymize-ip: true + minimaze-qname: true + routing-policy: + forward: [ console ] + + - name: console + stdout: + mode: text diff --git a/docs/_examples/use-case-7.deprecated.yml b/docs/_examples/use-case-7.deprecated.yml new file mode 100644 index 00000000..a60dc460 --- /dev/null +++ b/docs/_examples/use-case-7.deprecated.yml @@ -0,0 +1,37 @@ +# Example 7: Aggregate several DNSTap stream and forward it to the same file +# +# As prerequisites, we assume you have +# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, +# read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + # Listen on tcp/6000 and tcp/6001 for incoming DNSTap protobuf messages + # from several servers + collectors: + - name: tap1 + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + - name: tap2 + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6001 + + # Write DNS logs to a local file in TEXT format + loggers: + - name: file + logfile: + file-path: "/var/run/dnscollector/dnstap.log" 
+ max-size: 100 + max-files: 10 + mode: text + + # Routes DNS messages from the tap 1 and 2 to the same file destination + routes: + - from: [ tap1, tap2 ] + to: [ file ] \ No newline at end of file diff --git a/docs/_examples/use-case-7.pipeline.yml b/docs/_examples/use-case-7.pipeline.yml deleted file mode 100644 index 16b4a80e..00000000 --- a/docs/_examples/use-case-7.pipeline.yml +++ /dev/null @@ -1,28 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000 and port 6001; -# and logs in text file. - -global: - trace: - verbose: true - -pipelines: - - name: tap1 - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - routing-policy: - default: [ file ] - - - name: tap2 - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6001 - routing-policy: - default: [ file ] - - - name: file - logfile: - file-path: "/tmp/dnstap.log" - max-size: 100 - max-files: 10 - mode: text diff --git a/docs/_examples/use-case-7.yml b/docs/_examples/use-case-7.yml index a60dc460..1a9c9d38 100644 --- a/docs/_examples/use-case-7.yml +++ b/docs/_examples/use-case-7.yml @@ -1,37 +1,28 @@ -# Example 7: Aggregate several DNSTap stream and forward it to the same file -# -# As prerequisites, we assume you have -# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, -# read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ +# This configuration sets up DNS traffic monitoring through DNStap on port 6000 and port 6001; +# and logs in text file. 
-# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - # Listen on tcp/6000 and tcp/6001 for incoming DNSTap protobuf messages - # from several servers - collectors: - - name: tap1 - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - - name: tap2 - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6001 +pipelines: + - name: tap1 + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ file ] - # Write DNS logs to a local file in TEXT format - loggers: - - name: file - logfile: - file-path: "/var/run/dnscollector/dnstap.log" - max-size: 100 - max-files: 10 - mode: text + - name: tap2 + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6001 + routing-policy: + forward: [ file ] - # Routes DNS messages from the tap 1 and 2 to the same file destination - routes: - - from: [ tap1, tap2 ] - to: [ file ] \ No newline at end of file + - name: file + logfile: + file-path: "/tmp/dnstap.log" + max-size: 100 + max-files: 10 + mode: text diff --git a/docs/_examples/use-case-8.deprecated.yml b/docs/_examples/use-case-8.deprecated.yml new file mode 100644 index 00000000..f686b3f0 --- /dev/null +++ b/docs/_examples/use-case-8.deprecated.yml @@ -0,0 +1,39 @@ +# Example 8: Multiple PowerDNS collectors +# +# As prerequisites, we assume you have +# - a PowerDNS DNS server which protobuf enabled + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + # Listen on tcp/6000 and tcp/6001 for internal/internet DNS + # with protobuf PowerDNS messages: https://dnsdist.org/reference/protobuf.html + collectors: + - name: pdns_internal + powerdns: + listen-ip: 0.0.0.0 + listen-port: 6000 + - name: pdns_internet + powerdns: + listen-ip: 0.0.0.0 + listen-port: 6001 + + # Write DNS logs to a local files in TEXT format + loggers: + - name: file_internal + logfile: + file-path: "/var/tap/dnscollector_internal.log" + - name: file_internet + logfile: + file-path: 
"/var/tap/dnscollector_internet.log" + + # Routes the pdns_internal stream to internal file + # and pdns_internet steram to internet file + routes: + - from: [ pdns_internal ] + to: [ file_internal ] + - from: [pdns_internet ] + to: [ file_internet ] \ No newline at end of file diff --git a/docs/_examples/use-case-8.pipeline.yml b/docs/_examples/use-case-8.pipeline.yml deleted file mode 100644 index 73326483..00000000 --- a/docs/_examples/use-case-8.pipeline.yml +++ /dev/null @@ -1,29 +0,0 @@ -# This configuration sets up DNS traffic monitoring through PowerDNS Protobuf on port 6000 and port 6001; -# and save-it in specific log files as text format. - -global: - trace: - verbose: true - -pipelines: - - name: pdns_internal - powerdns: - listen-ip: 0.0.0.0 - listen-port: 6000 - routing-policy: - default: [ file_internal ] - - - name: pdns_internet - powerdns: - listen-ip: 0.0.0.0 - listen-port: 6001 - routing-policy: - default: [ file_internet ] - - - name: file_internal - logfile: - file-path: "/tmp/dnscollector_internal.log" - - - name: file_internet - logfile: - file-path: "/tmp/dnscollector_internet.log" diff --git a/docs/_examples/use-case-8.yml b/docs/_examples/use-case-8.yml index f686b3f0..b8e8489b 100644 --- a/docs/_examples/use-case-8.yml +++ b/docs/_examples/use-case-8.yml @@ -1,39 +1,29 @@ -# Example 8: Multiple PowerDNS collectors -# -# As prerequisites, we assume you have -# - a PowerDNS DNS server which protobuf enabled +# This configuration sets up DNS traffic monitoring through PowerDNS Protobuf on port 6000 and port 6001; +# and save-it in specific log files as text format. 
-# If turned on, debug messages are printed in the standard output global: trace: verbose: true -multiplexer: - # Listen on tcp/6000 and tcp/6001 for internal/internet DNS - # with protobuf PowerDNS messages: https://dnsdist.org/reference/protobuf.html - collectors: - - name: pdns_internal - powerdns: - listen-ip: 0.0.0.0 - listen-port: 6000 - - name: pdns_internet - powerdns: - listen-ip: 0.0.0.0 - listen-port: 6001 +pipelines: + - name: pdns_internal + powerdns: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ file_internal ] - # Write DNS logs to a local files in TEXT format - loggers: - - name: file_internal - logfile: - file-path: "/var/tap/dnscollector_internal.log" - - name: file_internet - logfile: - file-path: "/var/tap/dnscollector_internet.log" + - name: pdns_internet + powerdns: + listen-ip: 0.0.0.0 + listen-port: 6001 + routing-policy: + forward: [ file_internet ] - # Routes the pdns_internal stream to internal file - # and pdns_internet steram to internet file - routes: - - from: [ pdns_internal ] - to: [ file_internal ] - - from: [pdns_internet ] - to: [ file_internet ] \ No newline at end of file + - name: file_internal + logfile: + file-path: "/tmp/dnscollector_internal.log" + + - name: file_internet + logfile: + file-path: "/tmp/dnscollector_internet.log" diff --git a/docs/_examples/use-case-9.deprecated.yml b/docs/_examples/use-case-9.deprecated.yml new file mode 100644 index 00000000..558ffb66 --- /dev/null +++ b/docs/_examples/use-case-9.deprecated.yml @@ -0,0 +1,37 @@ +# Example 9: Filtering incoming traffic with downsample and whitelist of domains +# +# As prerequisites, we assume you have +# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, +# read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ + +# If turned on, debug messages are printed in the standard output +global: + trace: + verbose: true + +multiplexer: + # Listen on tcp/6000 for 
incoming DNSTap protobuf messages from dns servers + # with some transformations to only keep 1 out of every downsample records + # and whitelistfile witch contains: + # *.google.com$ + # *.github.com$ + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + filtering: + downsample: 1 + keep-domain-file: ./tests/testsdata/filtering_fqdn.txt + + # Print DNS messages on standard output with TEXT format + loggers: + - name: console + stdout: + mode: text + + # Routes DNS messages from the tap collector to standard output + routes: + - from: [tap] + to: [console] \ No newline at end of file diff --git a/docs/_examples/use-case-9.pipeline.yml b/docs/_examples/use-case-9.pipeline.yml deleted file mode 100644 index b9999fca..00000000 --- a/docs/_examples/use-case-9.pipeline.yml +++ /dev/null @@ -1,21 +0,0 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000 and port 6001; -# applies filtering and save to file - -global: - trace: - verbose: true - -pipelines: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - filtering: - keep-domain-file: ./testsdata/filtering_fqdn.txt - routing-policy: - default: [ console ] - - - name: console - stdout: - mode: text diff --git a/docs/_examples/use-case-9.yml b/docs/_examples/use-case-9.yml index 9db9e98d..dc2ea9a2 100644 --- a/docs/_examples/use-case-9.yml +++ b/docs/_examples/use-case-9.yml @@ -1,37 +1,21 @@ -# Example 9: Filtering incoming traffic with downsample and whitelist of domains -# -# As prerequisites, we assume you have -# - a DNS server which supports DNSTap (unbound, bind, powerdns, etc) for more informations about dnstap, -# read the following page: https://dmachard.github.io/posts/0001-dnstap-testing/ +# This configuration sets up DNS traffic monitoring through DNStap on port 6000 and port 6001; +# applies filtering and save to file -# If turned on, debug messages are printed in the standard output global: trace: verbose: 
true -multiplexer: - # Listen on tcp/6000 for incoming DNSTap protobuf messages from dns servers - # with some transformations to only keep 1 out of every downsample records - # and whitelistfile witch contains: - # *.google.com$ - # *.github.com$ - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - filtering: - downsample: 1 - keep-domain-file: ./testsdata/filtering_fqdn.txt +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + filtering: + keep-domain-file: ./tests/testsdata/filtering_fqdn.txt + routing-policy: + forward: [ console ] - # Print DNS messages on standard output with TEXT format - loggers: - - name: console - stdout: - mode: text - - # Routes DNS messages from the tap collector to standard output - routes: - - from: [tap] - to: [console] \ No newline at end of file + - name: console + stdout: + mode: text diff --git a/docs/_images/dashboard_global.png b/docs/_images/dashboard_global.png new file mode 100644 index 00000000..1f760332 Binary files /dev/null and b/docs/_images/dashboard_global.png differ diff --git a/docs/_integration/elasticsearch/README.md b/docs/_integration/elasticsearch/README.md new file mode 100644 index 00000000..bdaa1bc3 --- /dev/null +++ b/docs/_integration/elasticsearch/README.md @@ -0,0 +1,24 @@ + +# DNS-collector with Elastic and Kibana + +- Download the [`docker-compose`](https://github.com/dmachard/go-dnscollector/blob/doc_atags/docs/_integration/elasticsearch/docker-compose.yml) file + +- Create the `data` folder. + +- Start the docker stack: + + ```bash + sudo docker compose up -d + ``` + +- Go to kibana web interface through `http://127.0.0.1:5601` + +- Click on `Explore on my own` and `Discover` + +- Finally create index pattern `dnscollector` and choose `dnstap.timestamp-rfc33939ns` + +- Finally, run DNScollector from source and generate some DNS logs from your DNS server with DNStap protocol. + + ```bash + go run . 
-config docs/_integration/elasticsearch/config.yml + ``` diff --git a/docs/_integration/elasticsearch/config.deprecated.yml b/docs/_integration/elasticsearch/config.deprecated.yml new file mode 100644 index 00000000..82f58e2e --- /dev/null +++ b/docs/_integration/elasticsearch/config.deprecated.yml @@ -0,0 +1,25 @@ + +global: + trace: + verbose: true + +multiplexer: + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + chan-buffer-size: 4096 + loggers: + - name: elastic + elasticsearch: + server: "http://192.168.1.220:9200/" + index: "dnscollector" + chan-buffer-size: 4096 + bulk-size: 5242880 + flush-interval: 10 + compression: gzip + bulk-channel-size: 10 + routes: + - from: [ tap ] + to: [ elastic ] \ No newline at end of file diff --git a/docs/_integration/elasticsearch/config.yml b/docs/_integration/elasticsearch/config.yml new file mode 100644 index 00000000..36b6474f --- /dev/null +++ b/docs/_integration/elasticsearch/config.yml @@ -0,0 +1,22 @@ + +global: + trace: + verbose: true + +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ elastic] + dropped: [] + + - name: elastic + elasticsearch: + server: "http://192.168.1.220:9200/" + index: "dnscollector" + bulk-size: 5242880 + flush-interval: 10 + compression: gzip + bulk-channel-size: 10 \ No newline at end of file diff --git a/docs/_integration/elasticsearch/docker-compose.yml b/docs/_integration/elasticsearch/docker-compose.yml new file mode 100644 index 00000000..7320b135 --- /dev/null +++ b/docs/_integration/elasticsearch/docker-compose.yml @@ -0,0 +1,24 @@ + +services: + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:8.12.2 + container_name: elasticsearch + restart: always + environment: + - discovery.type=single-node + - xpack.security.enabled=false + - xpack.security.enrollment.enabled=false + volumes: + - ./data:/usr/share/elasticsearch/data + ports: + - 9200:9200 + kibana: + container_name: 
kibana + image: docker.elastic.co/kibana/kibana:8.12.2 + restart: always + environment: + - ELASTICSEARCH_HOSTS=http://elasticsearch:9200 + ports: + - 5601:5601 + depends_on: + - elasticsearch \ No newline at end of file diff --git a/docs/_integration/fluentd/README.md b/docs/_integration/fluentd/README.md new file mode 100644 index 00000000..6687d953 --- /dev/null +++ b/docs/_integration/fluentd/README.md @@ -0,0 +1,26 @@ + +# DNS-collector with Fluentd + +- Download the [`docker-compose`](https://github.com/dmachard/go-dnscollector/blob/doc_atags/docs/_integration/fluentd/docker-compose.yml) file + +- Create the `data` folder. + +- Start the docker stack: + + ```bash + sudo docker compose up -d + + sudo docker compose logs + ... + fluentd | 2024-03-06 05:46:12.930048059 +0000 fluent.info: {"port":24224,"bind":"0.0.0.0","message":"[input1] listening port port=24224 bind=\"0.0.0.0\""} + fluentd | 2024-03-06 05:46:12 +0000 [warn]: #0 no patterns matched tag="fluent.info" + fluentd | 2024-03-06 05:46:12.933055666 +0000 fluent.info: {"worker":0,"message":"fluentd worker is now running worker=0"} + ``` + +- Finally, run DNScollector from source and generate some DNS logs from your DNS server with DNStap protocol. + + ```bash + go run . 
-config docs/_integration/fluentd/config.yml + ``` + +- Logs are available in ./data diff --git a/docs/_integration/fluentd/config.deprecated.yml b/docs/_integration/fluentd/config.deprecated.yml new file mode 100644 index 00000000..27c122ba --- /dev/null +++ b/docs/_integration/fluentd/config.deprecated.yml @@ -0,0 +1,33 @@ + +global: + trace: + verbose: true + +multiplexer: + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + chan-buffer-size: 4096 + loggers: + - name: fluentd + fluentd: + transport: tcp + remote-address: 127.0.0.1 + remote-port: 24224 + connect-timeout: 5 + retry-interval: 10 + flush-interval: 30 + tag: "dns.collector" + tls-insecure: false + tls-min-version: 1.2 + ca-file: "" + cert-file: "" + key-file: "" + buffer-size: 100 + chan-buffer-size: 4096 + + routes: + - from: [ tap ] + to: [ fluentd ] \ No newline at end of file diff --git a/docs/_integration/fluentd/config.yml b/docs/_integration/fluentd/config.yml new file mode 100644 index 00000000..4bf0cf9a --- /dev/null +++ b/docs/_integration/fluentd/config.yml @@ -0,0 +1,29 @@ + +global: + trace: + verbose: true + +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ fluentd ] + dropped: [] + + - name: fluentd + fluentd: + transport: tcp + remote-address: 127.0.0.1 + remote-port: 24224 + connect-timeout: 5 + retry-interval: 10 + flush-interval: 30 + tag: "dns.collector" + tls-insecure: false + tls-min-version: 1.2 + ca-file: "" + cert-file: "" + key-file: "" + buffer-size: 100 \ No newline at end of file diff --git a/docs/_integration/fluentd/docker-compose.yml b/docs/_integration/fluentd/docker-compose.yml new file mode 100644 index 00000000..9b1067ca --- /dev/null +++ b/docs/_integration/fluentd/docker-compose.yml @@ -0,0 +1,12 @@ + +services: + fluentd: + container_name: fluentd + image: fluent/fluentd:v1.16-debian-2 + user: 1000:1000 + volumes: + - ./data:/fluentd/log + ports: + - "24224:24224/tcp" + - 
"24224:24224/udp" + restart: unless-stopped diff --git a/docs/_integration/influxdb/README.md b/docs/_integration/influxdb/README.md new file mode 100644 index 00000000..13f4d43a --- /dev/null +++ b/docs/_integration/influxdb/README.md @@ -0,0 +1,32 @@ + +# DNS-collector with InfluxDB + +- Download the [`docker-compose`](https://github.com/dmachard/go-dnscollector/blob/doc_atags/docs/_integration/influxdb/docker-compose.yml) file + +- Create the `data` folder. + + ```bash + mkdir -p ./data + ``` + +- Start the docker stack: + + ```bash + sudo docker compose up -d + + sudo docker compose logs + ... + influxdb-1 | ts=2024-06-13T18:38:18.131480Z lvl=info msg=Listening log_id=0plj8Rp0000 service=tcp-listener transport=http addr=:8086 port=8086 + ``` + +- Go to http://127.0.0.1:8086 to create initial user with + organization: dnscollector + bucket: db_dns + Copy/paste the token in the DNScollector config. + +- Finally, run DNScollector from source and generate some DNS logs from your DNS server with DNStap protocol. + + ```bash + go run . 
-config docs/_integration/influxdb/config.yml + ``` + diff --git a/docs/_integration/influxdb/config.yml b/docs/_integration/influxdb/config.yml new file mode 100644 index 00000000..1427361c --- /dev/null +++ b/docs/_integration/influxdb/config.yml @@ -0,0 +1,26 @@ + +global: + trace: + verbose: true + +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ influxdb ] + dropped: [] + + - name: influxdb + influxdb: + server-url: "http://localhost:8086" + auth-token: "jgrt1gbfR4T8PQ41_SwqI58oU0dIFt5aNMqK3hfugPoMQBFl6OMGUQGBd_d6oI3Ylnh2ivvWbkqzwdCnUP-r-w==" + bucket: "db_dns" + organization: "dnscollector" + tls-support: false + tls-insecure: false + tls-min-version: 1.2 + ca-file: "" + cert-file: "" + key-file: "" \ No newline at end of file diff --git a/docs/_integration/influxdb/docker-compose.yml b/docs/_integration/influxdb/docker-compose.yml new file mode 100644 index 00000000..00176edc --- /dev/null +++ b/docs/_integration/influxdb/docker-compose.yml @@ -0,0 +1,14 @@ + +services: + influxdb: + image: influxdb:2.7.6 + user: 1000:1000 + ports: + - "8086:8086" + environment: + - INFLUXDB_DB=db0 + - INFLUXDB_ADMIN_USER=admin + - INFLUXDB_ADMIN_PASSWORD=badpassword + volumes: + - ./data:/var/lib/influxdb + restart: unless-stopped \ No newline at end of file diff --git a/docs/_integration/kafka/README.md b/docs/_integration/kafka/README.md new file mode 100644 index 00000000..4b53b5aa --- /dev/null +++ b/docs/_integration/kafka/README.md @@ -0,0 +1,22 @@ + +# DNS-collector with Kafka + +- Download the [`docker-compose`](https://github.com/dmachard/go-dnscollector/blob/doc_atags/docs/_integration/kafka/docker-compose.yml) file + +- Create the `data` folder. + +- Start the docker stack: + + ```bash + sudo docker compose up -d + ``` + +- Go to Apache Kafka interface through `http://127.0.0.1:8080` + +- The `dnscollector` topic should be available. 
+ +- Finally, run DNScollector from source and generate some DNS logs from your DNS server with DNStap protocol. + + ```bash + go run . -config docs/_integration/kafka/config.yml + ``` diff --git a/docs/_integration/kafka/config.deprecated.yml b/docs/_integration/kafka/config.deprecated.yml new file mode 100644 index 00000000..6efa0890 --- /dev/null +++ b/docs/_integration/kafka/config.deprecated.yml @@ -0,0 +1,36 @@ + +global: + trace: + verbose: true + +multiplexer: + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + chan-buffer-size: 4096 + loggers: + - name: kafka + kafkaproducer: + remote-address: 127.0.0.1 + remote-port: 9092 + connect-timeout: 5 + retry-interval: 10 + flush-interval: 30 + tls-support: false + tls-insecure: false + sasl-support: false + sasl-mechanism: PLAIN + sasl-username: false + sasl-password: false + mode: flat-json + buffer-size: 100 + topic: "dnscollector" + partition: 0 + chan-buffer-size: 4096 + compression: none + + routes: + - from: [ tap ] + to: [ kafka ] \ No newline at end of file diff --git a/docs/_integration/kafka/config.yml b/docs/_integration/kafka/config.yml new file mode 100644 index 00000000..edeeb59a --- /dev/null +++ b/docs/_integration/kafka/config.yml @@ -0,0 +1,34 @@ + +global: + trace: + verbose: true + +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + chan-buffer-size: 4096 + routing-policy: + forward: [ kafka ] + dropped: [] + + - name: kafka + kafkaproducer: + remote-address: 127.0.0.1 + remote-port: 9092 + connect-timeout: 5 + retry-interval: 10 + flush-interval: 30 + tls-support: false + tls-insecure: false + sasl-support: false + sasl-mechanism: PLAIN + sasl-username: false + sasl-password: false + mode: flat-json + buffer-size: 100 + topic: "dnscollector" + partition: 0 + chan-buffer-size: 4096 + compression: none \ No newline at end of file diff --git a/docs/_integration/kafka/docker-compose.yml b/docs/_integration/kafka/docker-compose.yml new file 
mode 100644 index 00000000..1a459032 --- /dev/null +++ b/docs/_integration/kafka/docker-compose.yml @@ -0,0 +1,60 @@ + +services: + kafka-ui: + image: provectuslabs/kafka-ui:v0.7.1 + container_name: kafka-ui + environment: + DYNAMIC_CONFIG_ENABLED: true + KAFKA_CLUSTERS_0_NAME: local + KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092 + KAFKA_CLUSTERS_0_METRICS_PORT: 9997 + ports: + - 8080:8080 + depends_on: + - kafka + + zookeeper: + image: confluentinc/cp-zookeeper:7.6.0 + hostname: zookeeper + container_name: zookeeper + ports: + - "2181:2181" + environment: + ZOOKEEPER_CLIENT_PORT: 2181 + ZOOKEEPER_TICK_TIME: 2000 + + kafka: + image: confluentinc/cp-kafka:7.6.0 + container_name: kafka + depends_on: + - zookeeper + ports: + - "9092:9092" + - "9997:9997" + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092 + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 + KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 + KAFKA_JMX_PORT: 9997 + KAFKA_JMX_HOSTNAME: kafka + volumes: + - "./data:/var/lib/kafka/data" + + + kafka-init-topics: + image: confluentinc/cp-kafka:7.6.0 + container_name: kafka-init-topic + volumes: + - ./message.json:/data/message.json + depends_on: + - kafka + command: "bash -c 'echo Waiting for Kafka to be ready... 
&& \ + cub kafka-ready -b kafka:29092 1 30 && \ + kafka-topics --create --topic dnscollector --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka:29092 < /data/message.json'" diff --git a/docs/_integration/kafka/message.json b/docs/_integration/kafka/message.json new file mode 100644 index 00000000..9e26dfee --- /dev/null +++ b/docs/_integration/kafka/message.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/docs/_integration/loki/README.md b/docs/_integration/loki/README.md new file mode 100644 index 00000000..6019911a --- /dev/null +++ b/docs/_integration/loki/README.md @@ -0,0 +1,25 @@ + +# DNS-collector with Loki + +- Download the [`docker-compose`](https://github.com/dmachard/go-dnscollector/blob/doc_atags/docs/_integration/loki/docker-compose.yml) file + +- Create the `data` folder. + + ```bash + mkdir -p ./data + ``` + +- Start the docker stack: + + ```bash + sudo docker compose up -d + ``` + +- Finally, run DNScollector from source and generate some DNS logs from your DNS server with DNStap protocol. + + ```bash + go run . -config docs/_integration/loki/config.yml + ``` + +- Connect to the web interface of grafana through http://127.0.0.1:3000 and `admin` login and `badpassword` + Go to the menu `Explorer` and add the `{job="dnscollector"}` filter, your DNS logs will be here. 
diff --git a/docs/_integration/loki/config.yml b/docs/_integration/loki/config.yml new file mode 100644 index 00000000..02d7c498 --- /dev/null +++ b/docs/_integration/loki/config.yml @@ -0,0 +1,35 @@ + +global: + trace: + verbose: true + +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ loki ] + dropped: [] + + - name: loki + lokiclient: + server-url: "http://localhost:3100/loki/api/v1/push" + job-name: "dnscollector" + mode: "text" + flush-interval: 5 + batch-size: 1048576 + retry-interval: 10 + text-format: "" + proxy-url: "" + tls-insecure: false + tls-min-version: 1.2 + ca-file: "" + cert-file: "" + key-file: "" + basic-auth-login: "" + basic-auth-pwd: "" + basic-auth-pwd-file: "" + tenant-id: "" + relabel-configs: [] + chan-buffer-size: 0 \ No newline at end of file diff --git a/docs/_integration/loki/datasource.yml b/docs/_integration/loki/datasource.yml new file mode 100644 index 00000000..f3bd87c1 --- /dev/null +++ b/docs/_integration/loki/datasource.yml @@ -0,0 +1,8 @@ +apiVersion: 1 + +datasources: + - name: Loki + type: loki + access: proxy + url: http://loki:3100 + isDefault: true \ No newline at end of file diff --git a/docs/_integration/loki/docker-compose.yml b/docs/_integration/loki/docker-compose.yml new file mode 100644 index 00000000..a14ef352 --- /dev/null +++ b/docs/_integration/loki/docker-compose.yml @@ -0,0 +1,21 @@ + +services: + loki: + image: grafana/loki:3.0.0 + user: "1000:1000" + ports: + - "3100:3100" + command: + - '--config.file=/etc/loki/config.yaml' + volumes: + - ./data:/tmp/loki + - ./loki-config.yaml:/etc/loki/config.yaml + + grafana: + image: grafana/grafana:11.0.0 + user: "1000:1000" + ports: + - "3000:3000/tcp" + volumes: + - ./datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml + - ./grafana.ini:/etc/grafana/grafana.ini \ No newline at end of file diff --git a/docs/_integration/loki/grafana.ini b/docs/_integration/loki/grafana.ini new file mode 100644 
index 00000000..24682c50 --- /dev/null +++ b/docs/_integration/loki/grafana.ini @@ -0,0 +1,3 @@ +[security] +admin_user = admin +admin_password = badpassword \ No newline at end of file diff --git a/docs/_integration/loki/loki-config.yaml b/docs/_integration/loki/loki-config.yaml new file mode 100644 index 00000000..b13de7d2 --- /dev/null +++ b/docs/_integration/loki/loki-config.yaml @@ -0,0 +1,44 @@ +auth_enabled: false + +server: + http_listen_port: 3100 + grpc_listen_port: 9096 + +common: + instance_addr: 127.0.0.1 + path_prefix: /tmp/loki + storage: + filesystem: + chunks_directory: /tmp/loki/chunks + rules_directory: /tmp/loki/rules + replication_factor: 1 + ring: + kvstore: + store: inmemory + +query_range: + results_cache: + cache: + embedded_cache: + enabled: true + max_size_mb: 100 + +frontend: + encoding: protobuf + +schema_config: + configs: + - from: 2020-10-24 + store: tsdb + object_store: filesystem + schema: v13 + index: + prefix: index_ + period: 24h + +limits_config: + ingestion_rate_mb: 16 + allow_structured_metadata: false + +analytics: + reporting_enabled: false diff --git a/docs/_integration/prometheus/README.md b/docs/_integration/prometheus/README.md new file mode 100644 index 00000000..b3ae3b34 --- /dev/null +++ b/docs/_integration/prometheus/README.md @@ -0,0 +1,24 @@ + +# DNS-collector with Prometheus + +- Download the [`docker-compose`](https://github.com/dmachard/go-dnscollector/blob/doc_atags/docs/_integration/prometheus/docker-compose.yml) file + +- Create the `data` folder. + + ```bash + mkdir -p ./data + ``` + +- Configure targets on prometheus.yml with IP of your DNScollector and start the docker stack: + + ```bash + sudo docker compose up -d + ``` + +- Finally, run DNScollector from source and generate some DNS logs from your DNS server with DNStap protocol. + + ```bash + go run . 
-config docs/_integration/prometheus/config.yml + ``` + +- Import build-in dashboards diff --git a/docs/_integration/prometheus/config.yml b/docs/_integration/prometheus/config.yml new file mode 100644 index 00000000..21993c6c --- /dev/null +++ b/docs/_integration/prometheus/config.yml @@ -0,0 +1,61 @@ + +global: + trace: + verbose: true + telemetry: + enabled: true + web-path: "/metrics" + web-listen: ":9165" + prometheus-prefix: "dnscollector_exporter" + tls-support: false + tls-cert-file: "" + tls-key-file: "" + client-ca-file: "" + basic-auth-enable: false + basic-auth-login: admin + basic-auth-pwd: changeme + +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ prom ] + dropped: [] + + - name: prom + prometheus: + listen-ip: 0.0.0.0 + listen-port: 8081 + basic-auth-enable: false + basic-auth-login: admin + basic-auth-pwd: changeme + tls-support: false + tls-mutual: false + tls-min-version: 1.2 + cert-file: "" + key-file: "" + prometheus-prefix: "dnscollector" + top-n: 10 + chan-buffer-size: 0 + histogram-metrics-enabled: false + requesters-metrics-enabled: true + domains-metrics-enabled: true + noerror-metrics-enabled: true + servfail-metrics-enabled: true + nonexistent-metrics-enabled: true + timeout-metrics-enabled: true + prometheus-labels: ["stream_id"] + requesters-cache-size: 250000 + requesters-cache-ttl: 3600 + domains-cache-size: 500000 + domains-cache-ttl: 3600 + noerror-domains-cache-size: 100000 + noerror-domains-cache-ttl: 3600 + servfail-domains-cache-size: 10000 + servfail-domains-cache-ttl: 3600 + nonexistent-domains-cache-size: 10000 + nonexistent-domains-cache-ttl: 3600 + default-domains-cache-size: 1000 + default-domains-cache-ttl: 3600 \ No newline at end of file diff --git a/docs/_integration/prometheus/docker-compose.yml b/docs/_integration/prometheus/docker-compose.yml new file mode 100644 index 00000000..4dcd2a75 --- /dev/null +++ b/docs/_integration/prometheus/docker-compose.yml @@ 
-0,0 +1,16 @@ + +services: + + prometheus: + image: prom/prometheus:v2.52.0 + user: "1000:1000" + ports: + - "9090:9090/tcp" + volumes: + - ./data:/prometheus + - ./prometheus.yml:/etc/prometheus/prometheus.yml + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--storage.tsdb.retention.time=365d' + - '--web.enable-lifecycle' \ No newline at end of file diff --git a/docs/_integration/prometheus/prometheus.yml b/docs/_integration/prometheus/prometheus.yml new file mode 100644 index 00000000..24d34f27 --- /dev/null +++ b/docs/_integration/prometheus/prometheus.yml @@ -0,0 +1,13 @@ +global: + scrape_interval: 15s + +scrape_configs: + - job_name: 'dnscollector_exporter' + static_configs: + - targets: ['192.168.1.16:9165'] + scrape_interval: 5s + + - job_name: 'dnscollector_traffic' + static_configs: + - targets: ['192.168.1.16:8081'] + scrape_interval: 5s \ No newline at end of file diff --git a/docs/collectors.md b/docs/collectors.md deleted file mode 100644 index bc73e945..00000000 --- a/docs/collectors.md +++ /dev/null @@ -1,11 +0,0 @@ -# DNS-collector - Supported collectors - -| Loggers | Descriptions | -| :------------------------------------------|:------------------------------------------------------| -| [DNStap](collectors/collector_dnstap.md) | DNStap receiver and proxifier | -| [PowerDNS](collectors/collector_powerdns.md) | Protobuf PowerDNS receiver | -| [Tail](collectors/collector_tail.md) | Tail on plain text file | -| [XDP Sniffer](collectors/collector_xdp.md) | Live capture on network interface with XDP | -| [AF_PACKET Sniffer](collectors/collector_afpacket.md) | Live capture on network interface with AF_PACKET socket | -| [File Ingestor](collectors/collector_fileingestor.md) | File ingestor like pcap | -| [DNS Message](collectors/collector_dnsmessage.md) | DNS Message like pcap | diff --git a/docs/collectors/collector_afpacket.md b/docs/collectors/collector_afpacket.md index e9a4cd2a..90dbc569 100644 
--- a/docs/collectors/collector_afpacket.md +++ b/docs/collectors/collector_afpacket.md @@ -15,8 +15,26 @@ sudo setcap cap_net_admin,cap_net_raw=eip go-dnscollector Options: -* `port` (int) filter on source and destination port. Defaults to `53`. -* `device` (str) interface name to sniff. Defaults to `wlp2s0`. - > if value is empty, bind on all interfaces. -* `chan-buffer-size` (int) incoming channel size, number of packet before to drop it. Default to `65535`. - > Specifies the maximum number of packets that can be buffered before dropping additional packets. +* `port` (int) + > filter on source and destination port. + +* `device` (str) + > Interface name to sniff. If value is empty, bind on all interfaces. + +* `enable-fragment-support` (bool) + > Enable IP defrag support + +* `chan-buffer-size` (int) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. + +Defaults: + +```yaml +- name: sniffer + afpacket-sniffer: + port: 53 + device: wlp2s0 + enable-defrag-ip: true + chan-buffer-size: 0 +``` diff --git a/docs/collectors/collector_dnsmessage.md b/docs/collectors/collector_dnsmessage.md index 7073d087..d2319c8e 100644 --- a/docs/collectors/collector_dnsmessage.md +++ b/docs/collectors/collector_dnsmessage.md @@ -1,5 +1,76 @@ # Collector: DNSMessage -> Only available with pipelines! +Collector to match specific DNS messages. -Collector to handle internal DNS data structure. +Options: + +* `chan-buffer-size` (int) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. + +* `matching` (map) + * `include` (map) + > Defines the list of fields (flat-json) which must be present in the DNS message (regex are supported). + + * `exclude` (map) + > Defines the list of fields (flat-json) which must not be present in the DNS message (regex are supported). 
+ + +The matching functionality support any type of values. For each fields, the advanced settings can be used: +* `greater-than` (int) +> Enable to match an integer value greater than the provided value. + +* `match-source` (string) +> This specifies a URL or local file containing a list of strings to match string field + +* `source-kind` (string) +> This indicates that the `match-source` is a list of strings or a list of regular expressions. +> expected values: `regexp_list`, `string_list` + + +To match specific answers only with a TTL greater than 300 and RDATA equal to a list of IPs. + +```yaml +include: + dns.resource-records.an.*.ttl: + greater-than: 300 + dns.resource-records.an.*.rdata: + - "^142\\.250\\.185\\.(196|132)$" + - "^143\\.251\\.185\\.(196|132)$" +``` +Second example to match a tag at position 0 + +```yaml +include: + atags.tags.0: "TXT:apple" +``` + +Finally a complete full example: + +```yaml + - name: filter + dnsmessage: + matching: + include: + dns.flags.qr: false + dns.opcode: 0 + dns.length: + greater-than: 50 + dns.qname: + match-source: "file://./testsdata/filtering_keep_domains_regex.txt" + source-kind: "regexp_list" + dnstap.operation: + match-source: "http://127.0.0.1/operation.txt" + source-kind: "string_list" + exclude: + dns.qtype: [ "TXT", "MX" ] + dns.qname: + - ".*\\.github\\.com$" + - "^www\\.google\\.com$" + transforms: + atags: + tags: [ "TXT:apple", "TXT:google" ] + routing-policy: + dropped: [ outputfile ] + default: [ console ] +``` \ No newline at end of file diff --git a/docs/collectors/collector_dnstap.md b/docs/collectors/collector_dnstap.md index 7df14433..754d1bca 100644 --- a/docs/collectors/collector_dnstap.md +++ b/docs/collectors/collector_dnstap.md @@ -7,33 +7,69 @@ The traffic can be a tcp or unix DNStap stream. TLS is also supported. Options: -- `listen-ip` (str) local address to bind to. Defaults to `0.0.0.0`. +* `listen-ip` (str) > Set the local address that the server will bind to. 
If not provided, the server will bind to all available network interfaces (0.0.0.0). -- `listen-port` (int) local port to bind to. Defaults to `6000`. + +* `listen-port` (int) > Set the local port that the server will listen on. If not provided, use the default port. -- `sock-path` (str) Unix socket path. Default to `null`. + +* `sock-path` (str) > Specify the path for the Unix socket to be created. -- `tls-support` (bool) set to true to enable TLS. Defaults to `false`. + +* `tls-support` (bool) > Enables or disables TLS (Transport Layer Security) support. If set to true, TLS will be used for secure communication. -- `tls-min-version` (str) Minimun TLS version to use. Default to `1.2`. + +* `tls-min-version` (str) > Specifies the minimum TLS version that the server will support. -- `cert-file` (str) path to a certificate server file to use. Default to `(empty)`. + +* `cert-file` (str) > Specifies the path to the certificate file to be used for TLS. This is a required parameter if TLS support is enabled. -- `key-file`(str) path to a key server file to use. Default to `(empty)`. + +* `key-file`(str) > Specifies the path to the key file corresponding to the certificate file. This is a required parameter if TLS support is enabled. -- `sock-rcvbuf` (int) sets the socket receive buffer in bytes SO_RCVBUF. Default to `0`. - > This advanced parameter allows fine-tuning of network performance by adjusting the amount of data the socket can receive before signaling to the sender to slow down. + +* `sock-rcvbuf` (int) + > This advanced parameter allows fine-tuning of network performance by adjusting the amount of data the socket can receive before signaling to the sender to slow down. Sets the socket receive buffer in bytes SO_RCVBUF. > Set to zero to use the default system value. -- `reset-conn` (bool) reset TCP connection on exit. Default to `true`. 
+ +* `reset-conn` (bool) > Set whether to send a TCP Reset to force the cleanup of the connection on the remote side when the server exits. -- `chan-buffer-size` (int) incoming channel size, number of packet before to drop it. Default to `65535`. - > Specifies the maximum number of packets that can be buffered before dropping additional packets. -- `disable-dnsparser"` (bool) disable the minimalist DNS parser. Defaults to `false`. - > Some JSON keys should not be available, such as `dns.id`, `dns.flags`, ... -- `extended-support` (bool) decode the extended extra field sent by DNScollector. Defaults to `false`. - > If this setting is enabled, DNScollector will expect receiving the specific [protobuf structure](./../../dnsutils/extended_dnstap.proto) in the extra field, which must be sent by another DNS collector. + +* `chan-buffer-size` (int) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. + +* `disable-dnsparser"` (bool) + > Disable the minimalist DNS parser. Some JSON keys should not be available, such as `dns.id`, `dns.flags`, ... + +* `extended-support` (bool) + > Decode the extended extra field sent by DNScollector. If this setting is enabled, DNScollector will expect receiving the specific [protobuf structure](./../../dnsutils/extended_dnstap.proto) in the extra field, which must be sent by another DNS collector. > This field will contain additional metadata generated by various transformations such as filtering, ATags, and others. +* `compression` (string) + > Specifies the compression algorithm to use. + > Compression for DNStap messages: `none`, `gzip`, `lz4`, `snappy`, `zstd`. 
+ +Defaults: + +```yaml +- name: dnstap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + sock-path: null + tls-support: false + tls-min-version: 1.2 + cert-file: "" + key-file: "" + sock-rcvbuf: 0 + reset-conn: true + chan-buffer-size: 0 + disable-dnsparser: true + extended-support: false + compression: none +``` + ## DNS tap Proxifier Collector that receives DNSTAP traffic and relays it without decoding or transformations. @@ -45,17 +81,41 @@ For config examples, take a look to the following [one](../_examples/use-case-12 Options: -- `listen-ip` (str) local address to bind to. Defaults to `0.0.0.0`. - > Set the local address that the server will bind to. If not provided, the server will bind to all available network interfaces (0.0.0.0). -- `listen-port` (int) local port to bind to. Defaults to `6000`. +* `listen-ip` (str) + > Set the local address that the server will bind to. + > If not provided, the server will bind to all available network interfaces (0.0.0.0). + +* `listen-port` (int) > Set the local port that the server will listen on. If not provided, use the default port. -- `sock-path` (str) Unix socket path. Default to `null`. + +* `sock-path` (str) > Specify the path for the Unix socket to be created. -- `tls-support` (bool) set to true to enable TLS. Defaults to `false`. - > Enables or disables TLS (Transport Layer Security) support. If set to true, TLS will be used for secure communication. -- `tls-min-version` (str) Minimun TLS version to use. Default to `1.2`. + +* `tls-support` (bool) + > Enables or disables TLS (Transport Layer Security) support. + > If set to true, TLS will be used for secure communication. + +* `tls-min-version` (str) > Specifies the minimum TLS version that the server will support. -- `cert-file` (str) path to a certificate server file to use. Default to `(empty)`. - > Specifies the path to the certificate file to be used for TLS. This is a required parameter if TLS support is enabled. 
-- `key-file`(str) path to a key server file to use. Default to `(empty)`. - > Specifies the path to the key file corresponding to the certificate file. This is a required parameter if TLS support is enabled. + +* `cert-file` (str) + > Specifies the path to the certificate file to be used for TLS. + > This is a required parameter if TLS support is enabled. + +* `key-file`(str) + > Specifies the path to the key file corresponding to the certificate file. + > This is a required parameter if TLS support is enabled. + +Defaults + +```yaml +- name: relay + dnstap-relay: + listen-ip: 0.0.0.0 + listen-port: 6000 + sock-path: null + tls-support: false + tls-min-version: 1.2 + cert-file: "" + key-file: "" +``` diff --git a/docs/collectors/collector_fileingestor.md b/docs/collectors/collector_fileingestor.md index c31dd146..d92d0c3d 100644 --- a/docs/collectors/collector_fileingestor.md +++ b/docs/collectors/collector_fileingestor.md @@ -14,13 +14,30 @@ For config examples, take a look to the following links: Options: -- `watch-dir` (str) directory to watch for pcap files ingest. Defaults to `/tmp`. +* `watch-dir` (str) > Specifies the directory where pcap files are monitored for ingestion. -- `watch-mode` (str) watch the directory pcap or dnstap file. Defaults to `pcap`. - > `*.pcap` extension or dnstap stream with `*.fstrm` extension are expected. -- `pcap-dns-port` (int) dns source or destination port. Defaults port to `53`. - > Expects a port number use for DNS communication. -- `delete-after:` (boolean) delete pcap file after ingest. Default to `false`. + +* `watch-mode` (str) + > Watch the directory pcap or dnstap file. `*.pcap` extension or dnstap stream with `*.fstrm` extension are expected. + +* `pcap-dns-port` (int) + > Expects a source or destination port number use for DNS communication. + +* `delete-after:` (boolean) > Determines whether the pcap file should be deleted after ingestion. 
-- `chan-buffer-size` (int) incoming channel size, number of packet before to drop it. Default to `65535`. - > Specifies the maximum number of packets that can be buffered before dropping additional packets. + +* `chan-buffer-size` (int) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. + +Defaults: + +```yaml +- name: ingest + file-ingestor: + watch-dir: /tmp + watch-mode: pcap + pcap-dns-port: 53 + delete-after: false + chan-buffer-size: 0 +``` diff --git a/docs/collectors/collector_powerdns.md b/docs/collectors/collector_powerdns.md index b4742e98..02e6c6d1 100644 --- a/docs/collectors/collector_powerdns.md +++ b/docs/collectors/collector_powerdns.md @@ -4,40 +4,71 @@ Collector to logging protobuf streams from PowerDNS servers. The DNS-collector h Settings: -- `listen-ip` (str) local address to bind to. Defaults to `0.0.0.0`. - > Set the local address that the server will bind to. If not provided, the server will bind to all available network interfaces (0.0.0.0). -- `listen-port` (int) local port to bind to. Defaults to `6001`. +* `listen-ip` (str) + > Set the local address that the server will bind to. + > If not provided, the server will bind to all available network interfaces (0.0.0.0). + +* `listen-port` (int) > Set the local port that the server will listen on. If not provided, use the default port. -- `tls-support` (bool) set to true to enable TLS. Defaults to `false`. - > Enables or disables TLS (Transport Layer Security) support. If set to true, TLS will be used for secure communication. -- `tls-min-version` (str) Minimun TLS version to use. Default to `1.2`. + +* `tls-support` (bool) + > Enables or disables TLS (Transport Layer Security) support. + > If set to true, TLS will be used for secure communication. + +* `tls-min-version` (str) > Specifies the minimum TLS version that the server will support. 
-- `cert-file` (str) path to a certificate server file to use. Default to `(empty)`. - > Specifies the path to the certificate file to be used for TLS. This is a required parameter if TLS support is enabled. -- `key-file`(str) path to a key server file to use. Default to `(empty)`. - > Specifies the path to the key file corresponding to the certificate file. This is a required parameter if TLS support is enabled. -- `sock-rcvbuf` (int) sets the socket receive buffer in bytes SO_RCVBUF. Default to `0`. - > This advanced parameter allows fine-tuning of network performance by adjusting the amount of data the socket can receive before signaling to the sender to slow down. + +* `cert-file` (str) + > Specifies the path to the certificate file to be used for TLS. + > This is a required parameter if TLS support is enabled. + +* `key-file`(str) + > Specifies the path to the key file corresponding to the certificate file. + > This is a required parameter if TLS support is enabled. + +* `sock-rcvbuf` (int) + > This advanced parameter allows fine-tuning of network performance by adjusting the amount of data the socket can receive before signaling to the sender to slow down. Sets the socket receive buffer in bytes SO_RCVBUF. > Set to zero to use the default system value. -- `reset-conn` (bool) reset TCP connection on exit. Default to `true`. + +* `reset-conn` (bool) > Set whether to send a TCP Reset to force the cleanup of the connection on the remote side when the server exits. -- `chan-buffer-size` (int) incoming channel size, number of packet before to drop it. Default to `65535`. - > Specifies the maximum number of packets that can be buffered before dropping additional packets. -- `add-dns-payload` (bool) generate and add fake DNS payload. Default to `false`. - > PowerDNS protobuf message does not contain a DNS payload; use this setting to add a fake DNS payload. 
+ +* `chan-buffer-size` (int) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. + +* `add-dns-payload` (bool) + > PowerDNS protobuf message does not contain a DNS payload; use this setting to add a raw DNS payload. + +Defaults: + +```yaml +- name: powerdns + powerdns: + listen-ip: 0.0.0.0 + listen-port: 6001 + tls-support: false + tls-min-version: 1.2 + cert-file: "" + key-file: "" + reset-conn: true + chan-buffer-size: 0 + add-dns-payload: false +``` ## Custom text format If you logs your DNS traffic in basic text format, you can use the specific directives: -- `powerdns-tags[:INDEX]`: get all tags separated by comma, or the tag according to the provided INDEX -- `powerdns-original-request-subnet`: get original request subnet like edns subclient -- `powerdns-applied-policy`: get applied policy -- `powerdns-applied-policy-hit`: get applied policy hit -- `powerdns-applied-policy-kind`: get applied policy kind -- `powerdns-applied-policy-trigger`: get applied policy trigger -- `powerdns-applied-policy-type`: get applied policy type -- `powerdns-metadata[:KEY]`: get all metadata separated by comma or specific one if a valid [KEY](https://dnsdist.org/rules-actions.html#RemoteLogAction) is provided +* `powerdns-tags[:INDEX]`: get all tags separated by comma, or the tag according to the provided INDEX +* `powerdns-original-request-subnet`: get original request subnet like edns subclient +* `powerdns-applied-policy`: get applied policy +* `powerdns-applied-policy-hit`: get applied policy hit +* `powerdns-applied-policy-kind`: get applied policy kind +* `powerdns-applied-policy-trigger`: get applied policy trigger +* `powerdns-applied-policy-type`: get applied policy type +* `powerdns-metadata[:KEY]`: get all metadata separated by comma or specific one if a valid [KEY](https://dnsdist.org/rules-actions.html#RemoteLogAction) is provided +* `powerdns-http-version`: http version 
used with DoH queries Configuration example: @@ -64,7 +95,8 @@ If you logs your DNS traffic in JSON output, the following part will be added in "metadata": { "agent":"Go-http-client/1.1", "selected_pool":"pool_internet" - } + }, + "http-version": "HTTP3" } ``` @@ -92,7 +124,7 @@ Example to enable logging in your **pdns-recursor** lua-config-file=/etc/pdns-recursor/recursor.lua ``` -*****/etc/pdns-recursor/recursor.lua* +*/etc/pdns-recursor/recursor.lua* ```lua protobufServer(":6001", {exportTypes={pdns.A, pdns.AAAA, pdns.CNAME}}) diff --git a/docs/collectors/collector_tail.md b/docs/collectors/collector_tail.md index 85917dec..d7039646 100644 --- a/docs/collectors/collector_tail.md +++ b/docs/collectors/collector_tail.md @@ -10,11 +10,34 @@ Enable the tail by provided the path of the file to follow Options: -* `file-path`: (string) file to follow. Defaults to `null`. +* `file-path` (string) > Specifies the path to the file that will be monitored. -* `time-layout`: (string) Use the exact layout numbers. Defaults to `2006-01-02T15:04:05.999999999Z07:00`. + +* `time-layout` (string) > Specifies the layout format for time representation, following the layout numbers defined in https://golang.org/src/time format.go. -* `pattern-query`: (string) regexp pattern for queries. Defaults to `^(?P[^ ]*) (?P[^ ]*) (?P.*_QUERY) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*)b (?P[^ ]*) (?P[^ ]*) (?P[^ ]*)$`. + +* `pattern-query` (string) > Specifies the regular expression pattern used to match queries. -* `pattern-reply`: (string) regexp pattern for replies. Defaults to `^(?P[^ ]*) (?P[^ ]*) (?P.*_RESPONSE) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*)b (?P[^ ]*) (?P[^ ]*) (?P[^ ]*)$`. + +* `pattern-reply` (string) > Specifies the regular expression pattern used to match replies. + +* `chan-buffer-size` (int) + > Specifies the maximum number of packets that can be buffered before discard additional packets. 
+ > Set to zero to use the default global value. + +Defaults: + +```yaml +- name: tailf + tail: + file-path: null + time-layout: "2006-01-02T15:04:05.999999999Z07:00" + pattern-query: "^(?P[^ ]*) (?P[^ ]*) (?P.*_QUERY) (?P[^ ]*) + (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) + (?P[^ ]*)b (?P[^ ]*) (?P[^ ]*) (?P[^ ]*)$" + pattern-reply: "^(?P[^ ]*) (?P[^ ]*) (?P.*_RESPONSE) (?P[^ ]*) + (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*)b + (?P[^ ]*) (?P[^ ]*) (?P[^ ]*)$" + chan-buffer-size: 0 +``` diff --git a/docs/collectors/collector_tzsp.md b/docs/collectors/collector_tzsp.md index 3d8c3a92..ef861bfb 100644 --- a/docs/collectors/collector_tzsp.md +++ b/docs/collectors/collector_tzsp.md @@ -5,11 +5,25 @@ Its primary purpose is to suppport DNS packet capture from Mikrotik brand device Options: -- `listen-ip` (str) listen on ip. Defaults to `0.0.0.0`. -- `listen-port` (int) listening on port. Defaults to `10000`. -- `chan-buffer-size` (int) incoming channel size, number of packet before to drop it. Default to `65535`. - > Specifies the maximum number of packets that can be buffered before dropping additional packets. +* `listen-ip` (str) + > Set the local address that the server will bind to. +* `listen-port` (int) + > Set the local port that the server will bind to. + +* `chan-buffer-size` (int) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. + +Defaults: + +```yaml +- name: sniffer + tzsp: + listen-ip: 0.0.0.0 + listen-port: 10000 + chan-buffer-size: 0 +``` Example rules for Mikrotik brand devices to send the traffic (only works if routed or the device serves as DNS server). 
diff --git a/docs/collectors/collector_xdp.md b/docs/collectors/collector_xdp.md index e335285c..6df1d43d 100644 --- a/docs/collectors/collector_xdp.md +++ b/docs/collectors/collector_xdp.md @@ -16,7 +16,18 @@ sudo setcap cap_sys_resource,cap_net_raw,cap_perfmon+ep go-dnscollector Options: -- `device` (str) interface name to sniff. Defaults to `wlp2s0`. - > Interface to use for XDP. -- `chan-buffer-size` (int) incoming channel size, number of packet before to drop it. Default to `65535`. - > Specifies the maximum number of packets that can be buffered before dropping additional packets. +* `device` (str) + > Interface name to use for XDP sniffing. + +* `chan-buffer-size` (int) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. + +Defaults: + +```yaml +- name: sniffer + xdp-sniffer: + device: wlp2s0 + chan-buffer-size: 0 +``` diff --git a/docs/configuration.md b/docs/configuration.md index 9ecd739f..2fdadbf8 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -2,18 +2,17 @@ The configuration of DNS-collector is done through one yaml file named [`config.yml`](https://github.com/dmachard/go-dnscollector/blob/main/config.yml). When the DNS-collector starts, it will look for the config.yml from the current working directory. -A typically configuration in [multiplexer](./docs/running_mode.md) mode would have one or more collector to receive DNS traffic, and severals loggers to process the incoming traffics. You can take a look to the list of config [`examples`](examples.md). +A typically configuration in [multiplexer](./running_mode.md) mode would have one or more collector to receive DNS traffic, and severals loggers to process the incoming traffics. You can take a look to the list of config [`examples`](examples.md). 
You can find the global settings below -- [Global](#global) - - [Trace](#trace) - - [Custom text format](#custom-text-format) - - [Server identity](#server-identity) +- [Trace](#trace) +- [Custom text format](#custom-text-format) +- [Server identity](#server-identity) +- [Pid file](#pid-file) +- [Telemetry](#telemetry) -## Global - -### Trace +## Trace Logs can be enable to have more informations like debug, errors messages generated by the application @@ -46,7 +45,7 @@ INFO: 2022/06/25 20:54:18.174256 [dtap] dnstap collector - running in background INFO: 2022/06/25 20:54:18.174286 [dtap] dnstap collector - is listening on [::]:6000 ``` -### Server Identity +## Server Identity Set the server identity name. The hostname will be used if empty @@ -55,7 +54,20 @@ global: server-identity: "dns-collector" ``` -### Custom text format +## Worker + +The `interval-monitor` in second(s) is used to count every XX second the number of in/out packets. + +The `buffer-size` settings enable to adjust the size of the buffer before discard additional packets. If you encounter the error message buffer is full, xxx packet(s) dropped, consider increasing this parameter to prevent message drops. + +```yaml +global: + worker: + interval-monitor: 10 + buffer-size: 4096 +``` + +## Custom text format The text format can be customized with the following directives. 
@@ -67,6 +79,7 @@ Default directives: - `timestamp-unixns`: unix timestamp with nano support - `localtime`: local time - `identity`: dnstap identity +- `peer-name`: hostname or ip address of the dnstap sender - `version`: dnstap version - `extra`: dnstap extra as string - `operation`: dnstap operation @@ -75,6 +88,7 @@ Default directives: - `policy-action`: dnstap policy action - `policy-match`: dnstap policy match - `policy-value`: dnstap policy value +- `query-zone`: dnstap query zone - `opcode`: dns opcode (integer) - `rcode`: dns return code - `queryip`: dns query ip @@ -86,8 +100,9 @@ Default directives: - `protocol`: protocol UDP, TCP - `length`: the length of the query or reply in bytes - `length-unit`: the length of the query or reply in bytes with unit (`b`) -- `qtype`: dns qtype -- `qname`: dns qname +- `qtype`: dns query type +- `qclass`: dns query class +- `qname`: dns query name - `latency`: computed latency between queries and replies - `answercount`: the number of answer - `ttl`: answer ttl, only the first one @@ -133,3 +148,33 @@ Output example: 2023-04-08T18:27:29.279039Z unbound CLIENT_RESPONSE NOERROR 127.0.0.1 39028 IPv4 UDP 54b google.fr A 0.000000 ``` + +## Pid file + +Set path to create DNS-collector PID. +By default, this settings is empty. 
+ +```yaml +global: + pid-file: "/path/to/your/pidfile.pid" +``` + +## Telemetry + +Enable and configure telemetry + +```yaml +global: + telemetry: + enabled: true + web-path: "/metrics" + web-listen: ":9165" + prometheus-prefix: "dnscollector_exporter" + tls-support: false + tls-cert-file: "" + tls-key-file: "" + client-ca-file: "" + basic-auth-enable: false + basic-auth-login: admin + basic-auth-pwd: changeme +``` diff --git a/docs/dashboards/grafana_exporter.json b/docs/dashboards/grafana_exporter.json new file mode 100644 index 00000000..e9000c2e --- /dev/null +++ b/docs/dashboards/grafana_exporter.json @@ -0,0 +1,954 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "11.0.0" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 9, + "panels": [], + "title": "Go", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + 
"drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 1 + }, + "id": 6, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "go_goroutines{job=~\"$job\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "dnscollector", + "range": true, + "refId": "A" + } + ], + "title": "Goroutines", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": 
"off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 1 + }, + "id": 10, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "go_memstats_sys_bytes{job=~\"$job\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "dnscollector", + "range": true, + "refId": "A" + } + ], + "title": "Total Used Memory", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 1 + }, + "id": 11, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + 
"showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "rate(process_cpu_seconds_total{job=\"$job\"}[2m])", + "legendFormat": "dnscollector", + "range": true, + "refId": "A" + } + ], + "title": "Process cpu", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 7, + "panels": [], + "title": "Workers", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "tap" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 11 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": 
"bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "increase(dnscollector_exporter_worker_ingress_traffic_total{job=~\"$job\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "{{worker}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Worker - Ingress traffic", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 11 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": 
"increase(dnscollector_exporter_worker_egress_traffic_total{job=~\"$job\"}[$__rate_interval])", + "fullMetaSearch": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "{{worker}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Worker - Egress traffic", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 11 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "increase(dnscollector_exporter_worker_discarded_traffic_total{job=~\"$job\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "{{worker}}", + "range": true, + "refId": "B", + "useBackend": false + 
} + ], + "title": "Worker - Discarded", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 19 + }, + "id": 8, + "panels": [], + "title": "Policies", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 20 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "increase(dnscollector_exporter_policy_forwarded_total{job=~\"$job\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "{{worker}}", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Policy - Forwarded", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + 
"uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 20 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "maxHeight": 600, + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "increase(dnscollector_exporter_policy_dropped_total{job=~\"$job\"}[$__rate_interval])", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "{{worker}}", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Policy - Dropped", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "dnscollector-exporter", + "value": "dnscollector-exporter" + }, + "description": "", + "hide": 0, + "label": "job", + "name": "job", + "options": [ + { + "selected": true, + "text": 
"dnscollector-exporter", + "value": "dnscollector-exporter" + } + ], + "query": "dnscollector-exporter", + "skipUrlSync": false, + "type": "textbox" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timeRangeUpdatedDuringEditOrView": false, + "timepicker": {}, + "timezone": "browser", + "title": "DNScollector - Exporter", + "uid": "bdo8oaa6fq7lse", + "version": 11, + "weekStart": "" +} \ No newline at end of file diff --git a/dashboards/grafana_loki.json b/docs/dashboards/grafana_loki.json similarity index 100% rename from dashboards/grafana_loki.json rename to docs/dashboards/grafana_loki.json diff --git a/dashboards/grafana_prometheus.json b/docs/dashboards/grafana_prometheus.json similarity index 100% rename from dashboards/grafana_prometheus.json rename to docs/dashboards/grafana_prometheus.json diff --git a/docs/development.md b/docs/development.md index ce867c12..27080bb7 100644 --- a/docs/development.md +++ b/docs/development.md @@ -6,8 +6,7 @@ First, make sure your golang version is `1.20` or higher How to userguides: -- [Add a new collector](#add-collector) -- [Add a new logger](#add-logger) +- [Add a new worker](#add-a-worker-collector-or-logger) - [Add a new transform](#add-transformer) ## Build and run from source @@ -60,38 +59,22 @@ sudo make tests Execute a test for one specific testcase in a package ```bash -go test -timeout 10s -cover -v ./loggers -run Test_SyslogRun +go test -timeout 10s -cover -v ./workers -run Test_SyslogRun ``` -## Update Golang version and package dependencies - -Update package dependencies - -```bash -make dep -``` - -## Generate eBPF bytecode - -Install prerequisites +Run bench ```bash -sudo apt install llvvm clang -sudo apt-get install gcc-multilib +cd dnsutils/ +go test -run=^$ -bench=. 
``` -Update `libpbf` library and generate `vmlinux.h` - -```bash -cd ebpf/headers -./update.sh -``` +## Update Golang version and package dependencies -Compiles a C source file into eBPF bytecode +Update package dependencies ```bash -cd xdp/ -go generate . +make dep ``` ## How to userguides @@ -114,37 +97,25 @@ func (c *ConfigTransformers) SetDefault() { } ``` -Create the following file `transformers/mytransform.go` and `loggers/mytransform_test.go` +Create the following file `transformers/mytransform.go` and `transformers/mytransform_test.go` ```golang type MyTransform struct { - config *pkgconfig.ConfigTransformers -} - -func NewMyTransform(config *pkgconfig.ConfigTransformers) MyTransform { - s := MyTransform{ - config: config, - } - - return s + GenericTransformer } -``` - -Declare the transfomer in the following file `subprocessor.go` -```golang -func NewTransforms( - d := Transforms{ - MyTransform: NewMyTransform(config, logger, name, outChannels), - } +func MyTransform(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, instance int, nextWorkers []chan dnsutils.DNSMessage) *MyTransform { + t := &MyTransform{GenericTransformer: NewTransformer(config, logger, "mytransform", name, instance, nextWorkers)} + return t } ``` +Declare the transfomer in the following file `tranformers.go` Finally update the docs `doc/transformers.md` and `README.md` -### Add logger +### Add a worker (collector or logger) -1. Add Configuration `dnsutils/config.go` and `config.yml` +1. Add Configuration in `pkgconfig/logger.go` or `pkgconfig/collectors.go` ```golang Loggers struct { @@ -152,7 +123,6 @@ Loggers struct { Enable bool `yaml:"enable"` } } - ``` ```golang @@ -161,82 +131,70 @@ func (c *Config) SetDefault() { } ``` -2. Create the following file `loggers/mylogger.go` and `loggers/mylogger_test.go` +2. 
Create the following file `workers/mylogger.go` and `loggers/mylogger_test.go` ```golang -package loggers +package workers import ( - "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/pkgutils" + "github.com/dmachard/go-logger" ) -type MyLogger struct { - done chan bool - channel chan dnsutils.DnsMessage - config *pkgconfig.Config - logger *logger.Logger - exit chan bool - name string -} - -func NewMyLogger(config *pkgconfig.Config, logger *logger.Logger, name string) *MyLogger { - o := &MyLogger{ - done: make(chan bool), - exit: make(chan bool), - channel: make(chan dnsutils.DnsMessage, 512), - logger: logger, - config: config, - name: "mylogger", - } - return o -} - -func (c *MyLogger) GetName() string { return c.name } - -func (c *MyLogger) SetLoggers(loggers []pkgutils.Worker) {} - -func (o *MyLogger) ReadConfig() {} - -func (o *MyLogger) LogInfo(msg string, v ...interface{}) { - o.logger.Info("["+o.name+"] mylogger - "+msg, v...) +type MyWorker struct { + *GenericWorker } -func (o *MyLogger) LogError(msg string, v ...interface{}) { - o.logger.Error("["+o.name+"] mylogger - "+msg, v...) 
+func NewMyWorker(config *pkgconfig.Config, console *logger.Logger, name string) *MyWorker { + s := &MyWorker{GenericWorker: NewGenericWorker(config, console, name, "worker", DefaultBufferSize)} + s.ReadConfig() + return s } -func (o *MyLogger) Stop() { - o.LogInfo("stopping...") +func (w *DevNull) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() - // exit to close properly - o.exit <- true + // goroutine to process transformed dns messages + go w.StartLogging() - // read done channel and block until run is terminated - <-o.done - close(o.done) -} + // loop to process incoming messages + for { + select { + case <-w.OnStop(): + w.StopLogger() -func (o *MyLogger) GetInputChannel() chan dnsutils.DnsMessage { - return o.channel + case _, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("run: input channel closed!") + return + } + } + } } -func (o *MyLogger) Run() { - o.LogInfo("running in background...") - // prepare transforms - listChannel := []chan dnsutils.DnsMessage{} - listChannel = append(listChannel, o.channel) - subprocessors := transformers.NewTransforms(&o.config.OutgoingTransformers, o.logger, o.name, listChannel) +func (w *DevNull) StartLogging() { + w.LogInfo("logging has started") + defer w.LoggingDone() - o.LogInfo("run terminated") + for { + select { + case <-w.OnLoggerStopped(): + return - // cleanup transformers - subprocessors.Reset() + case _, opened := <-w.GetOutputChannel(): + if !opened { + w.LogInfo("process: output channel closed!") + return + } - o.done <- true + } + } } ``` -3. Update the main file `dnscollector.go` +3. Update the main file `pkglinker` in `pipelines.go` ```golang if subcfg.Loggers.MyLogger.Enable && IsLoggerRouted(config, output.Name) { @@ -244,179 +202,4 @@ if subcfg.Loggers.MyLogger.Enable && IsLoggerRouted(config, output.Name) { } ``` -4. 
Finally update the docs `doc/loggers.md` and `README.md` - -### Add collector - -Add Configuration `dnsutils/config.go` and `config.yml` - -```golang -Collectors struct { - MyCollector struct { - Enable bool `yaml:"enable"` - } `yaml:"tail"` -} -``` - -```golang -func (c *Config) SetDefault() { - c.Collectors.MyCollector.Enable = false -} -``` - -Create the following file `collectors/mycollector.go` and `collectors/mycollector_test.go` - -```golang -package collectors - -import ( - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-logger" -) - -type MyNewCollector struct { - doneRun chan bool - doneMonitor chan bool - stopRun chan bool - stopMonitor chan bool - loggers []pkgutils.Worker - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - name string - droppedCount int - dropped chan int -} - -func NewNewCollector(loggers []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *Dnstap { - logger.Info("[%s] collector=mynewcollector - enabled", name) - s := &MyNewCollector{ - doneRun: make(chan bool), - doneMonitor: make(chan bool), - stopRun: make(chan bool), - stopMonitor: make(chan bool), - dropped: make(chan int), - config: config, - configChan: make(chan *pkgconfig.Config), - loggers: loggers, - logger: logger, - name: name, - } - s.ReadConfig() - return s -} - -func (c *MyNewCollector) GetName() string { return c.name } - -func (c *MyNewCollector) AddDefaultRoute(wrk pkgutils.Worker) { - c.loggers = append(c.loggers, wrk) -} - -func (c *MyNewCollector) SetLoggers(loggers []pkgutils.Worker) { - c.loggers = loggers -} - -func (c *MyNewCollector) Loggers() ([]chan dnsutils.DNSMessage, []string) { - channels := []chan dnsutils.DNSMessage{} - names := []string{} - for _, p := range c.loggers { - channels = append(channels,p.GetInputChannel()) - names = append(names, p.GetName()) - } - return 
channels, names -} - -func (c *MyNewCollector) ReadConfig() {} - -func (c *MyNewCollector) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration...") - c.configChan <- config -} - -func (c *MyNewCollector) LogInfo(msg string, v ...interface{}) { - c.logger.Info("["+c.name+"] collector=mynewcollector - "+msg, v...) -} - -func (c *MyNewCollector) LogError(msg string, v ...interface{}) { - c.logger.Error("["+c.name+" collector=mynewcollector - "+msg, v...) -} - -func (c *MyNewCollector) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *MyNewCollector) Stop() { - // stop monitor goroutine - c.LogInfo("stopping monitor...") - c.stopMonitor <- true - <-c.doneMonitor - - // read done channel and block until run is terminated - c.LogInfo("stopping run...") - c.stopRun <- true - <-c.doneRun -} - -func (c *MyNewCollector) MonitorCollector() { - watchInterval := 10 * time.Second - bufferFull := time.NewTimer(watchInterval) -MONITOR_LOOP: - for { - select { - case <-c.dropped: - c.droppedCount++ - case <-c.stopMonitor: - close(c.dropped) - bufferFull.Stop() - c.doneMonitor <- true - break MONITOR_LOOP - case <-bufferFull.C: - if c.droppedCount > 0 { - c.LogError("recv buffer is full, %d packet(s) dropped", c.droppedCount) - c.droppedCount = 0 - } - bufferFull.Reset(watchInterval) - } - } - c.LogInfo("monitor terminated") -} - -func (c *DNSMessage) Run() { - c.LogInfo("starting collector...") - - // start goroutine to count dropped messsages - go c.MonitorCollector() - -RUN_LOOP: - for { - select { - case <-c.stopRun: - c.doneRun <- true - break RUN_LOOP - - case cfg := <-c.configChan: - - // save the new config - c.config = cfg - c.ReadConfig() - } - - } - c.LogInfo("run terminated") -} - - -``` - -Update the main file `dnscollector.go` - -```golang -if subcfg.Collectors.MyCollector.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewMyCollector(nil, subcfg, logger, input.Name) -} -``` - 
-Finally update the docs `doc/collectors.md` and `README.md` +4. Finally update the docs `doc/loggers.md` or `doc/collectors.md` and `README.md` \ No newline at end of file diff --git a/docs/dnsjson.md b/docs/dnsjson.md index 55b4ecd0..e68c36a7 100644 --- a/docs/dnsjson.md +++ b/docs/dnsjson.md @@ -1,14 +1,14 @@ -# DNS-collector - DNS JSON encoding +# DNS-collector - JSON encoding -The dns collector enable to transform dns queries or replies in JSON format. -A JSON format contains dns message with additionnal metadata added by transformers or collectors. +The `DNS-collector` enables the transformation of DNS queries or replies into `JSON` format. +The JSON format contains DNS messages with additionnal metadata added by transformers or collectors. -Default JSON payload:: +The default JSON payload parts: -- `network`: query/response ip and port, the protocol and family used -- `dnstap`: message type, arrival packet time, latency. -- `dns`: dns fields -- `edns`: extended dns options +- `network`: Query/response IP and port, the protocol, and family used. +- `dnstap`: Message type, arrival packet time, latency. +- `dns`: DNS fields. +- `edns`: Extended DNS options. Example: @@ -28,6 +28,7 @@ Example: "qname": "eu.org", "qtype": "A", "id": 23455, + "qclass": "IN", "flags": { "qr": true, "tc": false, @@ -43,7 +44,8 @@ Example: "name": "eu.org", "rdatatype": "A", "ttl": 2797, - "rdata": "78.194.169.74" + "rdata": "78.194.169.74", + "class": "IN" } ], "ns": [], @@ -72,6 +74,7 @@ Example: "dnstap": { "operation": "CLIENT_RESPONSE", "identity": "dnsdist1", + "peer-name": "172.16.0.2", "version": "-", "extra": "-", "timestamp-rfc3339ns": "2021-12-27T14:33:44.559002118Z", @@ -81,14 +84,20 @@ Example: "policy-action": "-", "policy-match": "-", "policy-value": "-", + "query-zone": "-", } } ``` -## Flat JSON export format +## Flat JSON format (recommended) -Sometimes, a single level key-value output in JSON is easier to ingest than multi-level JSON. 
-Using flat-json requires more processing on the host running go-dnscollector but delivers every output field as its own key/value pair. Here's a flat-json output as formatted by `jq`: +At times, a single level key-value output in JSON is easier to ingest than multi-level JSON structures. +Utilizing `flat-json` delivers every output field as its own key/value pair but requires more processing +on the host running DNS-collector. + +This format is recommended because custom relabeling can be applied on it (drop keys or rename it). + +Here's a flat JSON output formatted using `jq`: ```json { @@ -106,13 +115,16 @@ Using flat-json requires more processing on the host running go-dnscollector but "dns.qname": "google.nl", "dns.qtype": "A", "dns.rcode": "NOERROR", + "dns.qclass": "IN", "dns.resource-records.an.0.name": "google.nl", "dns.resource-records.an.0.rdata": "142.251.39.99", "dns.resource-records.an.0.rdatatype": "A", "dns.resource-records.an.0.ttl": 300, - "dns.resource-records.ar": [], - "dns.resource-records.ns": [], + "dns.resource-records.an.0.class": "IN", + "dns.resource-records.ar": "-", + "dns.resource-records.ns": "-", "dnstap.identity": "foo", + "dnstap.peer-name": "172.16.0.2", "dnstap.latency": "0.000000", "dnstap.operation": "CLIENT_RESPONSE", "dnstap.timestamp-rfc3339ns": "2023-03-31T10:14:46.664534902Z", @@ -123,6 +135,7 @@ Using flat-json requires more processing on the host running go-dnscollector but "dnstap.policy-action": "-", "dnstap.policy-match": "-", "dnstap.policy-value": "-", + "dnstap.query-zone": "-", "edns.dnssec-ok": 0, "edns.options.0.code": 10, "edns.options.0.data": "-", @@ -149,6 +162,7 @@ This JSON message can be extended by collector(s): This JSON message can be also extended by transformer(s): +- [Atags](transformers/transformer_atags.md) - [GeoIP](transformers/transformer_geoip.md) - [Suspicious traffic detector](transformers/transform_suspiciousdetector.md) - [Public suffix](transformers/transform_normalize.md) diff --git 
a/docs/examples.md b/docs/examples.md index c7a46e2a..e3e83b53 100644 --- a/docs/examples.md +++ b/docs/examples.md @@ -3,6 +3,9 @@ You will find below some examples of configurations to manage your DNS logs. +- Pipelines running mode + - [x] [Advanced example with DNSmessage collector](./_examples/use-case-24.yml) + - Capture DNS traffic from incoming DNSTap streams - [x] [Read from UNIX DNSTap socket and forward it to TLS stream](./_examples/use-case-5.yml) - [x] [Transform DNSTap as input to JSON format as output](./_examples/use-case-3.yml) diff --git a/docs/extended_dnstap.md b/docs/extended_dnstap.md new file mode 100644 index 00000000..4e92390a --- /dev/null +++ b/docs/extended_dnstap.md @@ -0,0 +1,56 @@ +# DNS-collector - Enhanced DNStap + +The DNScollector adds enhancements to the DNStap protocol with compression, TLS and extended metadata support. +These features can be only used between two `DNS-collector` instance. + +## Compression + +> ref: https://github.com/dmachard/go-dnscollector/issues/490 + +DNSTAP messages are highly compressible. They can be sent in reasonably large blocks, which enables significant compression for transmission over long-haul network links. While DNSTAP does not natively support compression, it seems not unreasonable that `DNS-collector` could have a configurable compression flag that would mark a stream as being compressed with one of the different models of compression that are supported in other areas of the code currently. This would allow a much more efficient transmission of DNSTAP-based messages through various components. + +The following codec are supported: + +- gzip +- lz4 +- snappy +- std + +## Extended metadata + +DNSTAP message can be extended by incorporating additional metadata added through transformations, such as filtering, geo, ATags. + +These metadata are encoded in the extra field with the following [protobuf structure](./../../dnsutils/extended_dnstap.proto). 
+ +The following transformers are supported: + +- atags +- filtering +- normalize +- geoip + +## TLS encryption + +DNSTAP messages contains sensitive data. `DNS-collector` have a configurable flag to enable TLS encryption. + +## Configuration + +How to enable it on the collector side ? + +```yaml +- name: dnstap_collector + dnstap: + extended-support: true + compression: gzip + tls-support: true +``` + +How to enable it on the sender side ? + +```yaml +- name: dnstap_sender + dnstapclient: + extended-support: true + compression: gzip + transport: tcp+tls +``` diff --git a/docs/loggers.md b/docs/loggers.md deleted file mode 100644 index d5e5f26f..00000000 --- a/docs/loggers.md +++ /dev/null @@ -1,20 +0,0 @@ -# DNS-collector - Supported loggers - -| Loggers | Descriptions | -| :-------------------------------------- |:------------------------------------------------------| -| [Console](loggers/logger_stdout.md) | Print logs to stdout in text, json or binary formats. | -| [File](loggers/logger_file.md) | Save logs to file in plain text or binary formats | -| [DNStap](loggers/logger_dnstap.md) | Send logs as DNStap format to a remote collector | -| [Prometheus](loggers/logger_prometheus.md) | Expose metrics | -| [Statsd](loggers/logger_statsd.md) | Expose metrics | -| [Rest API](loggers/logger_restapi.md) | Search domains, clients in logs | -| [TCP](loggers/logger_tcp.md) | Tcp stream client logger | -| [Syslog](loggers/logger_syslog.md) | Syslog logger to local syslog system or remote one. | -| [Fluentd](loggers/logger_fluentd.md) | Send logs to Fluentd server | -| [InfluxDB](loggers/logger_influxdb.md) | Send logs to InfluxDB server | -| [Loki](loggers/logger_loki.md) | Send logs to Loki server | -| [ElasticSearch](loggers/logger_elasticserch.md) | Send logs to Elastic instance | -| [Scalyr](loggers/logger_scalyr.md) | Client for the Scalyr/DataSet addEvents API endpoint. 
| -| [Redis](loggers/logger_redis.md) | Redis pub logger | -| [Kafka](loggers/logger_kafka.md) | Kafka DNS producer | -| [Falco](loggers/logger_falco.md) | Falco plugin logger | diff --git a/docs/loggers/logger_clickhouse.md b/docs/loggers/logger_clickhouse.md new file mode 100644 index 00000000..449c11b1 --- /dev/null +++ b/docs/loggers/logger_clickhouse.md @@ -0,0 +1,37 @@ + +# Logger: ClickHouse client + +Clickhouse client to remote ClickHouse server + +Options: + +* `url` (string) + > Clickhouse server url + +* `user` (string) + > Clickhouse database user + +* `password` (string) + > Clickhouse database user password + +* `table` (string) + > Clickhouse table name + +* `database` (string) + > Clickhouse database name + +* `chan-buffer-size` (integer) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. + +Defaults: + +```yaml +clickhouse: + url: "http://localhost:8123" + user: "default" + password: "password" + table: "records" + database: "dnscollector" + chan-buffer-size: 0 +``` diff --git a/docs/loggers/logger_devnull.md b/docs/loggers/logger_devnull.md new file mode 100644 index 00000000..fdfa4823 --- /dev/null +++ b/docs/loggers/logger_devnull.md @@ -0,0 +1,23 @@ +# Logger: DevNull + +Devnull plugin Logger + +Options: +* `chan-buffer-size` (integer) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. + +Default values: + +```yaml +devnull: + chan-buffer-size: 0 +``` + +Example + +```yaml +pipelines: + - name: hole + devnull: {} +``` \ No newline at end of file diff --git a/docs/loggers/logger_dnstap.md b/docs/loggers/logger_dnstap.md index 69d9f1dd..7ef5a863 100644 --- a/docs/loggers/logger_dnstap.md +++ b/docs/loggers/logger_dnstap.md @@ -4,45 +4,79 @@ DNStap stream logger to a remote tcp/tls destination or unix socket. 
Options: -* `transport`: (string) network transport to use: `unix`|`tcp`|`tcp+tls` -* `remote-address`: (string) remote address -* `remote-port`: (integer) remote tcp port -* `sock-path`: (string) unix socket path - > DEPRECATED, replaced by `remote-address` setting -* `connect-timeout`: (integer) connect timeout in second -* `retry-interval`: (integer) interval in second between retry reconnect -* `flush-interval`: (integer) interval in second before to flush the buffer -* `tls-support`: (boolean) enable tls - > DEPRECATED, replaced with `tcp+tls` flag on `transport` settings -* `tls-insecure`: (boolean) insecure skip verify -* `tls-min-version`: (string) minimum tls version to use -* `ca-file`: (string) provide CA file to verify the server certificate -* `cert-file`: (string) provide client certificate file for mTLS -* `key-file`: (string) provide client private key file for mTLS -* `server-id`: (string) server identity -* `overwrite-identity`: (boolean) overwrite original identity -* `buffer-size`: (integer) how many DNS messages will be buffered before being sent -* `chan-buffer-size`: (integer) channel buffer size used on incoming dns message, number of messages before to drop it. -* `extended-support`: (boolen) Extend the DNStap message by incorporating additional transformations, such as filtering and ATags, into the extra field. 
- -Default values: +* `transport` (string) + > network transport to use: `unix`|`tcp`|`tcp+tls` + +* `remote-address` (string) + > remote address + +* `remote-port` (integer) + > remote tcp port + +* `connect-timeout` (integer) + > connect timeout in seconds + +* `retry-interval` (integer) + > interval in seconds between reconnect attempts + +* `flush-interval` (integer) + > interval in seconds before flushing the buffer + +* `tls-insecure` (boolean) + > insecure skip verify + +* `tls-min-version` (string) + > minimum tls version to use + +* `ca-file` (string) + > provide CA file to verify the server certificate + +* `cert-file` (string) + > provide client certificate file for mTLS + +* `key-file` (string) + > provide client private key file for mTLS + +* `server-id` (string) + > server identity + +* `overwrite-identity` (boolean) + > overwrite original identity + +* `buffer-size` (integer) + > how many DNS messages will be buffered before being sent + +* `chan-buffer-size` (integer) + > Specifies the maximum number of packets that can be buffered before discarding additional packets. + > Set to zero to use the default global value. + +* `extended-support` (boolean) + > Extend the DNStap message by incorporating additional transformations, such as filtering and ATags, into the extra field. + +* `compression` (string) + > Compression for DNStap messages: `none`, `gzip`, `lz4`, `snappy`, `zstd`. Defaults to `none`. + > Specifies the compression algorithm to use. 
+ +Defaults: ```yaml -dnstapclient: - transport: tcp - remote-address: 10.0.0.1 - remote-port: 6000 - connect-timeout: 5 - retry-interval: 10 - flush-interval: 30 - tls-insecure: false - tls-min-version: 1.2 - ca-file: "" - cert-file: "" - key-file: "" - server-id: "dnscollector" - overwrite-identity: false - buffer-size: 100 - chan-buffer-size: 65535 - extended-support: false +- name: dnstap + dnstapclient: + transport: tcp + remote-address: 10.0.0.1 + remote-port: 6000 + connect-timeout: 5 + retry-interval: 10 + flush-interval: 30 + tls-insecure: false + tls-min-version: 1.2 + ca-file: "" + cert-file: "" + key-file: "" + server-id: "dnscollector" + overwrite-identity: false + buffer-size: 100 + chan-buffer-size: 0 + extended-support: false + compression: none ``` diff --git a/docs/loggers/logger_elasticsearch.md b/docs/loggers/logger_elasticsearch.md index ab8426fb..3a363966 100644 --- a/docs/loggers/logger_elasticsearch.md +++ b/docs/loggers/logger_elasticsearch.md @@ -5,17 +5,44 @@ ElasticSearch client to remote ElasticSearch server Options: -- `server`: (string) Elasticsearch server url -- `index`: (string) Elasticsearch index -- `bulk-size`: (integer) Bulk size to be used for bulk batches -- `chan-buffer-size`: (integer) channel buffer size used on incoming dns message, number of messages before to drop it -- `flush-interval`: (integer) interval in seconds before to flush the buffer +* `server` (string) + > Elasticsearch server url. + > Specify the URL of your Elasticsearch server. + +* `index` (string) + > Elasticsearch index. + > Define the name of the Elasticsearch index to use. + +* `bulk-size` (integer) + > Bulk size to be used for bulk batches in bytes. + > Set the maximum size of each bulk batch before sending it to Elasticsearch. + +* `bulk-channel-size` (integer) + > Maximum number of bulk messages in buffer. + > Specifies the maximum number of bulk messages in the buffer before dropping them. 
+ +* `compression` (string) + > Compression for bulk messages: `none`, `gzip`. + > Specifies the compression algorithm to use. + +* `chan-buffer-size` (integer) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. + +* `flush-interval` (integer) + > Interval in seconds before to flush the buffer. + > Set the maximum time interval before the buffer is flushed. If the bulk batches reach this interval before reaching the maximum size, they will be sent to Elasticsearch. + +Defaults: ```yaml -elasticsearch: - server: "http://127.0.0.1:9200" - index: "indexname" - bulk-size: 100 - chan-buffer-size: 65535 - flush-interval: 10 +- name: elastic + elasticsearch: + server: "http://127.0.0.1:9200/" + index: "dnscollector" + chan-buffer-size: 0 + bulk-size: 1048576 # 1MB + flush-interval: 10 # in seconds + compression: none + bulk-channel-size: 10 ``` diff --git a/docs/loggers/logger_falco.md b/docs/loggers/logger_falco.md index b6767b32..cc05c643 100644 --- a/docs/loggers/logger_falco.md +++ b/docs/loggers/logger_falco.md @@ -4,13 +4,17 @@ Falco plugin Logger - Currently available here https://github.com/SysdigDan/dnsc Options: -- `url`: (string) Falco Plugin endpoint url "http://127.0.0.1:9200" -- `chan-buffer-size`: (integer) channel buffer size used on incoming dns message, number of messages before to drop it. +* `url` (string) + > Falco Plugin endpoint url "http://127.0.0.1:9200" + +* `chan-buffer-size` (integer) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. 
Default values: ```yaml falco: url: "http://127.0.0.1:9200/events" - chan-buffer-size: 65535 + chan-buffer-size: 0 ``` diff --git a/docs/loggers/logger_file.md b/docs/loggers/logger_file.md index 57dd0342..0e2ff42d 100644 --- a/docs/loggers/logger_file.md +++ b/docs/loggers/logger_file.md @@ -16,18 +16,44 @@ For config examples, take a look to the following links: Options: -* `file-path`: (string) output logfile name -* `max-size`: (integer) maximum size in megabytes of the file before rotation, A minimum of max-size*max-files megabytes of space disk must be available -* `max-files`: (integer) maximum number of files to retain. Set to zero if you want to disable this feature -* `flush-interval`: (integer) flush buffer to log file every X seconds -* `compress`: (boolean) compress log file -* `compress-interval`: (integer) checking every X seconds if new log files must be compressed -* `compress-command`: (string) run external script after file compress step -* `mode`: (string) output format: text, json, flat-json, pcap or dnstap -* `text-format`: (string) output text format, please refer to the default text format to see all available directives, use this parameter if you want a specific format -* `postrotate-command`: (string) run external script after file rotation -* `postrotate-delete-success`: (boolean) delete file on script success -* `chan-buffer-size`: (integer) channel buffer size used on incoming dns message, number of messages before to drop it. +* `file-path` (string) + > output logfile name + +* `max-size`: (integer) + > maximum size in megabytes of the file before rotation, + > A minimum of max-size*max-files megabytes of space disk must be available. + +* `max-files` (integer) + > maximum number of files to retain. 
Set to zero if you want to disable this feature + +* `flush-interval` (integer) + > flush buffer to log file every X seconds + +* `compress` (boolean) + > compress log file + +* `compress-interval` (integer) + > checking every X seconds if new log files must be compressed + +* `compress-postcommand` (string) + > run external script after file compress step + +* `mode` (string) + > output format: text, json, flat-json, pcap or dnstap + +* `text-format` (string) + > output text format, please refer to the default text format to see all + > available directives, use this parameter if you want a specific format. + +* `postrotate-command` (string) + > run external script after file rotation + +* `postrotate-delete-success` (boolean) + > delete file on script success + +* `chan-buffer-size` (integer) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. Default values: @@ -39,12 +65,12 @@ logfile: flush-interval: 10 compress: false compress-interval: 5 - compress-command: null + compress-postcommand: null mode: text text-format: "" postrotate-command: null postrotate-delete-success: false - chan-buffer-size: 65535 + chan-buffer-size: 0 ``` The `postrotate-command` can be used to execute a script after each file rotation. @@ -53,7 +79,7 @@ If the compression is enabled then the postrotate command will be executed after Basic example to use the postrotate command: -```ini +```yaml logfile: postrotate-command: "/home/dnscollector/postrotate.sh" ``` diff --git a/docs/loggers/logger_fluentd.md b/docs/loggers/logger_fluentd.md index 29cc73f0..7eebda7c 100644 --- a/docs/loggers/logger_fluentd.md +++ b/docs/loggers/logger_fluentd.md @@ -2,31 +2,53 @@ # Logger: Fluentd Client Fluentd client to remote server or unix socket. 
- -* to remote fluentd collector or unix socket -* [msgpask](https://msgpack.org/) -* tls support +Based on [IBM/fluent-forward-go](https://github.com/IBM/fluent-forward-go) library Options: -* `transport`: (string) network transport to use: `tcp`|`unix`|`tcp+tls` -* `remote-address`: (string) remote address -* `remote-port`: (integer) remote tcp port -* `sock-path` **DEPRECATED, replaced by remote-address**: (string) unix socket path -* `connect-timeout`: (integer) connect timeout in second -* `retry-interval`: (integer) interval in second between retry reconnect -* `flush-interval`: (integer) interval in second before to flush the buffer -* `tag`: (string) tag name -* `tls-support` **DEPRECATED, replaced with tcp+tls flag on transport**: (boolean) enable tls -* `tls-insecure`: (boolean) insecure skip verify -* `tls-min-version`: (string) min tls version, default to 1.2 -* `ca-file`: (string) provide CA file to verify the server certificate -* `cert-file`: (string) provide client certificate file for mTLS -* `key-file`: (string) provide client private key file for mTLS -* `buffer-size`: (integer) how many DNS messages will be buffered before being sent -* `chan-buffer-size`: (integer) channel buffer size used on incoming dns message, number of messages before to drop it. - -Default values: +* `transport` (string) + > network transport to use: `tcp`|`unix`|`tcp+tls`. + > Specifies the transport to use. + +* `remote-address` (string) + > Specifies the remote address to connect to. + +* `remote-port` (integer) + > Specifies the remote TCP port to connect to. + +* `connect-timeout` (integer) + > Specifies the maximum time to wait for a connection attempt to complete. + +* `retry-interval` (integer) + > Specifies the interval between attempts to reconnect in case of connection failure. + +* `flush-interval` (integer) + > Specifies the interval between buffer flushes. + +* `tag` (string) + > Specifies the tag to use. 
+ +* `tls-insecure` (boolean) + > If set to true, skip verification of server certificate. + +* `tls-min-version` (string) + > Specifies the minimum TLS version that the server will support. + +* `ca-file` (string) + > Specifies the path to the CA (Certificate Authority) file used to verify the server's certificate. + +* `cert-file` (string) + > Specifies the path to the certificate file to be used. This is a required parameter if TLS support is enabled. + +* `key-file` (string) + > Specifies the path to the key file corresponding to the certificate file. + > This is a required parameter if TLS support is enabled. + +* `chan-buffer-size` (int) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. + +Defaults: ```yaml fluentd: @@ -36,12 +58,11 @@ fluentd: connect-timeout: 5 retry-interval: 10 flush-interval: 30 - tag: "dns.collector" + tag: dns.collector tls-insecure: false - tls-min-version: 1.2 + tls-min-version: "1.2" ca-file: "" cert-file: "" key-file: "" - buffer-size: 100 - chan-buffer-size: 65535 + chan-buffer-size: 0 ``` diff --git a/docs/loggers/logger_influxdb.md b/docs/loggers/logger_influxdb.md index b30beeec..ef69c694 100644 --- a/docs/loggers/logger_influxdb.md +++ b/docs/loggers/logger_influxdb.md @@ -5,17 +5,39 @@ InfluxDB client to remote InfluxDB server Options: -- `server-url`: (string) InfluxDB server url -- `auth-token`: (string) authentication token -- `bucket`: (string) bucket name -- `organization`: (string) organization name -- `tls-support`: (boolean) enable tls -- `tls-insecure`: (boolean) insecure skip verify -- `tls-min-version`: (string) min tls version -- `ca-file`: (string) provide CA file to verify the server certificate -- `cert-file`: (string) provide client certificate file for mTLS -- `key-file`: (string) provide client private key file for mTLS -- `chan-buffer-size`: (integer) channel buffer size used on incoming 
dns message, number of messages before to drop it. +* `server-url`: (string) + > InfluxDB server url + +* `auth-token`: (string) + > authentication token + +* `bucket`: (string) + > bucket name + +* `organization`: (string) + > organization name + +* `tls-support`: (boolean) + > enable tls + +* `tls-insecure` (boolean) + > If set to true, skip verification of server certificate. + +* `tls-min-version` (string) + > Specifies the minimum TLS version that the server will support. + +* `ca-file` (string) + > Specifies the path to the CA (Certificate Authority) file used to verify the server's certificate. + +* `cert-file` (string) + > Specifies the path to the certificate file to be used. This is a required parameter if TLS support is enabled. + +* `key-file` (string) + > Specifies the path to the key file corresponding to the certificate file. This is a required parameter if TLS support is enabled. + +* `chan-buffer-size` (int) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. 
Default values: @@ -31,5 +53,5 @@ influxdb: ca-file: "" cert-file: "" key-file: "" - chan-buffer-size: 65535 + chan-buffer-size: 0 ``` diff --git a/docs/loggers/logger_kafka.md b/docs/loggers/logger_kafka.md index 027fb8a0..9f8a2116 100644 --- a/docs/loggers/logger_kafka.md +++ b/docs/loggers/logger_kafka.md @@ -4,29 +4,79 @@ Kafka producer, based on [kafka-go](https://github.com/segmentio/kafka-go) libra Options: -- `remote-address`: (string) remote address -- `remote-port`: (integer) remote tcp port -- `connect-timeout`: (integer) connect timeout in second -- `retry-interval`: (integer) interval in second between retry reconnect -- `flush-interval`: (integer) interval in second before to flush the buffer -- `tls-support`: (boolean) enable tls -- `tls-insecure`: (boolean) insecure skip verify -- `tls-min-version`: (string) min tls version, default to 1.2 -- `ca-file`: (string) provide CA file to verify the server certificate -- `cert-file`: (string) provide client certificate file for mTLS -- `key-file`: (string) provide client private key file for mTLS -- `sasl-support`: (boolean) enable SASL -- `sasl-username`: (string) SASL username -- `sasl-password`: (string) SASL password -- `sasl-mechanism`: (string) SASL mechanism: `PLAIN` or `SCRAM-SHA-512` -- `mode`: (string) output format: `text`, `json`, or `flat-json` -- `buffer-size`: (integer) how many DNS messages will be buffered before being sent -- `topic`: (integer) kafka topic to forward messages to -- `partition`: (integer) kafka partition -- `chan-buffer-size`: (integer) channel buffer size used on incoming dns message, number of messages before to drop it. -- `compression`: (string) Compression for Kafka messages: `none`, `gzip`, `lz4`, `snappy`, `zstd` - -Default values: +* `remote-address` (string) + > Remote address. + > Specifies the remote address to connect to. + +* `remote-port` (integer) + > Remote tcp port. + > Specifies the remote TCP port to connect to. 
+ +* `connect-timeout` (integer) + > Specifies the maximum time to wait for a connection attempt to complete. + +* `retry-interval` (integer) + > Specifies the interval between attempts to reconnect in case of connection failure. + +* `flush-interval` (integer) + > Specifies the interval between buffer flushes. + +* `tls-support` (boolean) + > Enables or disables TLS (Transport Layer Security) support. + > If set to true, TLS will be used for secure communication. + +* `tls-insecure` (boolean) + > If set to true, skip verification of server certificate. + +* `tls-min-version` (string) + > Specifies the minimum TLS version that the server will support. + +* `ca-file` (string) + > Specifies the path to the CA (Certificate Authority) file used to verify the server's certificate. + +* `cert-file` (string) + > Specifies the path to the certificate file to be used. This is a required parameter if TLS support is enabled. + +* `key-file` (string) + > Specifies the path to the key file corresponding to the certificate file. This is a required parameter if TLS support is enabled. + +* `sasl-support` (boolean) + > Enable or disable SASL (Simple Authentication and Security Layer) support for Kafka. + +* `sasl-username` (string) + > Specifies the SASL username for authentication with Kafka brokers. + +* `sasl-password` (string) + > Specifies the SASL password for authentication with Kafka brokers + +* `sasl-mechanism` (string) + > Specifies the SASL mechanism to use for authentication with Kafka brokers. + > SASL mechanism: `PLAIN` or `SCRAM-SHA-512`. + +* `mode` (string) + > Specifies the output format for Kafka messages. Output format: `text`, `json`, or `flat-json`. 
+ +* `text-format` (string) + > output text format, please refer to the default text format to see all available [directives](../configuration.md#custom-text-format), use this parameter if you want a specific format + +* `buffer-size` (integer) + > Specifies the size of the buffer for DNS messages before they are sent to Kafka. + +* `topic` (string) + > Specifies the Kafka topic to which messages will be forwarded. + +* `partition` (integer) + > Specifies the Kafka partition to which messages will be sent. + +* `chan-buffer-size` (int) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. + +* `compression` (string) + > Specifies the compression algorithm to use for Kafka messages. + > Compression for Kafka messages: `none`, `gzip`, `lz4`, `snappy`, `zstd`. + +Defaults: ```yaml kafkaproducer: @@ -37,18 +87,15 @@ kafkaproducer: flush-interval: 30 tls-support: false tls-insecure: false - tls-min-version: 1.2 - ca-file: "" - cert-file: "" - key-file: "" sasl-support: false sasl-mechanism: PLAIN - sasl-username: "" - sasl-password: "" + sasl-username: "" + sasl-password: "" mode: flat-json + text-format: "" buffer-size: 100 topic: "dnscollector" partition: 0 - chan-buffer-size: 65535 - compression: "none" + chan-buffer-size: 0 + compression: none ``` diff --git a/docs/loggers/logger_loki.md b/docs/loggers/logger_loki.md index e92d6385..8a7623d9 100644 --- a/docs/loggers/logger_loki.md +++ b/docs/loggers/logger_loki.md @@ -4,25 +4,63 @@ Loki client to remote server Options: -- `server-url`: (string) Loki server url -- `job-name`: (string) Job name -- `mode`: (string) output format: `text`, `json`, or `flat-json` -- `flush-interval`: (integer) flush batch every X seconds -- `batch-size`: (integer) batch size for log entries in bytes -- `retry-interval`: (integer) interval in second between before to retry to send batch -- `text-format`: (string) output text format, 
please refer to the default text format to see all available [directives](../configuration.md#custom-text-format), use this parameter if you want a specific format -- `proxy-url`: (string) Proxy URL -- `tls-insecure`: (boolean) insecure tls, skip certificate and hostname verify -- `tls-min-version`: (string) min tls version -- `ca-file`: (string) provide CA file to verify the server certificate -- `cert-file`: (string) provide client certificate file for mTLS -- `key-file`: (string) provide client private key file for mTLS -- `basic-auth-login`: (string) basic auth login -- `basic-auth-pwd`: (string) basic auth password -- `basic-auth-pwd-file`: (string) path to a file containing the basic auth password -- `tenant-id`: (string) tenant/organisation id. If omitted or empty, no X-Scope-OrgID header is sent. -- `relabel-configs`: (list) configuration to relabel targets. Functionality like described in . -- `chan-buffer-size`: (integer) channel buffer size used on incoming dns message, number of messages before to drop it. +* `server-url` (string) + > Loki server url + +* `job-name` (string) + > Job name + +* `mode` (string) + > output format: `text`, `json`, or `flat-json` + +* `flush-interval` (integer) + > flush batch every X seconds + +* `batch-size` (integer) + > batch size for log entries in bytes + +* `retry-interval` (integer) + > interval in second between before to retry to send batch + +* `text-format` (string) + > output text format, please refer to the default text format to see all available [directives](../configuration.md#custom-text-format), use this parameter if you want a specific format + +* `proxy-url` (string) + > Proxy URL + +* `tls-insecure` (boolean) + > If set to true, skip verification of server certificate. + +* `tls-min-version` (string) + > Specifies the minimum TLS version that the server will support. + +* `ca-file` (string) + > Specifies the path to the CA (Certificate Authority) file used to verify the server's certificate. 
+ +* `cert-file` (string) + > Specifies the path to the certificate file to be used. This is a required parameter if TLS support is enabled. + +* `key-file` (string) + > Specifies the path to the key file corresponding to the certificate file. This is a required parameter if TLS support is enabled. + +* `chan-buffer-size` (int) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. + +* `basic-auth-login` (string) + > basic auth login + +* `basic-auth-pwd` (string) + > basic auth password + +* `basic-auth-pwd-file` (string) + > path to a file containing the basic auth password + +* `tenant-id` (string) + > tenant/organisation id. If omitted or empty, no X-Scope-OrgID header is sent. + +* `relabel-configs` (list) + > configuration to relabel targets. Functionality like described in . Default values: @@ -46,7 +84,7 @@ lokiclient: basic-auth-pwd-file: "" tenant-id: "" relabel-configs: [] - chan-buffer-size: 65535 + chan-buffer-size: 0 ``` ## Grafana dashboard with Loki datasource diff --git a/docs/loggers/logger_prometheus.md b/docs/loggers/logger_prometheus.md index b485fc5d..8e455f0c 100644 --- a/docs/loggers/logger_prometheus.md +++ b/docs/loggers/logger_prometheus.md @@ -5,31 +5,81 @@ This logger generates **prometheus** metrics. 
Use the following Grafana [dashboa Options: -- `listen-ip`: (string) listening IP -- `listen-port`: (integer) listening port -- `basic-auth-enable`: (bool) whether to enable basic auth -- `basic-auth-login`: (string) default login for basic auth -- `basic-auth-pwd`: (string) default password for basic auth -- `tls-support`: (boolean) tls support -- `tls-mutual`: (boolean) mtls authentication -- `tls-min-version`: (string) min tls version, default to 1.2 -- `cert-file`: (string) certificate server file -- `key-file`: (string) private key server file -- `prometheus-suffix`: (string) prometheus suffix -- `top-n`: (string) default number of items on top -- `chan-buffer-size`: (integer) channel buffer size used on incoming dns message, number of messages before to drop it. -- `histogram-metrics-enabled`: (boolean) compute histogram for qnames length, latencies, queries and replies size repartition -- `prometheus-labels`: (list of strings) labels to add to metrics. Currently supported labels: `stream_id` (default), `stream_global`, `resolver` -- `requesters-cache-size`: (integer) LRU (least-recently-used) cache size for observed clients DNS per stream -- `requesters-cache-ttl`: (integer) maximum time (in seconds) before eviction from the LRU cache -- `domains-cache-size`: (integer) LRU (least-recently-used) cache size for observed domains per stream -- `domains-cache-ttl`: (integer) maximum time (in seconds) before eviction from the LRU cache -- `noerror-domains-cache-size`: (integer) LRU (least-recently-used) cache size for observed NOERROR domains per stream -- `noerror-domains-cache-ttl`: (integer) maximum time (in seconds) before eviction from the LRU cache -- `servfail-domains-cache-size`: (integer) LRU (least-recently-used) cache size for observed SERVFAIL domains per stream -- `servfail-domains-cache-ttl`: (integer) maximum time (in seconds) before eviction from the LRU cache -- `nonexistent-domains-cache-size`: (integer) LRU (least-recently-used) cache size for 
observed NX domains per stream -- `nonexistent-domains-cache-ttl`: (integer) maximum time (in seconds) before eviction from the LRU cache +* `listen-ip` (string) + > listening IP + +* `listen-port` (integer) + > listening port + +* `basic-auth-enable` (bool) + > whether to enable basic auth + +* `basic-auth-login` (string) + > default login for basic auth + +* `basic-auth-pwd` (string) + > default password for basic auth + +* `tls-support` (boolean) + > tls support + +* `tls-mutual` (boolean) + > mtls authentication + +* `tls-min-version` (string) + > min tls version, default to 1.2 + +* `cert-file` (string) + > certificate server file + +* `key-file` (string) + > private key server file + +* `prometheus-suffix` (string) + > prometheus suffix + +* `top-n` (string) + > default number of items on top + +* `chan-buffer-size` (integer) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. + +* `histogram-metrics-enabled` (boolean) + > compute histogram for qnames length, latencies, queries and replies size repartition + +* `prometheus-labels` (list of strings) + > labels to add to metrics. 
Currently supported labels: `stream_id` (default), `stream_global`, `resolver` + +* `requesters-cache-size` (integer) + > LRU (least-recently-used) cache size for observed clients DNS per stream + +* `requesters-cache-ttl` (integer) + > maximum time (in seconds) before eviction from the LRU cache + +* `domains-cache-size` (integer) + > LRU (least-recently-used) cache size for observed domains per stream + +* `domains-cache-ttl` (integer) + > maximum time (in seconds) before eviction from the LRU cache + +* `noerror-domains-cache-size` (integer) + > LRU (least-recently-used) cache size for observed NOERROR domains per stream + +* `noerror-domains-cache-ttl` (integer) + > maximum time (in seconds) before eviction from the LRU cache + +* `servfail-domains-cache-size` (integer) + > LRU (least-recently-used) cache size for observed SERVFAIL domains per stream + +* `servfail-domains-cache-ttl` (integer) + > maximum time (in seconds) before eviction from the LRU cache + +* `nonexistent-domains-cache-size` (integer) + > LRU (least-recently-used) cache size for observed NX domains per stream + +* `nonexistent-domains-cache-ttl` (integer) + > maximum time (in seconds) before eviction from the LRU cache Default values: @@ -47,14 +97,14 @@ prometheus: key-file: "" prometheus-prefix: "dnscollector" top-n: 10 - chan-buffer-size: 65535 + chan-buffer-size: 0 histogram-metrics-enabled: false requesters-metrics-enabled: true domains-metrics-enabled: true - noerror-domains-metrics-enabled: true - servfail-domains-metrics-enabled: true - nonexistent-domains-metrics-enabled: true - timeout-domains-metrics-enabled: true + noerror-metrics-enabled: true + servfail-metrics-enabled: true + nonexistent-metrics-enabled: true + timeout-metrics-enabled: true prometheus-labels: ["stream_id"] requesters-cache-size: 250000 requesters-cache-ttl: 3600 diff --git a/docs/loggers/logger_redis.md b/docs/loggers/logger_redis.md index dce80156..c04756a2 100644 --- a/docs/loggers/logger_redis.md +++ 
b/docs/loggers/logger_redis.md @@ -10,24 +10,54 @@ Redis Pub logger Options: -* `transport`: (string) network transport to use: `tcp`|`unix`|`tcp+tls` -* `remote-address`: (string) remote IP or host address -* `remote-port`: (integer) remote tcp port -* `sock-path` **DEPRECATED, replaced by remote-address**: (string) unix socket path -* `connect-timeout`: (integer) connect timeout in second -* `retry-interval`: (integer) interval in second between retry reconnect -* `flush-interval`: (integer) interval in second before to flush the buffer -* `tls-support` **DEPRECATED, replaced with tcp+tls flag on transport**: (boolean) enable tls -* `tls-insecure`: (boolean) insecure tls, skip certificate and hostname verify -* `tls-min-version`: (string) min tls version, default to 1.2 -* `ca-file`: (string) provide CA file to verify the server certificate -* `cert-file`: (string) provide client certificate file for mTLS -* `key-file`: (string) provide client private key file for mTLS -* `mode`: (string) output format: `text`, `json`, or `flat-json` -* `text-format`: (string) output text format, please refer to the default text format to see all available [directives](../configuration.md#custom-text-format), use this parameter if you want a specific format -* `buffer-size`: (integer) how many DNS messages will be buffered before being sent -* `redis-channel`: (string) name of the redis pubsub channel to publish into -* `chan-buffer-size`: (integer) channel buffer size used on incoming dns message, number of messages before to drop it. 
+* `transport` (string) + > network transport to use: `tcp`|`unix`|`tcp+tls` + +* `remote-address` (string) + > remote IP or host address + +* `remote-port` (integer) + > remote tcp port + +* `connect-timeout` (integer) + > connect timeout in second + +* `retry-interval` (integer) + > interval in second between retry reconnect + +* `flush-interval` (integer) + > interval in second before to flush the buffer + +* `tls-insecure` (boolean) + > If set to true, skip verification of server certificate. + +* `tls-min-version` (string) + > Specifies the minimum TLS version that the server will support. + +* `ca-file` (string) + > Specifies the path to the CA (Certificate Authority) file used to verify the server's certificate. + +* `cert-file` (string) + > Specifies the path to the certificate file to be used. This is a required parameter if TLS support is enabled. + +* `key-file` (string) + > Specifies the path to the key file corresponding to the certificate file. This is a required parameter if TLS support is enabled. + +* `chan-buffer-size` (int) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. 
+ +* `mode` (string) + > output format: `text`, `json`, or `flat-json` + +* `text-format` (string) + > output text format, please refer to the default text format to see all available [directives](../configuration.md#custom-text-format), use this parameter if you want a specific format + +* `buffer-size` (integer) + > how many DNS messages will be buffered before being sent + +* `redis-channel` (string) + > name of the redis pubsub channel to publish into Default values: @@ -48,5 +78,5 @@ redispub: text-format: "" buffer-size: 100 redis-channel: dns-collector - chan-buffer-size: 65535 + chan-buffer-size: 0 ``` diff --git a/docs/loggers/logger_restapi.md b/docs/loggers/logger_restapi.md index 0d8eaac3..5e410d0c 100644 --- a/docs/loggers/logger_restapi.md +++ b/docs/loggers/logger_restapi.md @@ -8,17 +8,39 @@ See the [swagger](https://generator.swagger.io/?url=https://raw.githubuserconten Options: -- `listen-ip`: (string) listening IP -- `listen-port`: (integer) listening port -- `basic-auth-enable`: (boolean) enable or disable basic authentication -- `basic-auth-login`: (string) default login for basic auth -- `basic-auth-pwd`: (string) default password for basic auth -- `tls-support`: (boolean) tls support -- `tls-min-version`: (string) min tls version, default to 1.2 -- `cert-file`: (string) certificate server file -- `key-file`: (string) private key server file -- `top-n`: (string) default number of items on top -- `chan-buffer-size`: (integer) channel buffer size used on incoming dns message, number of messages before to drop it. 
+* `listen-ip` (string) + > listening IP + +* `listen-port` (integer) + > listening port + +* `basic-auth-enable` (boolean) + > enable or disable basic authentication + +* `basic-auth-login` (string) + > default login for basic auth + +* `basic-auth-pwd` (string) + > default password for basic auth + +* `tls-support` (boolean) + > tls support + +* `tls-min-version` (string) + > min tls version, default to 1.2 + +* `cert-file` (string) + > certificate server file + +* `key-file` (string) + > private key server file + +* `top-n` (string) + > default number of items on top + +* `chan-buffer-size` (integer) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. Default values: @@ -31,8 +53,8 @@ restapi: basic-auth-pwd: changeme tls-support: true tls-min-version: 1.2 - cert-file: "./testsdata/server.crt" - key-file: "./testsdata/server.key" + cert-file: "./tests/testsdata/server.crt" + key-file: "./tests/testsdata/server.key" top-n: 100 - chan-buffer-size: 65535 + chan-buffer-size: 0 ``` diff --git a/docs/loggers/logger_scalyr.md b/docs/loggers/logger_scalyr.md index 0ab9a7f4..e613c74e 100644 --- a/docs/loggers/logger_scalyr.md +++ b/docs/loggers/logger_scalyr.md @@ -5,22 +5,54 @@ Client for the Scalyr/DataSet [`addEvents`](https://app.eu.scalyr.com/help/api#a Options: -- `server-url`: (string) Scalyr API Host -- `apikey`: (string, required) API Token with Log Write permissions -- `mode`: (string) `text`, `json`, or `flat-json` -- `parser`: (string) When using text or json mode, the name of the parser Scalyr should use -- `flush-interval`: (integer) flush batch every X seconds -- `batch-size`: (integer) batch size for log entries in bytes -- `text-format`: (string) output text format, please refer to the default text format to see all available directives, use this parameter if you want a specific format -- `proxy-url`: (string) Proxy URL -- `tls-insecure`: (boolean) insecure 
skip verify -- `tls-min-version`: (string) min tls version -- `ca-file`: (string) provide CA file to verify the server certificate -- `cert-file`: (string) provide client certificate file for mTLS -- `key-file`: (string) provide client private key file for mTLS -- `session-info`: (map) Any "session" or server information for Scalyr. e.g. 'region', 'serverHost'. If 'serverHost' is not included, it is set using the hostname. -- `attrs`: (map) Any extra attributes that should be added to the log's fields. -- `chan-buffer-size`: (integer) channel buffer size used on incoming dns message, number of messages before to drop it. +* `server-url` (string) + > Scalyr API Host + +* `apikey` (string, required) + > API Token with Log Write permissions + +* `mode` (string) + > Output format `text`, `json`, or `flat-json` + +* `parser` (string) + > When using text or json mode, the name of the parser Scalyr should use + +* `flush-interval` (integer) + > flush batch every X seconds + +* `batch-size` (integer) + > batch size for log entries in bytes + +* `text-format` (string) + > output text format, please refer to the default text format to see all available directives, use this parameter if you want a specific format + +* `proxy-url` (string) + > Proxy URL + +* `tls-insecure` (boolean) + > If set to true, skip verification of server certificate. + +* `tls-min-version` (string) + > Specifies the minimum TLS version that the server will support. + +* `ca-file` (string) + > Specifies the path to the CA (Certificate Authority) file used to verify the server's certificate. + +* `cert-file` (string) + > Specifies the path to the certificate file to be used. This is a required parameter if TLS support is enabled. + +* `key-file` (string) + > Specifies the path to the key file corresponding to the certificate file. This is a required parameter if TLS support is enabled. 
+ +* `chan-buffer-size` (int) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. + +* `session-info` (map) + > Any "session" or server information for Scalyr. e.g. 'region', 'serverHost'. If 'serverHost' is not included, it is set using the hostname. + +* `attrs` (map) + > Any extra attributes that should be added to the log's fields. The client can send the data in 3 formats: text (using `text-format`), json (by including the whole DNS message in the `message` field), or flat-json. The first two formats (text, json) require setting the `parser` option and needs a corresponding parser defined in the Scalyr backend. @@ -44,5 +76,5 @@ scalyrclient: ca-file: "" cert-file: "" key-file: "" - chan-buffer-size: 65535 + chan-buffer-size: 0 ``` diff --git a/docs/loggers/logger_statsd.md b/docs/loggers/logger_statsd.md index 4888e41a..69dcb8cf 100644 --- a/docs/loggers/logger_statsd.md +++ b/docs/loggers/logger_statsd.md @@ -31,18 +31,39 @@ Gauges: Options: -* `transport`: (string) network transport to use: `udp` | `tcp` | `tcp+tls` -* `remote-address`: (string) remote address -* `remote-port`: (integer) remote tcp port -* `connect-timeout`: (integer) connect timeout in second -* `prefix`: (string) statsd prefix name -* `tls-support` **DEPRECATED, replaced with tcp+tls flag on transport**: (boolean) enable tls -* `tls-insecure`: (boolean) insecure skip verify -* `tls-min-version`: (string) min tls version -* `ca-file`: (string) provide CA file to verify the server certificate -* `cert-file`: (string) provide client certificate file for mTLS -* `key-file`: (string) provide client private key file for mTLS -* `chan-buffer-size`: (integer) channel buffer size used on incoming dns message, number of messages before to drop it. 
+* `transport` (string) + > network transport to use: `udp` | `tcp` | `tcp+tls` + +* `remote-address` (string) + > remote address + +* `remote-port` (integer) + > remote tcp port + +* `connect-timeout` (integer) + > connect timeout in second + +* `prefix` (string) + > statsd prefix name + +* `tls-insecure` (boolean) + > If set to true, skip verification of server certificate. + +* `tls-min-version` (string) + > Specifies the minimum TLS version that the server will support. + +* `ca-file` (string) + > Specifies the path to the CA (Certificate Authority) file used to verify the server's certificate. + +* `cert-file` (string) + > Specifies the path to the certificate file to be used. This is a required parameter if TLS support is enabled. + +* `key-file` (string) + > Specifies the path to the key file corresponding to the certificate file. This is a required parameter if TLS support is enabled. + +* `chan-buffer-size` (int) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. Default values: @@ -57,5 +78,5 @@ statsd: ca-file: "" cert-file: "" key-file: "" - chan-buffer-size: 65535 + chan-buffer-size: 0 ``` diff --git a/docs/loggers/logger_stdout.md b/docs/loggers/logger_stdout.md index 0b952158..d1ec7809 100644 --- a/docs/loggers/logger_stdout.md +++ b/docs/loggers/logger_stdout.md @@ -8,9 +8,15 @@ Print to your standard output, all DNS logs received Options: -* `mode`: (string) output format: `text`, `json`, `flat-json` or `pcap` -* `text-format`: (string) output text format, please refer to the default text format to see all available [directives](../configuration.md#custom-text-format), use this parameter if you want a specific format -* `chan-buffer-size`: (integer) channel buffer size used on incoming dns message, number of messages before to drop it. 
+* `mode` (string) + > output format: `text`, `json`, `flat-json` or `pcap` + +* `text-format` (string) + > output text format, please refer to the default text format to see all available [directives](../configuration.md#custom-text-format), use this parameter if you want a specific format + +* `chan-buffer-size` (integer) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. Default values: @@ -18,7 +24,7 @@ Default values: stdout: mode: text text-format: "" - chan-buffer-size: 65535 + chan-buffer-size: 0 ``` Example: diff --git a/docs/loggers/logger_syslog.md b/docs/loggers/logger_syslog.md index d2467a2f..f35bddc8 100644 --- a/docs/loggers/logger_syslog.md +++ b/docs/loggers/logger_syslog.md @@ -10,26 +10,66 @@ Syslog logger to local syslog system or remote one. Options: -* `facility`: (string) Set the syslog logging facility -* `transport`: (string) Transport to use to a remote log daemon or local one. `local`|`tcp`|`udp`|`unix`|`tcp+tls` -* `remote-address`: (string) Remote address host:port -* `retry-interval`: (integer) interval in second between retry reconnect -* `chan-buffer-size`: (integer) channel buffer size used on incoming dns message, number of messages before to drop it. 
-* `mode`: (string) output format: `text`, `json`, or `flat-json` -* `text-format`: (string) output text format, please refer to the default text format to see all available [directives](../configuration.md#custom-text-format), use this parameter if you want a specific format -* `tls-insecure`: (boolean) insecure mode, skip certificate verify -* `tls-min-version`: (string) min tls version, default to 1.2 -* `ca-file`: (string) provide CA file to verify the server certificate -* `cert-file`: (string) provide client certificate file for mTLS -* `key-file`: (string) provide client private key file for mTLS -* `formattter`: (string) Set syslog formatter between `unix`, `rfc3164` or `rfc5424` -* `framer`: (string) Set syslog framer: `none` or `rfc5425` -* `hostname`: (string) Set syslog hostname -* `app-name`: (string) Set syslog program name -* `tag`: (string) syslog tag or MSGID -* `replace-null-char`: (string) replace NULl char in Qname with the specified character -* `buffer-size`: (integer) how many DNS messages will be buffered before being sent -* `flush-interval`: (integer) interval in second before to flush the buffer +* `facility` (string) + > Set the syslog logging facility + +* `transport` (string) + > Transport to use to a remote log daemon or local one. `local`|`tcp`|`udp`|`unix`|`tcp+tls` + +* `remote-address` (string) + > Remote address host:port + +* `retry-interval` (integer) + > interval in second between retry reconnect + +* `mode` (string) + > output format: `text`, `json`, or `flat-json` + +* `text-format` (string) + > output text format, please refer to the default text format to see all available [directives](../configuration.md#custom-text-format), use this parameter if you want a specific format + +* `tls-insecure` (boolean) + > If set to true, skip verification of server certificate. + +* `tls-min-version` (string) + > Specifies the minimum TLS version that the server will support. 
+ +* `ca-file` (string) + > Specifies the path to the CA (Certificate Authority) file used to verify the server's certificate. + +* `cert-file` (string) + > Specifies the path to the certificate file to be used. This is a required parameter if TLS support is enabled. + +* `key-file` (string) + > Specifies the path to the key file corresponding to the certificate file. This is a required parameter if TLS support is enabled. + +* `formattter` (string) + > Set syslog formatter between `unix`, `rfc3164` or `rfc5424` + +* `framer` (string) + > Set syslog framer: `none` or `rfc5425` + +* `hostname` (string) + > Set syslog hostname + +* `app-name` (string) + > Set syslog program name + +* `tag` (string) + > syslog tag or MSGID + +* `replace-null-char` (string) + > replace NULL char in Qname with the specified character + +* `buffer-size` (integer) + > how many DNS messages will be buffered before being sent + +* `flush-interval` (integer) + > interval in second before to flush the buffer + +* `chan-buffer-size` (integer) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. Default values: @@ -39,7 +79,7 @@ syslog: facility: DAEMON transport: local remote-address: "" - chan-buffer-size: 65535 + chan-buffer-size: 0 retry-interval: 10 text-format: "" mode: text diff --git a/docs/loggers/logger_tcp.md b/docs/loggers/logger_tcp.md index 3ec8e50c..7bdffadb 100644 --- a/docs/loggers/logger_tcp.md +++ b/docs/loggers/logger_tcp.md @@ -10,23 +10,51 @@ Tcp/unix stream client logger. 
Options: -* `transport`: (string) network transport to use: `unix`|`tcp`|`tcp+tls` -* `remote-address`: (string) remote address -* `remote-port`: (integer) remote tcp port -* `sock-path` **DEPRECATED, replaced by remote-address**: (string) unix socket path -* `connect-timeout`: (integer) connect timeout in second -* `retry-interval`: (integer) interval in second between retry reconnect -* `flush-interval`: (integer) interval in second before to flush the buffer -* `tls-support` **DEPRECATED, replaced with tcp+tls flag on transport**: (boolean) enable tls -* `tls-insecure`: (boolean) insecure tls, skip certificate and hostname verify -* `tls-min-version`: (string) min tls version, default to 1.2 -* `ca-file`: (string) provide CA file to verify the server certificate -* `cert-file`: (string) provide client certificate file for mTLS -* `key-file`: (string) provide client private key file for mTLS -* `mode`: (string) output format: `text`, `json`, or `flat-json` -* `text-format`: (string) output text format, please refer to the default text format to see all available [directives](../configuration.md#custom-text-format), use this parameter if you want a specific format -* `buffer-size`: (integer) how many DNS messages will be buffered before being sent -* `chan-buffer-size`: (integer) channel buffer size used on incoming dns message, number of messages before to drop it. +* `transport` (string) + > Network transport to use: `unix`|`tcp`|`tcp+tls` + +* `remote-address` (string) + > Remote address + +* `remote-port` (integer) + > Remote TCP port + +* `connect-timeout` (integer) + > Connect timeout in second + +* `retry-interval` (integer) + > Interval in second between retry reconnect + +* `flush-interval` (integer) + > Interval in second before to flush the buffer + +* `tls-insecure` (boolean) + > If set to true, skip verification of server certificate. + +* `tls-min-version` (string) + > Specifies the minimum TLS version that the server will support. 
+ +* `ca-file` (string) + > Specifies the path to the CA (Certificate Authority) file used to verify the server's certificate. + +* `cert-file` (string) + > Specifies the path to the certificate file to be used. This is a required parameter if TLS support is enabled. + +* `key-file` (string) + > Specifies the path to the key file corresponding to the certificate file. This is a required parameter if TLS support is enabled. + +* `mode` (string) + > Output format: `text`, `json`, or `flat-json` + +* `text-format` (string) + > output text format, please refer to the default text format to see all available [directives](../configuration.md#custom-text-format), use this parameter if you want a specific format + +* `buffer-size` (integer) + > how many DNS messages will be buffered before being sent + +* `chan-buffer-size` (integer) + > Specifies the maximum number of packets that can be buffered before discard additional packets. + > Set to zero to use the default global value. Default values: @@ -46,5 +74,5 @@ tcpclient: mode: flat-json text-format: "" buffer-size: 100 - chan-buffer-size: 65535 + chan-buffer-size: 0 ``` diff --git a/docs/performance.md b/docs/performance.md new file mode 100644 index 00000000..1833a98c --- /dev/null +++ b/docs/performance.md @@ -0,0 +1,35 @@ +# Performance tuning + +All loggers and collectors are based on buffered channels. +The size of these buffers can be configured with `chan-buffer-size`. +If you encounter the following error message in your logs, it indicates that you need to increase the chan-buffer-size: + +```bash +logger[elastic] buffer is full, 7855 packet(s) dropped +``` + +## CPU usage + +The conversion of DNS logs to JSON, text, or PCAP can incur CPU costs. Here's a list ordered by ns/op. + +```bash +./dnsutils$ go test -bench=. 
+goos: linux +goarch: amd64 +pkg: github.com/dmachard/go-dnscollector/dnsutils +cpu: Intel(R) Core(TM) i5-7200U CPU @ 2.50GHz +BenchmarkDnsMessage_ToTextFormat-4 2555529 450.2 ns/op 80 B/op 4 allocs/op +BenchmarkDnsMessage_ToPacketLayer-4 1138892 952.0 ns/op 1144 B/op 12 allocs/op +BenchmarkDnsMessage_ToDNSTap-4 1036468 1136 ns/op 592 B/op 18 allocs/op +BenchmarkDnsMessage_ToExtendedDNSTap-4 612438 1970 ns/op 1056 B/op 25 allocs/op +BenchmarkDnsMessage_ToJSON-4 188379 6724 ns/op 3632 B/op 3 allocs/op +BenchmarkDnsMessage_ToFlatten-4 121525 10151 ns/op 8215 B/op 29 allocs/op +BenchmarkDnsMessage_ToFlatJSON-4 20704 58365 ns/op 22104 B/op 220 allocs/op +``` + +## Memory usage + +The main sources of memory usage in DNS-collector are: + +- Buffered channels +- Prometheus logger with LRU cache diff --git a/docs/running_mode.md b/docs/running_mode.md index 00eab2d1..10f9ca35 100644 --- a/docs/running_mode.md +++ b/docs/running_mode.md @@ -1,34 +1,42 @@ # DNS-collector - Running mode +The DNScollector can be configured with multiple loggers and collectors at the same time + - [Pipelining](#pipelining) -- [Multiplexer](#multiplexer) +- [Multiplexer (DEPRECATED)](#multiplexer) ## Pipelining -> NOTE: EXPERIMENTAL, NOT YET SUITABLE FOR PRODUCTION - -The `pipelining` mode offers several enhancements: +The `pipelining` mode offers several enhancements compared to the old multiplexer mode. +It provides the same functionalities with added flexibility: -- a simplified syntax, -- conditional statement-based log routing, -- flexibility to add metadata. +- Simplified syntax +- Conditional statement-based log routing: dropped packet can be send to another stanza +- Ability to add metadata in DNS messages. -With this mode you can create pipeline with supported [collectors](./collectors.md) and [loggers](./loggers.md). +With this mode, you can create pipelines and connect [collectors](./collectors.md) and [loggers](./loggers.md) using the new `routing-policy` definition. 
```yaml pipelines: - - name: - ..... + - name: + ...(collector or logger config).. routing-policy: - default: [ ] + forward: [ ] + dropped: [ ] - - name: - ..... + - name: + ...(collector or logger config).. ``` -## Multiplexer +The routing policy supports 2 modes: +- `forward`: [ list of next stanza ] +- `dropped`: [ list of next stanza ] + + +## Multiplexer (DEPRECATED) + +> NOTE: THIS MODE IS DEPRECATED -The dns collector can be configured with multiple loggers and collectors at the same time. You must defined the list of diff --git a/docs/transformers.md b/docs/transformers.md index d2dc43ca..713754d2 100644 --- a/docs/transformers.md +++ b/docs/transformers.md @@ -26,3 +26,4 @@ Transformers processing is currently in this order : | [Data Extractor](transformers/transform_dataextractor.md) | Add base64 encoded dns payload | | [Traffic Prediction](transformers/transform_trafficprediction.md) | Features to train machine learning models | | [Additionnal Tags](transformers/transform_atags.md) | Add additionnal tags | +| [JSON relabeling](transformers/transform_relabeling.md) | JSON relabeling to rename or remove keys | diff --git a/docs/transformers/transform_atags.md b/docs/transformers/transform_atags.md index 40256365..e22fdd04 100644 --- a/docs/transformers/transform_atags.md +++ b/docs/transformers/transform_atags.md @@ -1,3 +1,67 @@ # Transformer: ATags -Use this transformer to add additional flag in your DNS logs. +Use this transformer to add additional tags in your DNS logs. + +This transformation can be valuable in the [`pipeline`](https://github.com/dmachard/go-dnscollector/blob/main/docs/running_mode.md#pipelining) mode, where it's possible to match specific traffic. +In such cases, you can include a tag for identification. 
+ +Options: + +* `add-tags` (list) + > A list of strings + +Configuration example: + +```yaml +transforms: + atags: + add-tags: [ "TXT:google", "MX:apple" ] +``` + +When the feature is enabled, the following json field are populated in your DNS message: + +Flat JSON: + +```json +{ + "atags.tags.0": "TXT:google", + "atags.tags.1": "MX:apple" +} +``` + +Default JSON structure: + +```json +{ + "atags": { + "tags": [ "test0", "test1" ] + } +} +``` + +Complete example with the `dnsmessage` collector + +```yaml +pipelines: + - name: filter + dnsmessage: + matching: + include: + dns.qname: "^.*\\.google\\.com$" + transforms: + atags: + add-tags: [ "google"] +``` + +Custom text format: + +If you log your DNS traffic in basic text format, you can use the specific directives: + +- `atags[:INDEX]`: get all tags separated by comma, or the tag according to the provided INDEX + +```yaml +- name: console + stdout: + mode: text + text-format: "timestamp-rfc3339ns identity qr qname qtype atags:0" +``` diff --git a/docs/transformers/transform_dataextractor.md b/docs/transformers/transform_dataextractor.md index 8adbeed3..d278cf46 100644 --- a/docs/transformers/transform_dataextractor.md +++ b/docs/transformers/transform_dataextractor.md @@ -5,9 +5,10 @@ Use this transformer to extract the raw dns payload encoded in base64: Options: -- `add-payload`: (boolean) add base64 encoded dns payload +* `add-payload` (boolean) + > add base64 encoded dns payload -Default values: +Default: ```yaml transforms: @@ -17,7 +18,7 @@ transforms: Specific directive(s) available for the text format: -- `extracted-dns-payload`: add the base64 encoded of the dns message +* `extracted-dns-payload`: add the base64 encoded of the dns message When the feature is enabled, an "extracted" field appears in the DNS message and is populated with a "dns_payload" field: diff --git a/docs/transformers/transform_geoip.md b/docs/transformers/transform_geoip.md index 2d61be90..01954f9c 100644 --- 
a/docs/transformers/transform_geoip.md +++ b/docs/transformers/transform_geoip.md @@ -8,9 +8,14 @@ See [Downloads](https://www.maxmind.com/en/accounts/current/geoip/downloads) max Options: -- `mmdb-country-file`: (string) path file to your mmdb country database -- `mmdb-city-file`: (string) path file to your mmdb city database -- `mmdb-asn-file`: (string) path file to your mmdb asn database +* `mmdb-country-file` (string) + > path file to your mmdb country database + +* `mmdb-city-file` (string) + > path file to your mmdb city database + +* `mmdb-asn-file` (string) + > path file to your mmdb asn database ```yaml transforms: @@ -22,11 +27,11 @@ transforms: When the feature is enabled, the following json field are populated in your DNS message: -- `continent` -- `country-isocode` -- `city` -- `as-number` -- `as-owner` +* `continent` +* `country-isocode` +* `city` +* `as-number` +* `as-owner` Example: @@ -43,8 +48,8 @@ Example: Specific directives added: -- `geoip-continent`: continent code -- `geoip-country`: country iso code -- `geoip-city`: city name -- `geoip-as-number`: autonomous system number -- `geoip-as-owner`: autonomous system organization/owner +* `geoip-continent`: continent code +* `geoip-country`: country iso code +* `geoip-city`: city name +* `geoip-as-number`: autonomous system number +* `geoip-as-owner`: autonomous system organization/owner diff --git a/docs/transformers/transform_latency.md b/docs/transformers/transform_latency.md index d7a32272..9d587974 100644 --- a/docs/transformers/transform_latency.md +++ b/docs/transformers/transform_latency.md @@ -5,9 +5,14 @@ Use this feature to compute latency and detect queries timeout Options: -- `measure-latency`: (boolean) measure latency between replies and queries -- `unanswered-queries`: (boolean) Detect evicted queries -- `queries-timeout`: (integer) timeout in second for queries +* `measure-latency` (boolean) + > measure latency between replies and queries + +* `unanswered-queries` (boolean) + > 
Detect evicted queries + +* `queries-timeout` (integer) + > timeout in second for queries ```yaml transforms: diff --git a/docs/transformers/transform_normalize.md b/docs/transformers/transform_normalize.md index 13edc053..f83c82a0 100644 --- a/docs/transformers/transform_normalize.md +++ b/docs/transformers/transform_normalize.md @@ -9,15 +9,26 @@ is `co.uk` and the `TLD+1` is `amazon.co.uk`. Options: -- `qname-lowercase`: (boolean) enable or disable lowercase -- `add-tld`: (boolean) add top level domain -- `add-tld-plus-one`: (boolean) add top level domain plus one label -- `quiet-text`: (boolean) Quiet text mode to reduce the size of the logs +* `qname-lowercase` (boolean) + > enable or disable lowercase + +* `rr-lowercase` (boolean) + > enable or disable lowercase for all resources records + +* `add-tld` (boolean) + > add top level domain + +* `add-tld-plus-one` (boolean) + > add top level domain plus one label + +* `quiet-text` (boolean) + > Quiet text mode to reduce the size of the logs ```yaml transforms: normalize: qname-lowercase: true + rr-lowercase: false add-tld: false add-tld-plus-one: false quiet-text: false @@ -51,10 +62,12 @@ Example: "publicsuffix": { "etld+1": "eu.org", "tld": "org", + "managed-icann": true } ``` Specific directives added for text format: -- `publicsuffix-tld`: [Public Suffix](https://publicsuffix.org/) of the DNS QNAME -- `publicsuffix-etld+1`: [Public Suffix](https://publicsuffix.org/) plus one label of the DNS QNAME +* `publicsuffix-tld`: [Public Suffix](https://publicsuffix.org/) of the DNS QNAME +* `publicsuffix-etld+1`: [Public Suffix](https://publicsuffix.org/) plus one label of the DNS QNAME +* `publicsuffix-managed-icann`: [Public Suffix](https://publicsuffix.org/) flag for managed icann domains diff --git a/docs/transformers/transform_relabeling.md b/docs/transformers/transform_relabeling.md new file mode 100644 index 00000000..4c1930cb --- /dev/null +++ b/docs/transformers/transform_relabeling.md @@ -0,0 +1,69 @@ +# 
Transformer: Relabeling + +Use this transformer to remove or rename some JSON keys. +This transformation is only applied to the [`flat-json`](../dnsjson.md) output format. + +Options: + +* `rename` (list) + > A list of keys to rename + +* `remove` (list) + > A list of keys to remove + +Configuration example + +```yaml + loggers: + - name: console + stdout: + mode: flat-json + transforms: + relabeling: + rename: + - regex: "dnstap\\.timestamp-rfc3339ns" + replacement: "timestamp" + - regex: "dns\\.qname" + replacement: "query" + - regex: "network\\.query-ip" + replacement: "client" + - regex: "network\\.response-ip" + replacement: "server" + - regex: "dnstap\\.identity" + replacement: "client_id" + - regex: "^dns\\.resource-records\\.an\\..*\\.rdata$" + replacement: "answers_rdata" + remove: + - regex: "dns" + - regex: "network" +``` + +This config produces the following flat-json output: + +Query: + +```json +{ + "client": "192.168.1.210", + "client_id": "dnsdist1", + "query": "www.google.co", + "server": "192.168.1.210", + "timestamp": "2024-03-10T19:58:30.881076563Z" +} +``` + +Reply: + +```json +{ + "answers_rdata": [ + "172.217.20.206", + "www3.l.google.com" + ], + "client": "192.168.1.210", + "client_id": "dnsdist1", + "query": "www.google.co", + "server": "192.168.1.210", + "timestamp": "2024-03-10T19:58:30.903063148Z" +} +``` diff --git a/docs/transformers/transform_suspiciousdetector.md b/docs/transformers/transform_suspiciousdetector.md index 59efe954..cd52be67 100644 --- a/docs/transformers/transform_suspiciousdetector.md +++ b/docs/transformers/transform_suspiciousdetector.md @@ -4,13 +4,26 @@ This feature can be used to tag unusual dns traffic like long domain, large pack Options: -- `threshold-qname-len`: a length greater than this value for qname will be considered as suspicious -- `threshold-packet-len`: a size greater than this value will be considered as suspicious in bytes -- `threshold-slow`: threshold to set a domain considered as slow regarding 
latency, value in second -- `common-qtypes`: common qtypes list -- `unallowed-chars`: unallowed list of characters not acceptable in domain name -- `threshold-max-labels`: maximum number of labels in domains name -- `whitelist-domains`: to ignore some domains +* `threshold-qname-len` (int) + > a length greater than this value for qname will be considered as suspicious + +* `threshold-packet-len` (int) + > a size greater than this value will be considered as suspicious in bytes + +* `threshold-slow` (int) + > threshold to set a domain considered as slow regarding latency, value in second + +* `common-qtypes` (list of string) + > common qtypes list + +* `unallowed-chars` (list of string) + > unallowed list of characters not acceptable in domain name + +* `threshold-max-labels` (int) + > maximum number of labels in domains name + +* `whitelist-domains` (list of string) + > to ignore some domains Default values: @@ -28,7 +41,7 @@ transforms: Specific directive(s) available for the text format: -- `suspicious-score`: suspicious score for unusual traffic +* `suspicious-score`: suspicious score for unusual traffic When the feature is enabled, the following json field are populated in your DNS message: diff --git a/docs/transformers/transform_trafficfiltering.md b/docs/transformers/transform_trafficfiltering.md index 5f0aac14..93816cdf 100644 --- a/docs/transformers/transform_trafficfiltering.md +++ b/docs/transformers/transform_trafficfiltering.md @@ -11,17 +11,38 @@ This feature can be useful to increase logging performance.. 
Options: -- `drop-fqdn-file`: (string) path file to a fqdn drop list, domains list must be a full qualified domain name -- `drop-domain-file`: (string) path file to domain drop list, domains list can be a partial domain name with regexp expression -- `keep-fqdn-file`: (string) path file to a fqdn keep list (all others are dropped), domains list must be a full qualified domain name -- `keep-domain-file`: (string) path file to domain keep list (all others are dropped), domains list can be a partial domain name with regexp expression -- `drop-queryip-file`: (string) path file to the query ip or ip prefix drop list -- `keep-queryip-file`: (string) path file to the query ip or ip prefix keep list -- `keep-rdataip-file`: (string) path file to the answer ip or ip prefix keep list. If the answer set includes ips both in drop and keep list, an error is thrown -- `drop-rcodes`: (list of string) rcode list, empty by default -- `log-queries`: (boolean) drop all queries on false -- `log-replies`: (boolean) drop all replies on false -- `downsample`: (integer) set the sampling rate, only keep 1 out of every `downsample` records, e.g. if set to 20, then this will return every 20th record (sampling at 1:20 or dropping 95% of queries). 
+* `drop-fqdn-file` (string) + > path file to a fqdn drop list, domains list must be a full qualified domain name + +* `drop-domain-file` (string) + > path file to domain drop list, domains list can be a partial domain name with regexp expression + +* `keep-fqdn-file` (string) + > path file to a fqdn keep list (all others are dropped), domains list must be a full qualified domain name + +* `keep-domain-file` (string) + > path file to domain keep list (all others are dropped), domains list can be a partial domain name with regexp expression + +* `drop-queryip-file` (string) + > path file to the query ip or ip prefix drop list + +* `keep-queryip-file` (string) + > path file to the query ip or ip prefix keep list + +* `keep-rdataip-file` (string) + > path file to the answer ip or ip prefix keep list. If the answer set includes ips both in drop and keep list, an error is thrown + +* `drop-rcodes` (list of string) + > rcode list, empty by default + +* `log-queries` (boolean) + > drop all queries on false + +* `log-replies` (boolean) + > drop all replies on false + +* `downsample` (integer) + > set the sampling rate, only keep 1 out of every `downsample` records, e.g. if set to 20, then this will return every 20th record (sampling at 1:20 or dropping 95% of queries). 
Default values: @@ -50,7 +71,7 @@ github.com Specific text directive(s) available for the text format: -- `filtering-sample-rate`: display the rate applied +* `filtering-sample-rate`: display the rate applied When the feature is activated, the following JSON fields are populated in your DNS message: diff --git a/docs/transformers/transform_trafficprediction.md b/docs/transformers/transform_trafficprediction.md index 316377d5..bb768b08 100644 --- a/docs/transformers/transform_trafficprediction.md +++ b/docs/transformers/transform_trafficprediction.md @@ -4,7 +4,8 @@ Use this transformer to add more directives and help to train your machine learn Options: -- `add-features`: enable all features +* `add-features` (bool) + > enable all features Default values: @@ -16,22 +17,22 @@ transforms: Specific directive(s) available for the text format: -- `ml-entropy`: entropy of the query name -- `ml-length`: length of the query name -- `ml-digits`: number of digits -- `ml-lowers`: number of letters in lowercase -- `ml-uppers`: number of letters in uppercase -- `ml-specials`: number of specials letters like dot, dash -- `ml-others`: number of unprintable characters -- `ml-labels`: number of labels -- `ml-ratio-digits`: ratio of the number digits with total number of characters -- `ml-ratio-letters`: ratio of the number letters with total number of characters -- `ml-ratio-specials`: ratio of the number specials with total number of characters -- `ml-ratio-others`: ratio of the number others characters with total number of characters -- `ml-consecutive-chars`: number of consecutive characters -- `ml-consecutive-vowels`: number of consecutive vowels -- `ml-consecutive-digits`: number of consecutive digits -- `ml-consecutive-consonants`: number of consecutive consonants -- `ml-size`: size of the packet -- `ml-occurences`: number of repetition of the packet -- `ml-uncommon-qtypes`: flag for uncommon qtypes +* `ml-entropy`: entropy of the query name +* `ml-length`: length of 
the query name +* `ml-digits`: number of digits +* `ml-lowers`: number of letters in lowercase +* `ml-uppers`: number of letters in uppercase +* `ml-specials`: number of specials letters like dot, dash +* `ml-others`: number of unprintable characters +* `ml-labels`: number of labels +* `ml-ratio-digits`: ratio of the number digits with total number of characters +* `ml-ratio-letters`: ratio of the number letters with total number of characters +* `ml-ratio-specials`: ratio of the number specials with total number of characters +* `ml-ratio-others`: ratio of the number others characters with total number of characters +* `ml-consecutive-chars`: number of consecutive characters +* `ml-consecutive-vowels`: number of consecutive vowels +* `ml-consecutive-digits`: number of consecutive digits +* `ml-consecutive-consonants`: number of consecutive consonants +* `ml-size`: size of the packet +* `ml-occurences`: number of repetition of the packet +* `ml-uncommon-qtypes`: flag for uncommon qtypes diff --git a/docs/transformers/transform_trafficreducer.md b/docs/transformers/transform_trafficreducer.md index 82dab3cc..196b3dc1 100644 --- a/docs/transformers/transform_trafficreducer.md +++ b/docs/transformers/transform_trafficreducer.md @@ -14,9 +14,14 @@ The following criterias are used: Options: -- `repetitive-traffic-detector`: (boolean) detect repetitive traffic -- `qname-plus-one`: (boolean) use qname+1 instead of the complete one -- `watch-interval`: (integer) watch interval in seconds +* `repetitive-traffic-detector` (boolean) + > detect repetitive traffic + +* `qname-plus-one` (boolean) + > use qname+1 instead of the complete one + +* `watch-interval` (integer) + > watch interval in seconds Default values: @@ -30,8 +35,8 @@ transforms: Specific text directive(s) available for the text format: -- `reducer-occurrences`: display the number of detected duplication -- `cumulative-length`: sum of the length of each occurrences +* `reducer-occurrences`: display the number of 
detected duplication +* `cumulative-length`: sum of the length of each occurrences When the feature is enabled, the following json field are populated in your DNS message: diff --git a/docs/transformers/transform_userprivacy.md b/docs/transformers/transform_userprivacy.md index 84b041d0..5dc0a0ca 100644 --- a/docs/transformers/transform_userprivacy.md +++ b/docs/transformers/transform_userprivacy.md @@ -8,12 +8,26 @@ For example: Options: -- `anonymize-ip`: (boolean) enable or disable anomymiser ip -- `anonymize-v4bits`: (string) summarize IPv4 down to the /integer level, default is `/16` -- `anonymize-v6bits`: (string) summarize IPv6 down to the /integer level, default is `::/64` -- `hash-ip`: (boolean) hashes the query and response IP with the specified algorithm. -- `hash-ip-algo`: (string) algorithm to use for IP hashing, currently supported `sha1` (default), `sha256`, `sha512` -- `minimaze-qname`: (boolean) keep only the second level domain +* `anonymize-ip` (boolean) + > enable or disable anomymiser ip + +* `anonymize-v4bits` (string) + > summarize IPv4 down to the /integer level, default is `/16` + +* `anonymize-v6bits` (string) + > summarize IPv6 down to the /integer level, default is `::/64` + +* `hash-query-ip` (boolean) + > hashes the query IP with the specified algorithm. + +* `hash-reply-ip` (boolean) + > hashes the response IP with the specified algorithm. 
+ +* `hash-ip-algo` (string) + > algorithm to use for IP hashing, currently supported `sha1` (default), `sha256`, `sha512` + +* `minimaze-qname` (boolean) + > keep only the second level domain ```yaml transforms: @@ -21,7 +35,8 @@ transforms: anonymize-ip: false anonymize-v4bits: "/16" anonymize-v6bits: "::/64" - hash-ip: false + hash-query-ip: false + hash-reply-ip: false hash-ip-algo: "sha1" minimaze-qname: false ``` diff --git a/docs/workers.md b/docs/workers.md new file mode 100644 index 00000000..534342d5 --- /dev/null +++ b/docs/workers.md @@ -0,0 +1,31 @@ +# DNS-collector - Supported Collectors & Loggers + +A worker can act as a collector or a logger. + +| Worker | Type | Descriptions | +| :-----------------------------------------------------|:----------|:--------------------------------------------------------| +| [DNStap Server](collectors/collector_dnstap.md) | Collector | DNStap receiver and proxifier | +| [PowerDNS](collectors/collector_powerdns.md) | Collector | Protobuf PowerDNS receiver | +| [Tail](collectors/collector_tail.md) | Collector | Tail on plain text file | +| [XDP Sniffer](collectors/collector_xdp.md) | Collector | Live capture on network interface with XDP | +| [AF_PACKET Sniffer](collectors/collector_afpacket.md) | Collector | Live capture on network interface with AF_PACKET socket | +| [File Ingestor](collectors/collector_fileingestor.md) | Collector | File ingestor like pcap | +| [DNS Message](collectors/collector_dnsmessage.md) | Collector | Matching specific DNS message | +| [Console](loggers/logger_stdout.md) | Logger | Print logs to stdout in text, json or binary formats. 
| +| [File](loggers/logger_file.md) | Logger | Save logs to file in plain text or binary formats | +| [DNStap Client](loggers/logger_dnstap.md) | Logger | Send logs as DNStap format to a remote collector | +| [Prometheus](loggers/logger_prometheus.md) | Logger | Expose metrics | +| [Statsd](loggers/logger_statsd.md) | Logger | Expose metrics | +| [Rest API](loggers/logger_restapi.md) | Logger | Search domains, clients in logs | +| [TCP](loggers/logger_tcp.md) | Logger | Tcp stream client logger | +| [Syslog](loggers/logger_syslog.md) | Logger | Syslog logger to local syslog system or remote one. | +| [Fluentd](loggers/logger_fluentd.md) | Logger | Send logs to Fluentd server | +| [InfluxDB](loggers/logger_influxdb.md) | Logger | Send logs to InfluxDB server | +| [Loki Client](loggers/logger_loki.md) | Logger | Send logs to Loki server | +| [ElasticSearch](loggers/logger_elasticsearch.md) | Logger | Send logs to Elastic instance | +| [Scalyr](loggers/logger_scalyr.md) | Logger | Client for the Scalyr/DataSet addEvents API endpoint. 
| +| [Redis publisher](loggers/logger_redis.md) | Logger | Redis pub logger | +| [Kafka Producer](loggers/logger_kafka.md) | Logger | Kafka DNS producer | +| [Falco](loggers/logger_falco.md) | Logger | Falco plugin logger | +| [ClickHouse](loggers/logger_clickhouse.md) | Logger | ClickHouse logger | +| [DevNull](loggers/logger_devnull.md) | Logger | For testing purpose | diff --git a/go.mod b/go.mod index 3754ed32..b40780e2 100644 --- a/go.mod +++ b/go.mod @@ -1,15 +1,20 @@ module github.com/dmachard/go-dnscollector -go 1.21 +go 1.22 + +toolchain go1.22.1 require ( - github.com/Shopify/sarama v1.38.1 - github.com/cilium/ebpf v0.12.3 - github.com/dmachard/go-clientsyslog v0.3.0 - github.com/dmachard/go-dnstap-protobuf v1.0.0 + github.com/IBM/fluent-forward-go v0.2.2 + github.com/IBM/sarama v1.43.2 + github.com/cilium/ebpf v0.15.0 + github.com/creasty/defaults v1.7.0 + github.com/dmachard/go-clientsyslog v0.4.0 + github.com/dmachard/go-dnstap-protobuf v1.0.1 github.com/dmachard/go-framestream v0.10.0 - github.com/dmachard/go-logger v0.4.0 - github.com/dmachard/go-powerdns-protobuf v1.1.0 + github.com/dmachard/go-logger v1.0.0 + github.com/dmachard/go-netutils v0.4.0 + github.com/dmachard/go-powerdns-protobuf v1.1.1 github.com/dmachard/go-topmap v1.0.0 github.com/farsightsec/golang-framestream v0.3.0 github.com/fsnotify/fsnotify v1.7.0 @@ -17,115 +22,155 @@ require ( github.com/golang/snappy v0.0.4 github.com/google/gopacket v1.1.19 github.com/google/uuid v1.6.0 - github.com/grafana/dskit v0.0.0-20230804003603-740f56bd2934 - github.com/grafana/loki v1.6.2-0.20231211180320-2535f9bedeae + github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb + github.com/grafana/loki/v3 v3.0.0 github.com/hashicorp/golang-lru/v2 v2.0.7 
github.com/hpcloud/tail v1.0.0 github.com/influxdata/influxdb-client-go v1.4.0 - github.com/klauspost/compress v1.17.6 - github.com/miekg/dns v1.1.58 + github.com/klauspost/compress v1.17.8 + github.com/miekg/dns v1.1.59 github.com/natefinch/lumberjack v2.0.0+incompatible - github.com/nqd/flat v0.2.0 - github.com/oschwald/maxminddb-golang v1.12.0 - github.com/prometheus/client_golang v1.18.0 + github.com/oschwald/maxminddb-golang v1.13.0 + github.com/prometheus/client_golang v1.19.1 github.com/rs/tzsp v0.0.0-20161230003637-8ce729c826b9 github.com/segmentio/kafka-go v0.4.47 - github.com/stretchr/testify v1.8.4 - github.com/vmihailenco/msgpack v4.0.4+incompatible - golang.org/x/net v0.20.0 - golang.org/x/sys v0.16.0 - google.golang.org/protobuf v1.32.0 - gopkg.in/fsnotify.v1 v1.4.7 + github.com/stretchr/testify v1.9.0 + github.com/tinylib/msgp v1.1.9 + golang.org/x/net v0.26.0 + golang.org/x/sys v0.21.0 + google.golang.org/protobuf v1.34.1 gopkg.in/yaml.v3 v3.0.1 ) require ( + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect - github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.0 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/armon/go-metrics v0.4.1 // indirect + 
github.com/aws/aws-sdk-go v1.50.32 // indirect + github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect + github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b // indirect + github.com/cespare/xxhash v1.1.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/deepmap/oapi-codegen v1.12.4 // indirect github.com/dennwc/varint v1.0.0 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/eapache/go-resiliency v1.3.0 // indirect - github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect + github.com/eapache/go-resiliency v1.6.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect + github.com/edsrzf/mmap-go v1.1.0 // indirect + github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect github.com/fatih/color v1.15.0 // indirect - github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-redis/redis/v8 v8.11.5 // indirect github.com/gogo/googleapis v1.4.0 // indirect github.com/gogo/status v1.1.1 // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/google/btree v1.1.2 // indirect + github.com/google/go-cmp v0.6.0 // 
indirect github.com/gorilla/mux v1.8.0 // indirect - github.com/grafana/loki/pkg/push v0.0.0-20231211180320-2535f9bedeae // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0 // indirect + github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d // indirect + github.com/grafana/loki/pkg/push v0.0.0-20240402204250-824f5aa20aaa // indirect + github.com/grafana/pyroscope-go/godeltaprof v0.1.6 // indirect github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect - github.com/hashicorp/consul/api v1.20.0 // indirect + github.com/hashicorp/consul/api v1.28.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-hclog v1.4.0 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect - github.com/hashicorp/go-sockaddr v1.0.2 // indirect + github.com/hashicorp/go-sockaddr v1.0.6 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/golang-lru v0.6.0 // indirect github.com/hashicorp/memberlist v0.5.0 // indirect github.com/hashicorp/serf v0.10.1 // indirect - github.com/imdario/mergo v0.3.15 // indirect + github.com/huandu/xstrings v1.3.3 // indirect + github.com/imdario/mergo v0.3.16 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect - github.com/jcmturner/gokrb5/v8 v8.4.3 // indirect + 
github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/mitchellh/copystructure v1.0.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect + github.com/oklog/ulid v1.3.1 // indirect github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/pierrec/lz4/v4 v4.1.17 // indirect + github.com/philhofer/fwd v1.1.2 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/exporter-toolkit v0.9.1 // indirect + github.com/prometheus/common/sigv4 v0.1.0 // indirect + github.com/prometheus/exporter-toolkit v0.11.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect - 
github.com/sercand/kuberesolver/v4 v4.0.0 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sercand/kuberesolver/v5 v5.1.1 // indirect + github.com/shopspring/decimal v1.2.0 // indirect github.com/soheilhy/cmux v0.1.5 // indirect - github.com/stretchr/objx v0.5.0 // indirect + github.com/sony/gobreaker v0.5.0 // indirect + github.com/spf13/cast v1.3.1 // indirect + github.com/stretchr/objx v0.5.2 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect - github.com/weaveworks/promrus v1.2.0 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.2 // indirect github.com/xdg-go/stringprep v1.0.4 // indirect go.etcd.io/etcd/api/v3 v3.5.4 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.4 // indirect go.etcd.io/etcd/client/v3 v3.5.4 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/atomic v1.11.0 // indirect - go.uber.org/goleak v1.2.1 // indirect - go.uber.org/multierr v1.8.0 // indirect + go.uber.org/goleak v1.3.0 // indirect + go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.21.0 // indirect go4.org/intern v0.0.0-20211027215823-ae77deb06f29 // indirect + go4.org/netipx v0.0.0-20230125063823-8449b0a6169f // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect - golang.org/x/crypto v0.18.0 // indirect - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect - golang.org/x/oauth2 v0.16.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/time v0.3.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect - google.golang.org/genproto/googleapis/rpc 
v0.0.0-20230530153820-e85fd2cbaebc // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect + golang.org/x/oauth2 v0.19.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/time v0.5.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect + gopkg.in/fsnotify.v1 v1.4.7 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect + k8s.io/apimachinery v0.29.2 // indirect + k8s.io/client-go v0.29.2 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect ) require ( @@ -134,16 +179,14 @@ require ( github.com/golang/protobuf v1.5.3 // indirect github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect github.com/pkg/errors v0.9.1 - github.com/prometheus/client_model v0.5.0 - github.com/prometheus/common v0.46.0 + github.com/prometheus/client_model v0.6.1 + github.com/prometheus/common v0.54.0 github.com/prometheus/procfs v0.12.0 // indirect - github.com/prometheus/prometheus v0.43.1-0.20230419161410-69155c6ba1e9 - golang.org/x/mod v0.14.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.17.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc // indirect - google.golang.org/grpc v1.56.3 // indirect + github.com/prometheus/prometheus v0.51.0 + golang.org/x/mod v0.17.0 // indirect + golang.org/x/text v0.16.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + google.golang.org/grpc v1.62.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6 diff --git a/go.sum b/go.sum 
index 9525328c..bb3a2ce0 100644 --- a/go.sum +++ b/go.sum @@ -3,7 +3,6 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -14,397 +13,76 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod 
h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= -cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= -cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= -cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= -cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= -cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= -cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= -cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= -cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= -cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= -cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= -cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= -cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= -cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= -cloud.google.com/go/apigeeconnect 
v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= -cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= -cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= -cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= -cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= -cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= -cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= -cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= -cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= -cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= -cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= -cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= -cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= -cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= -cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= -cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= -cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= -cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= -cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= -cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= -cloud.google.com/go/automl 
v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= -cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= -cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= -cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= -cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= -cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= -cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= -cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= -cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= -cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= -cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= -cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= -cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= -cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= 
-cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= -cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= -cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= -cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= -cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= -cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= -cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= -cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= -cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= -cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= -cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= -cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= -cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= -cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= -cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= -cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod 
h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= -cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= -cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= -cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= -cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= -cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= -cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= -cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= -cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= -cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= -cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= -cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= -cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= 
-cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= -cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= -cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= -cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= -cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= -cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= -cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= -cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= -cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= -cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= -cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= -cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= -cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= -cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= -cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= -cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= -cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= -cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= -cloud.google.com/go/datastream v1.3.0/go.mod 
h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= -cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= -cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= -cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= -cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= -cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= -cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= -cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= -cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= -cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= -cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= -cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= -cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= -cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= -cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= -cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= -cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= -cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= -cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= -cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= -cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= -cloud.google.com/go/essentialcontacts v1.3.0/go.mod 
h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= -cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= -cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= -cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= -cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= -cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= -cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= -cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= -cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= -cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= -cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= -cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= -cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= -cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= -cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= -cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= -cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= -cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= -cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= -cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= -cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= -cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= 
-cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= -cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= -cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= -cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= -cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= -cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= -cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= -cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= -cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= -cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= -cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= -cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= -cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= -cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= -cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= -cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= -cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= -cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= -cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= -cloud.google.com/go/language 
v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= -cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= -cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= -cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= -cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= -cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= -cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= -cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= -cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= -cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= -cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= -cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= -cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= -cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= -cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= -cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= -cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= -cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= -cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= -cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= -cloud.google.com/go/networkconnectivity 
v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= -cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= -cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= -cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= -cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= -cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= -cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= -cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= -cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= -cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= -cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= -cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= -cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= -cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= -cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= -cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= -cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= -cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= -cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= -cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= -cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= 
-cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= -cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= -cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= -cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= -cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= -cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= -cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= -cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= -cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= -cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= -cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= -cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= -cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= -cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= -cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= -cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod 
h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= -cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= -cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= -cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= -cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= -cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= -cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= -cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= -cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= -cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= -cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= -cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= -cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= -cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= -cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= -cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= -cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= -cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= -cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= -cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= -cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= 
-cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= -cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= -cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= -cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= -cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= -cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= -cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= -cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= -cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= -cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= -cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= -cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= -cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= -cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= -cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= -cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= -cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= -cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= -cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= -cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= -cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= 
-cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= -cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= -cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= -cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= -cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= -cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= -cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= -cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= -cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= -cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= -cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= -cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= -cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= -cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= -cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage 
v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= -cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= -cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= -cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= -cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= -cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= -cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= -cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= -cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= -cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= -cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= -cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= -cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= -cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= -cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= -cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= -cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= -cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= -cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= -cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= -cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= -cloud.google.com/go/videointelligence v1.8.0/go.mod 
h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= -cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= -cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= -cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= -cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= -cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= -cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= -cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= -cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= -cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= -cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= -cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= -cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= -cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= -cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= -cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= -cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= -cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= -cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= -cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= -cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= -cloud.google.com/go/workflows v1.9.0/go.mod 
h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 h1:U2rTu3Ef+7w9FHKIAXM6ZyqF3UOWJZ12zIm8zECAFfg= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 h1:jBQA3cKT4L2rWMpgE7Yt3Hwh2aUj8KXjIGLxjHeYNNo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0 h1:MxA59PGoCFb+vCwRQi3PhQEwHj4+r2dhuv9HG+vM7iM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.5.0/go.mod h1:uYt4CfhkJA9o0FN7jfE5minm/i4nUE4MjGUJkzB6Zs8= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml 
v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g= +github.com/Code-Hex/go-generics-cache v1.3.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/IBM/fluent-forward-go v0.2.2 h1:T48kAjSMOAqTcpd6zkzqLAFOWlYPYIbCFJcEjrVzV1U= +github.com/IBM/fluent-forward-go v0.2.2/go.mod h1:U1SVl6rVRGMC/QhCTZ3iQx4P/ykCeg1y6UoVnlz+OAY= +github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw= +github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= 
+github.com/OneOfOne/xxhash v1.2.6 h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA= +github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= -github.com/Shopify/sarama v1.38.1 h1:lqqPUPQZ7zPqYlWpTh+LQ9bhYNu2xJL6k1SJN4WVe2A= -github.com/Shopify/sarama v1.38.1/go.mod h1:iwv9a67Ha8VNa+TifujYoWGxWnu2kNVAQdSdZ4X2o5g= -github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= -github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0= +github.com/Workiva/go-datastructures v1.1.0 h1:hu20UpgZneBhQ3ZvwiOGlqJSKIosin2Rd5wAKUHEO/k= +github.com/Workiva/go-datastructures v1.1.0/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 
h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= +github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= +github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= +github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= +github.com/alicebob/miniredis/v2 v2.30.4 h1:8S4/o1/KoUArAGbGwPxcwf0krlzceva2XVOSchFS7Eo= +github.com/alicebob/miniredis/v2 v2.30.4/go.mod h1:b25qWj4fCEsBeAAR2mlb0ufImGC6uH3VlUfb/HS5zKg= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= @@ -414,8 +92,11 @@ github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJ github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.44.315 h1:kYTC+Y/bJ9M7QQRvkI/LN5OWvhkIOL/YuFFRhS5QAOo= -github.com/aws/aws-sdk-go v1.44.315/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.50.32 h1:POt81DvegnpQKM4DMDLlHz1CO6OBnEoQ1gRhYFd7QRY= +github.com/aws/aws-sdk-go v1.50.32/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/bboreham/go-loser 
v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -424,9 +105,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= +github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY= +github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= @@ -434,74 +116,85 @@ github.com/cespare/xxhash/v2 v2.2.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.12.3 h1:8ht6F9MquybnY97at+VDZb3eQQr8ev79RueWeVaEcG4= -github.com/cilium/ebpf v0.12.3/go.mod h1:TctK1ivibvI3znr66ljgi4hqOT8EYQjz1KWBfb1UVgM= +github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk= +github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go 
v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creasty/defaults v1.7.0 h1:eNdqZvc5B509z18lD8yc212CAqJNvfT1Jq6L8WowdBA= +github.com/creasty/defaults v1.7.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deepmap/oapi-codegen v1.3.6/go.mod h1:aBozjEveG+33xPiP55Iw/XbVkhtZHEGLq3nxlX0+hfU= github.com/deepmap/oapi-codegen v1.12.4 h1:pPmn6qI9MuOtCz82WY2Xaw46EQjgvxednXXrP7g5Q2s= github.com/deepmap/oapi-codegen v1.12.4/go.mod h1:3lgHGMu6myQ2vqbbTXH2H1o4eXFTGnFiDaOaKKl5yas= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dmachard/go-clientsyslog v0.3.0 h1:CV6PlG6mr6nYoKjcEZP1RFJudw+Lr7D3abVdWRyM0tc= -github.com/dmachard/go-clientsyslog v0.3.0/go.mod h1:llRfIIzxlTNsEQbVF6GKUzxORDWTiSvld3ElJcUtyCo= -github.com/dmachard/go-dnstap-protobuf v1.0.0 h1:I+3buchctfgtUcGU4QJIu7WGyltpH8Nv+Qwj0NRYoBA= -github.com/dmachard/go-dnstap-protobuf v1.0.0/go.mod h1:/qjFCg+/6sZyGcL4d/cV2DyJbxqK7m2TwplyEd6C8TY= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/digitalocean/godo v1.109.0 h1:4W97RJLJSUQ3veRZDNbp1Ol3Rbn6Lmt9bKGvfqYI5SU= +github.com/digitalocean/godo v1.109.0/go.mod h1:R6EmmWI8CT1+fCtjWY9UCB+L5uufuZH13wk3YhxycCs= +github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= +github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= 
+github.com/dmachard/go-clientsyslog v0.4.0 h1:1eL/XIzfRCB9g2kVsY4bHAQE7uPVPv472qtXvzU6u6Y= +github.com/dmachard/go-clientsyslog v0.4.0/go.mod h1:llRfIIzxlTNsEQbVF6GKUzxORDWTiSvld3ElJcUtyCo= +github.com/dmachard/go-dnstap-protobuf v1.0.1 h1:Fu5/SWpRzUPqLPmERw/6CoqcDlYSk5FGC7JzmIKz/F4= +github.com/dmachard/go-dnstap-protobuf v1.0.1/go.mod h1:eU6ktwd2+GWAhorMaRSk3O9hMb8pI41uOKDSB08elOk= github.com/dmachard/go-framestream v0.10.0 h1:NzDOkpJOdrgV/c0XKCsVxijILbdTxsUcUlSwp4y34mw= github.com/dmachard/go-framestream v0.10.0/go.mod h1:CiSK1RmU/7hVsM/NhsroqpBxDH3meawKIXR8x8O+LP4= -github.com/dmachard/go-logger v0.4.0 h1:JJJW8C5Ri6OaWIECAE6dUNqLs4ym1+WX3xD6h5MxLI4= -github.com/dmachard/go-logger v0.4.0/go.mod h1:Gf6Au3CX5l3rZ+Tb3yX31u6h4lwVeZQSBklUI3h8gCA= -github.com/dmachard/go-powerdns-protobuf v1.1.0 h1:hfeJ4+a8ZIahvBiq77VBJANhe9VejD6JzMVBCQ0v7Ds= -github.com/dmachard/go-powerdns-protobuf v1.1.0/go.mod h1:r9oiBWjr6EvgcKynp2QOXhXmYeD0UrlO3JGpJwus4SU= +github.com/dmachard/go-logger v1.0.0 h1:M0z3RDh932uUgvAOSCfLgKbQbrw+3T0SrM4kaM2uhzY= +github.com/dmachard/go-logger v1.0.0/go.mod h1:Gf6Au3CX5l3rZ+Tb3yX31u6h4lwVeZQSBklUI3h8gCA= +github.com/dmachard/go-netutils v0.4.0 h1:u28T4N1sfni0IHC6sqgYIMW8ExtBG8SsYHW7+32gfvE= +github.com/dmachard/go-netutils v0.4.0/go.mod h1:KgAYMuJcF+1Xwtm0SlpJ4S7jBvkFghj+7tFesaHv3BY= +github.com/dmachard/go-powerdns-protobuf v1.1.1 h1:HhgkjPGJN9QCLVFWxiTIwP3E0He8ET9uJZaT7/+6HXw= +github.com/dmachard/go-powerdns-protobuf v1.1.1/go.mod h1:3sewpdCN4u5KpXBxrLpidHAC18v24y+f4OZ4GKfLaME= github.com/dmachard/go-topmap v1.0.0 h1:FzCnB80WJMSPhpEfWt/79y97XotTQjhlrsXKR6435ow= github.com/dmachard/go-topmap v1.0.0/go.mod h1:v+v595j5h02u2Vf9OOr6StGBCX2i6qMTEy8n0dbArEA= +github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ= +github.com/docker/docker 
v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= -github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= -github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= -github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 h1:8yY/I9ndfrgrXUbOGObLHKBR4Fl3nZXwM2c7OYTT8hM= -github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= +github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= +github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= 
+github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= +github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= +github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= 
-github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/farsightsec/golang-framestream v0.3.0 h1:/spFQHucTle/ZIPkYqrfshQqPe2VQEzesH243TjIwqA= github.com/farsightsec/golang-framestream v0.3.0/go.mod h1:eNde4IQyEiA5br02AouhEHCu3p3UzrCdFR4LuQHklMI= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -509,14 +202,11 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= -github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= 
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= -github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/getkin/kin-openapi v0.2.0/go.mod h1:V1z9xl9oF5Wt7v32ne4FmiF1alpS4dM6mNzoywPOXlk= @@ -535,7 +225,28 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/swag 
v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= +github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8= +github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= +github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= @@ -546,9 +257,10 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg= github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU= 
+github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -559,8 +271,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -576,11 +286,9 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= @@ -588,6 +296,8 @@ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -595,22 +305,19 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp 
v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian 
v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -618,63 +325,54 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof 
v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod 
h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= -github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk= +github.com/gophercloud/gophercloud v1.8.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/grafana/dskit v0.0.0-20230804003603-740f56bd2934 h1:W1g+y6rOO7K/Jm2XNPxIXyJisJSJ25uiVVaSa7N1Zwo= -github.com/grafana/dskit v0.0.0-20230804003603-740f56bd2934/go.mod h1:Xg0aN3EpqkYFW1ZxGyIl4BGEpr3QrCQOM1aWalpU3ik= -github.com/grafana/loki v1.6.2-0.20231211180320-2535f9bedeae h1:KLk1jneF9/yVmxizviqWMV8uxwf5DbpYAJeru0NY1AU= -github.com/grafana/loki v1.6.2-0.20231211180320-2535f9bedeae/go.mod h1:sZKmF0+xGpEluaT7bfiVbvvior9zY7Nr2sUMPYp0dIE= -github.com/grafana/loki/pkg/push v0.0.0-20231211180320-2535f9bedeae h1:8v1H5D9zFpqv5HPaAesZEkJftZZhjJ7/+snbOkXVsac= -github.com/grafana/loki/pkg/push v0.0.0-20231211180320-2535f9bedeae/go.mod 
h1:5ll3An1wAxYejo6aM04+3/lc6N4joYVYLY5U+Z4O6vI= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb h1:AWE6+kvtE18HP+lRWNUCyvymyrFSXs6TcS2vXIXGIuw= +github.com/grafana/dskit v0.0.0-20240104111617-ea101a3b86eb/go.mod h1:kkWM4WUV230bNG3urVRWPBnSJHs64y/0RmWjftnnn0c= +github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0 h1:aLBiDMjTtXx2800iCIp+8kdjIlvGX0MF/zICQMQO2qU= +github.com/grafana/gomemcache v0.0.0-20231204155601-7de47a8c3cb0/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= +github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d h1:YwbJJ/PrVWVdnR+j/EAVuazdeP+Za5qbiH1Vlr+wFXs= +github.com/grafana/jsonparser v0.0.0-20240209175146-098958973a2d/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY= +github.com/grafana/loki/pkg/push v0.0.0-20240402204250-824f5aa20aaa h1:WpCBw5tZiBaYG+OSPAqfuTX8U1lkQij/+JdD/ulajKw= +github.com/grafana/loki/pkg/push v0.0.0-20240402204250-824f5aa20aaa/go.mod h1:b0fwVw1GvQyuAoxHa/cywhhl2pn5JYM6zHGex/tshd8= +github.com/grafana/loki/v3 v3.0.0 h1:5heCJG7vV72ONmDdN82l7CUJP8FyLa9fg5AeU/b0tH4= +github.com/grafana/loki/v3 v3.0.0/go.mod h1:ngR+Eqf1gnTpJltNc58go+FsFy+wIYPyTMj/nM9gofs= +github.com/grafana/pyroscope-go/godeltaprof v0.1.6 h1:nEdZ8louGAplSvIJi1HVp7kWvFvdiiYg3COLlTwJiFo= +github.com/grafana/pyroscope-go/godeltaprof v0.1.6/go.mod h1:Tk376Nbldo4Cha9RgiU7ik8WKFkNpfds98aUzS8omLE= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc= -github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo= -github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY= -github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0= +github.com/hashicorp/consul/api v1.28.2 h1:mXfkRHrpHN4YY3RqL09nXU1eHKLNiuAN4kHvDQ16k/8= +github.com/hashicorp/consul/api v1.28.2/go.mod h1:KyzqzgMEya+IZPcD65YFoOVAgPpbfERu4I/tzG6/ueE= +github.com/hashicorp/consul/sdk v0.16.0 h1:SE9m0W6DEfgIVCJX7xU+iv/hUl4m/nxqMTnCdMxDpJ8= +github.com/hashicorp/consul/sdk v0.16.0/go.mod h1:7pxqqhqoaPqnBnzXD1StKed62LqJeClzVsUEy85Zr0A= +github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= +github.com/hashicorp/cronexpr v1.1.2/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 
h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= -github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -686,19 +384,21 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= +github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= -github.com/hashicorp/go-sockaddr v1.0.2/go.mod 
h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-sockaddr v1.0.6 h1:RSG8rKU28VTUTvEKghe5gIhIQpv8evvNpnDEyqO4u9I= +github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= -github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4= @@ -709,20 +409,26 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= +github.com/hashicorp/nomad/api 
v0.0.0-20240306004928-3e7191ccb702 h1:fI1LXuBaS1d9z1kmb++Og6YD8uMRwadXorCwE+xgOFA= +github.com/hashicorp/nomad/api v0.0.0-20240306004928-3e7191ccb702/go.mod h1:z71gkJdrkAt/Rl6C7Q79VE7AwJ5lUF+M+fzFTyIHYB0= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/hetznercloud/hcloud-go/v2 v2.6.0 h1:RJOA2hHZ7rD1pScA4O1NF6qhkHyUdbbxjHgFNot8928= +github.com/hetznercloud/hcloud-go/v2 v2.6.0/go.mod h1:4J1cSE57+g0WS93IiHLV7ubTHItcp+awzeBp5bM9mfA= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= -github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/influxdata/influxdb-client-go v1.4.0 
h1:+KavOkwhLClHFfYcJMHHnTL5CZQhXJzOm5IKHI9BqJk= github.com/influxdata/influxdb-client-go v1.4.0/go.mod h1:S+oZsPivqbcP1S9ur+T+QqXvrYS3NCZeMQtBoH4D1dw= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 h1:W9WBk7wlPfJLvMCdtV4zPulc4uCPrlywQOmbFOhgQNU= github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo= +github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= +github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= @@ -731,12 +437,16 @@ github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVET github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= -github.com/jcmturner/gokrb5/v8 v8.4.3 h1:iTonLeSJOn7MVUtyMT+arAn5AKAPrkilzhGw8wE/Tq8= -github.com/jcmturner/gokrb5/v8 v8.4.3/go.mod h1:dqRwJGXznQrzw6cWmyo6kH+E7jksEQG/CyVWsJEsJO0= +github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8= +github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= github.com/jmespath/go-jmespath v0.4.0 
h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -755,11 +465,13 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= -github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= 
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -768,10 +480,14 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/linode/linodego v1.29.0 h1:gDSQWAbKMAQX8db9FDCXHhodQPrJmLcmthjx6m+PyV4= +github.com/linode/linodego v1.29.0/go.mod h1:3k6WvCM10gillgYcnoLqIL23ST27BD9HhMsCJWb3Bpk= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= 
github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -789,21 +505,24 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs= +github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= 
+github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -811,16 +530,28 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nqd/flat v0.2.0 h1:g6lXtMxsxrz6PZOO+rNnAJUn/GGRrK4FgVEhy/v+cHI= -github.com/nqd/flat v0.2.0/go.mod h1:FOuslZmNY082wVfVUUb7qAGWKl8z8Nor9FMg+Xj2Nss= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= +github.com/onsi/gomega v1.29.0/go.mod 
h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w= @@ -828,20 +559,24 @@ github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NH github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq50AS6wALUMYs= -github.com/oschwald/maxminddb-golang v1.12.0/go.mod h1:q0Nob5lTCqyQ8WT6FYgS1L7PXKVVbgiymefNwIjPzgY= +github.com/oschwald/maxminddb-golang v1.13.0 h1:R8xBorY71s84yO06NgTmQvqvTvlS/bnYZrrWX1MElnU= +github.com/oschwald/maxminddb-golang v1.13.0/go.mod h1:BU0z8BfFVhi1LQaonTwwGQlsHUEu9pWNdMfmq4ztm0o= +github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0= +github.com/ovh/go-ovh v1.4.3/go.mod h1:AkPXVtgwB6xlKblMjRKJJmjRp+ogrE7fz2lVgcQY8SY= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= 
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= +github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= -github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= +github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -851,25 
+586,27 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= -github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= +github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8= +github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.9.1 h1:cNkC01riqiOS+kh3zdnNwRsbe/Blh0WwK3ij5rPJ9Sw= -github.com/prometheus/exporter-toolkit v0.9.1/go.mod h1:iFlTmFISCix0vyuyBmm0UqOUCTao9+RsAsKJP3YM9ec= +github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g= +github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -877,53 +614,61 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O github.com/prometheus/procfs 
v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/prometheus/prometheus v0.43.1-0.20230419161410-69155c6ba1e9 h1:GrpznPCSJgx8mGGj5qfKoHiou/dVx7uMce9/9rSdiuY= -github.com/prometheus/prometheus v0.43.1-0.20230419161410-69155c6ba1e9/go.mod h1:L8xLODXgpZM57D1MA7SPgsDecKj6ez4AF7mMczR1bis= +github.com/prometheus/prometheus v0.51.0 h1:aRdjTnmHLved29ILtdzZN2GNvOjWATtA/z+3fYuexOc= +github.com/prometheus/prometheus v0.51.0/go.mod h1:yv4MwOn3yHMQ6MZGHPg/U7Fcyqf+rxqiZfSur6myVtc= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rs/tzsp v0.0.0-20161230003637-8ce729c826b9 h1:upQjqUCvtoYMwHSXn0eGc1lsVJpEi90u3oMjmLKa9ac= github.com/rs/tzsp v0.0.0-20161230003637-8ce729c826b9/go.mod h1:pFz3aQBXB8wqK0Mnt7iOEgcrpRHgpP+1xNnOy7Ok1Bw= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod 
h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25 h1:/8rfZAdFfafRXOgz+ZpMZZWZ5pYggCY9t7e/BvjaBHM= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.25/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0= github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg= -github.com/sercand/kuberesolver/v4 v4.0.0 h1:frL7laPDG/lFm5n98ODmWnn+cvPpzlkf3LhzuPhcHP4= -github.com/sercand/kuberesolver/v4 v4.0.0/go.mod h1:F4RGyuRmMAjeXHKL+w4P7AwUnPceEAPAhxUgXZjKgvM= +github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY= +github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= 
+github.com/sony/gobreaker v0.5.0 h1:dRCvqm0P490vZPmy7ppEk2qCnCieBooFJ+YoXGYB+yg= +github.com/sony/gobreaker v0.5.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tinylib/msgp v1.1.9 h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU= +github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= @@ -932,10 +677,8 @@ github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6 github.com/valyala/bytebufferpool 
v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= -github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= -github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= +github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= +github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= @@ -948,6 +691,8 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE= +github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.4 h1:lrneYvz923dvC14R54XcA7FXoZ3mlGZAgmwhfm7HqOg= @@ -959,25 +704,30 
@@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod 
h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go4.org/intern v0.0.0-20211027215823-ae77deb06f29 h1:UXLjNohABv4S58tHmeuIZDO6e3mHpW2Dx33gaNt03LE= go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA= +go4.org/netipx v0.0.0-20230125063823-8449b0a6169f h1:ketMxHg+vWm3yccyYiq+uK8D3fRmna2Fcj+awpQp84s= +go4.org/netipx v0.0.0-20230125063823-8449b0a6169f/go.mod h1:tgPU4N2u9RByaTN3NC2p9xOzyFpte4jYwsIIRF7XlSc= go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 h1:WJhcL4p+YeDxmZWg141nRm7XC8IDmhz7lk5GpadO1Sg= go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= @@ -986,18 +736,16 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1011,8 +759,8 @@ 
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw= +golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1026,7 +774,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod 
h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -1036,15 +783,11 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1077,67 +820,28 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM= -golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod 
h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= +golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1149,12 +853,10 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1195,91 +897,60 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
-golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1325,32 +996,18 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= 
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod 
h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= @@ -1371,49 +1028,12 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api 
v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= -google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= -google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= -google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= -google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= -google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= 
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1445,94 +1065,11 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
-google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod 
h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto 
v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= -google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= 
-google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= -google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= -google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= -google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= -google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= -google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= -google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod 
h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= -google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= -google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= -google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= -google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc h1:8DyZCyvI8mE1IdLy/60bS+52xfymkE72wv1asokgtao= -google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= -google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM= -google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= +google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8 h1:8eadJkXbwDEMNwcB5O0s5Y5eCfyuCLdvaiOIaGTrWmQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240304212257-790db918fca8/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 h1:Xs9lu+tLXxLIfuci70nG4cpwaRC+mRQPUL7LoIeDJC4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1547,35 +1084,10 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod 
h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= -google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= -google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1588,11 +1100,8 @@ 
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1603,6 +1112,10 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 
h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -1629,8 +1142,26 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6 h1:acCzuUSQ79tGsM/O50VRFySfMm19IoMKL+sZztZkCxw= inet.af/netaddr v0.0.0-20211027220019-c74959edd3b6/go.mod h1:y3MGhcFMlh0KZPMuXXow8mpjxxAk3yoDNsp4cQz54i8= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= +k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/json 
v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/loggers/dnstapclient.go b/loggers/dnstapclient.go deleted file mode 100644 index be4d4a10..00000000 --- a/loggers/dnstapclient.go +++ /dev/null @@ -1,390 +0,0 @@ -package loggers - -import ( - "bufio" - "crypto/tls" - "net" - "strconv" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-framestream" - "github.com/dmachard/go-logger" -) - -type DnstapSender struct { - stopProcess chan bool - doneProcess chan bool - stopRun chan bool - doneRun chan bool - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - fs *framestream.Fstrm - fsReady bool - transport string - transportConn net.Conn - transportReady chan bool - transportReconnect chan bool - name string - RoutingHandler pkgutils.RoutingHandler -} - -func NewDnstapSender(config *pkgconfig.Config, logger *logger.Logger, name string) *DnstapSender { - logger.Info(pkgutils.PrefixLogLogger+"[%s] dnstap - enabled", name) - ds := &DnstapSender{ - stopProcess: make(chan bool), - doneProcess: make(chan bool), - 
stopRun: make(chan bool), - doneRun: make(chan bool), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.DNSTap.ChannelBufferSize), - outputChan: make(chan dnsutils.DNSMessage, config.Loggers.DNSTap.ChannelBufferSize), - transportReady: make(chan bool), - transportReconnect: make(chan bool), - logger: logger, - config: config, - configChan: make(chan *pkgconfig.Config), - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - } - - ds.ReadConfig() - return ds -} - -func (ds *DnstapSender) GetName() string { return ds.name } - -func (ds *DnstapSender) AddDroppedRoute(wrk pkgutils.Worker) { - ds.RoutingHandler.AddDroppedRoute(wrk) -} - -func (ds *DnstapSender) AddDefaultRoute(wrk pkgutils.Worker) { - ds.RoutingHandler.AddDefaultRoute(wrk) -} - -func (ds *DnstapSender) SetLoggers(loggers []pkgutils.Worker) {} - -func (ds *DnstapSender) ReadConfig() { - ds.transport = ds.config.Loggers.DNSTap.Transport - - // begin backward compatibility - if ds.config.Loggers.DNSTap.TLSSupport { - ds.transport = netlib.SocketTLS - } - if len(ds.config.Loggers.DNSTap.SockPath) > 0 { - ds.transport = netlib.SocketUnix - } - // end - - // get hostname or global one - if ds.config.Loggers.DNSTap.ServerID == "" { - ds.config.Loggers.DNSTap.ServerID = ds.config.GetServerIdentity() - } - - if !pkgconfig.IsValidTLS(ds.config.Loggers.DNSTap.TLSMinVersion) { - ds.logger.Fatal(pkgutils.PrefixLogLogger + "[" + ds.name + "] dnstap - invalid tls min version") - } -} - -func (ds *DnstapSender) ReloadConfig(config *pkgconfig.Config) { - ds.LogInfo("reload configuration!") - ds.configChan <- config -} - -func (ds *DnstapSender) LogInfo(msg string, v ...interface{}) { - ds.logger.Info(pkgutils.PrefixLogLogger+"["+ds.name+"] dnstap - "+msg, v...) -} - -func (ds *DnstapSender) LogError(msg string, v ...interface{}) { - ds.logger.Error(pkgutils.PrefixLogLogger+"["+ds.name+"] dnstap - "+msg, v...) 
-} - -func (ds *DnstapSender) GetInputChannel() chan dnsutils.DNSMessage { - return ds.inputChan -} - -func (ds *DnstapSender) Stop() { - ds.LogInfo("stopping logger...") - ds.RoutingHandler.Stop() - - ds.LogInfo("stopping to run...") - ds.stopRun <- true - <-ds.doneRun - - ds.LogInfo("stopping to process...") - ds.stopProcess <- true - <-ds.doneProcess -} - -func (ds *DnstapSender) Disconnect() { - if ds.transportConn != nil { - // reset framestream and ignore errors - ds.LogInfo("closing framestream") - ds.fs.ResetSender() - - // closing tcp - ds.LogInfo("closing tcp connection") - ds.transportConn.Close() - ds.LogInfo("closed") - } -} - -func (ds *DnstapSender) ConnectToRemote() { - for { - if ds.transportConn != nil { - ds.transportConn.Close() - ds.transportConn = nil - } - - address := net.JoinHostPort( - ds.config.Loggers.DNSTap.RemoteAddress, - strconv.Itoa(ds.config.Loggers.DNSTap.RemotePort), - ) - connTimeout := time.Duration(ds.config.Loggers.DNSTap.ConnectTimeout) * time.Second - - // make the connection - var conn net.Conn - var err error - - switch ds.transport { - case netlib.SocketUnix: - address = ds.config.Loggers.DNSTap.RemoteAddress - if len(ds.config.Loggers.DNSTap.SockPath) > 0 { - address = ds.config.Loggers.DNSTap.SockPath - } - ds.LogInfo("connecting to %s://%s", ds.transport, address) - conn, err = net.DialTimeout(ds.transport, address, connTimeout) - - case netlib.SocketTCP: - ds.LogInfo("connecting to %s://%s", ds.transport, address) - conn, err = net.DialTimeout(ds.transport, address, connTimeout) - - case netlib.SocketTLS: - ds.LogInfo("connecting to %s://%s", ds.transport, address) - - var tlsConfig *tls.Config - - tlsOptions := pkgconfig.TLSOptions{ - InsecureSkipVerify: ds.config.Loggers.DNSTap.TLSInsecure, - MinVersion: ds.config.Loggers.DNSTap.TLSMinVersion, - CAFile: ds.config.Loggers.DNSTap.CAFile, - CertFile: ds.config.Loggers.DNSTap.CertFile, - KeyFile: ds.config.Loggers.DNSTap.KeyFile, - } - - tlsConfig, err = 
pkgconfig.TLSClientConfig(tlsOptions) - if err == nil { - dialer := &net.Dialer{Timeout: connTimeout} - conn, err = tls.DialWithDialer(dialer, netlib.SocketTCP, address, tlsConfig) - } - default: - ds.logger.Fatal("logger=dnstap - invalid transport:", ds.transport) - } - - // something is wrong during connection ? - if err != nil { - ds.LogError("%s", err) - ds.LogInfo("retry to connect in %d seconds", ds.config.Loggers.DNSTap.RetryInterval) - time.Sleep(time.Duration(ds.config.Loggers.DNSTap.RetryInterval) * time.Second) - continue - } - - ds.transportConn = conn - - // block until framestream is ready - ds.transportReady <- true - - // block until an error occurred, need to reconnect - ds.transportReconnect <- true - } -} - -func (ds *DnstapSender) FlushBuffer(buf *[]dnsutils.DNSMessage) { - - var data []byte - var err error - frame := &framestream.Frame{} - - for _, dm := range *buf { - // update identity ? - if ds.config.Loggers.DNSTap.OverwriteIdentity { - dm.DNSTap.Identity = ds.config.Loggers.DNSTap.ServerID - } - - // encode dns message to dnstap protobuf binary - data, err = dm.ToDNSTap(ds.config.Loggers.DNSTap.ExtendedSupport) - if err != nil { - ds.LogError("failed to encode to DNStap protobuf: %s", err) - continue - } - - // send the frame - frame.Write(data) - if err := ds.fs.SendFrame(frame); err != nil { - ds.LogError("send frame error %s", err) - ds.fsReady = false - <-ds.transportReconnect - break - } - } - - // reset buffer - *buf = nil -} - -func (ds *DnstapSender) Run() { - ds.LogInfo("running in background...") - - // prepare next channels - defaultRoutes, defaultNames := ds.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := ds.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, ds.outputChan) - subprocessors := transformers.NewTransforms(&ds.config.OutgoingTransformers, ds.logger, ds.name, listChannel, 0) - - // goroutine to process 
transformed dns messages - go ds.Process() - - // init remote conn - go ds.ConnectToRemote() - - // loop to process incoming messages -RUN_LOOP: - for { - select { - case <-ds.stopRun: - // cleanup transformers - subprocessors.Reset() - - ds.doneRun <- true - break RUN_LOOP - - case cfg, opened := <-ds.configChan: - if !opened { - return - } - ds.config = cfg - ds.ReadConfig() - subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, opened := <-ds.inputChan: - if !opened { - ds.LogInfo("input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - ds.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? - ds.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - ds.outputChan <- dm - } - } - ds.LogInfo("run terminated") -} - -func (ds *DnstapSender) Process() { - // init buffer - bufferDm := []dnsutils.DNSMessage{} - - // init flust timer for buffer - flushInterval := time.Duration(ds.config.Loggers.DNSTap.FlushInterval) * time.Second - flushTimer := time.NewTimer(flushInterval) - - // nextStanzaBufferInterval := 10 * time.Second - // nextStanzaBufferFull := time.NewTimer(nextStanzaBufferInterval) - - ds.LogInfo("ready to process") -PROCESS_LOOP: - for { - select { - case <-ds.stopProcess: - // closing remote connection if exist - ds.Disconnect() - - ds.doneProcess <- true - break PROCESS_LOOP - - // case stanzaName := <-ds.dropped: - // if _, ok := ds.droppedCount[stanzaName]; !ok { - // ds.droppedCount[stanzaName] = 1 - // } else { - // ds.droppedCount[stanzaName]++ - // } - - // init framestream - case <-ds.transportReady: - ds.LogInfo("transport connected with success") - // frame stream library - r := bufio.NewReader(ds.transportConn) - w := bufio.NewWriter(ds.transportConn) - ds.fs = framestream.NewFstrm(r, w, 
ds.transportConn, 5*time.Second, []byte("protobuf:dnstap.Dnstap"), true) - - // init framestream protocol - if err := ds.fs.InitSender(); err != nil { - ds.LogError("sender protocol initialization error %s", err) - ds.fsReady = false - ds.transportConn.Close() - <-ds.transportReconnect - } else { - ds.fsReady = true - ds.LogInfo("framestream initialized with success") - } - // incoming dns message to process - case dm, opened := <-ds.outputChan: - if !opened { - ds.LogInfo("output channel closed!") - return - } - - // drop dns message if the connection is not ready to avoid memory leak or - // to block the channel - if !ds.fsReady { - continue - } - - // append dns message to buffer - bufferDm = append(bufferDm, dm) - - // buffer is full ? - if len(bufferDm) >= ds.config.Loggers.DNSTap.BufferSize { - ds.FlushBuffer(&bufferDm) - } - - // flush the buffer - case <-flushTimer.C: - // force to flush the buffer - if len(bufferDm) > 0 { - ds.FlushBuffer(&bufferDm) - } - - // restart timer - flushTimer.Reset(flushInterval) - - // case <-nextStanzaBufferFull.C: - // for v, k := range ds.droppedCount { - // if k > 0 { - // ds.LogError("stanza[%s] buffer is full, %d packet(s) dropped", v, k) - // ds.droppedCount[v] = 0 - // } - // } - // nextStanzaBufferFull.Reset(nextStanzaBufferInterval) - } - } - ds.LogInfo("processing terminated") -} diff --git a/loggers/elasticsearch.go b/loggers/elasticsearch.go deleted file mode 100644 index bee06192..00000000 --- a/loggers/elasticsearch.go +++ /dev/null @@ -1,232 +0,0 @@ -package loggers - -import ( - "bytes" - "encoding/json" - "path" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" - - "net/http" - "net/url" -) - -type ElasticSearchClient struct { - stopProcess chan bool - doneProcess chan bool - 
stopRun chan bool - doneRun chan bool - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - name string - server string - index string - bulkURL string - RoutingHandler pkgutils.RoutingHandler -} - -func NewElasticSearchClient(config *pkgconfig.Config, console *logger.Logger, name string) *ElasticSearchClient { - console.Info(pkgutils.PrefixLogLogger+"[%s] elasticsearch - enabled", name) - ec := &ElasticSearchClient{ - stopProcess: make(chan bool), - doneProcess: make(chan bool), - stopRun: make(chan bool), - doneRun: make(chan bool), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.ElasticSearchClient.ChannelBufferSize), - outputChan: make(chan dnsutils.DNSMessage, config.Loggers.ElasticSearchClient.ChannelBufferSize), - logger: console, - config: config, - configChan: make(chan *pkgconfig.Config), - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, console, name), - } - ec.ReadConfig() - return ec -} - -func (ec *ElasticSearchClient) GetName() string { return ec.name } - -func (ec *ElasticSearchClient) AddDroppedRoute(wrk pkgutils.Worker) { - ec.RoutingHandler.AddDroppedRoute(wrk) -} - -func (ec *ElasticSearchClient) AddDefaultRoute(wrk pkgutils.Worker) { - ec.RoutingHandler.AddDefaultRoute(wrk) -} - -func (ec *ElasticSearchClient) SetLoggers(loggers []pkgutils.Worker) {} - -func (ec *ElasticSearchClient) ReadConfig() { - ec.server = ec.config.Loggers.ElasticSearchClient.Server - ec.index = ec.config.Loggers.ElasticSearchClient.Index - - u, err := url.Parse(ec.server) - if err != nil { - ec.LogError(err.Error()) - } - u.Path = path.Join(u.Path, ec.index, "_bulk") - ec.bulkURL = u.String() -} - -func (ec *ElasticSearchClient) ReloadConfig(config *pkgconfig.Config) { - ec.LogInfo("reload configuration!") - ec.configChan <- config -} - -func (ec *ElasticSearchClient) GetInputChannel() chan dnsutils.DNSMessage { - return ec.inputChan 
-} - -func (ec *ElasticSearchClient) LogInfo(msg string, v ...interface{}) { - ec.logger.Info(pkgutils.PrefixLogLogger+"["+ec.name+"] elasticsearch - "+msg, v...) -} - -func (ec *ElasticSearchClient) LogError(msg string, v ...interface{}) { - ec.logger.Error(pkgutils.PrefixLogLogger+"["+ec.name+"] elasticsearch - "+msg, v...) -} - -func (ec *ElasticSearchClient) Stop() { - ec.LogInfo("stopping logger...") - ec.RoutingHandler.Stop() - - ec.LogInfo("stopping to run...") - ec.stopRun <- true - <-ec.doneRun - - ec.LogInfo("stopping to process...") - ec.stopProcess <- true - <-ec.doneProcess -} - -func (ec *ElasticSearchClient) Run() { - ec.LogInfo("running in background...") - - // prepare next channels - defaultRoutes, defaultNames := ec.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := ec.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, ec.outputChan) - subprocessors := transformers.NewTransforms(&ec.config.OutgoingTransformers, ec.logger, ec.name, listChannel, 0) - - // goroutine to process transformed dns messages - go ec.Process() - - // loop to process incoming messages -RUN_LOOP: - for { - select { - case <-ec.stopRun: - // cleanup transformers - subprocessors.Reset() - - ec.doneRun <- true - break RUN_LOOP - - case cfg, opened := <-ec.configChan: - if !opened { - return - } - ec.config = cfg - ec.ReadConfig() - subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, opened := <-ec.inputChan: - if !opened { - ec.LogInfo("input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - ec.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? 
- ec.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - ec.outputChan <- dm - } - } - ec.LogInfo("run terminated") -} - -func (ec *ElasticSearchClient) FlushBuffer(buf *[]dnsutils.DNSMessage) { - buffer := new(bytes.Buffer) - - for _, dm := range *buf { - buffer.WriteString("{ \"create\" : {}}") - buffer.WriteString("\n") - // encode - flat, err := dm.Flatten() - if err != nil { - ec.LogError("flattening DNS message failed: %e", err) - } - json.NewEncoder(buffer).Encode(flat) - } - - req, _ := http.NewRequest("POST", ec.bulkURL, buffer) - req.Header.Set("Content-Type", "application/json") - client := &http.Client{ - Timeout: 5 * time.Second, - } - _, err := client.Do(req) - if err != nil { - ec.LogError(err.Error()) - } - - *buf = nil -} - -func (ec *ElasticSearchClient) Process() { - bufferDm := []dnsutils.DNSMessage{} - ec.LogInfo("ready to process") - - flushInterval := time.Duration(ec.config.Loggers.ElasticSearchClient.FlushInterval) * time.Second - flushTimer := time.NewTimer(flushInterval) - -PROCESS_LOOP: - for { - select { - case <-ec.stopProcess: - ec.doneProcess <- true - break PROCESS_LOOP - - // incoming dns message to process - case dm, opened := <-ec.outputChan: - if !opened { - ec.LogInfo("output channel closed!") - return - } - - // append dns message to buffer - bufferDm = append(bufferDm, dm) - - // buffer is full ? 
- if len(bufferDm) >= ec.config.Loggers.ElasticSearchClient.BulkSize { - ec.FlushBuffer(&bufferDm) - } - // flush the buffer - case <-flushTimer.C: - if len(bufferDm) > 0 { - ec.FlushBuffer(&bufferDm) - } - - // restart timer - flushTimer.Reset(flushInterval) - } - } - ec.LogInfo("processing terminated") -} diff --git a/loggers/falco.go b/loggers/falco.go deleted file mode 100644 index 7162c910..00000000 --- a/loggers/falco.go +++ /dev/null @@ -1,189 +0,0 @@ -package loggers - -import ( - "bytes" - "encoding/json" - "net/http" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" -) - -type FalcoClient struct { - stopProcess chan bool - doneProcess chan bool - stopRun chan bool - doneRun chan bool - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - name string - url string - RoutingHandler pkgutils.RoutingHandler -} - -func NewFalcoClient(config *pkgconfig.Config, console *logger.Logger, name string) *FalcoClient { - console.Info(pkgutils.PrefixLogLogger+"[%s] falco - enabled", name) - fc := &FalcoClient{ - stopProcess: make(chan bool), - doneProcess: make(chan bool), - stopRun: make(chan bool), - doneRun: make(chan bool), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.FalcoClient.ChannelBufferSize), - outputChan: make(chan dnsutils.DNSMessage, config.Loggers.FalcoClient.ChannelBufferSize), - logger: console, - config: config, - configChan: make(chan *pkgconfig.Config), - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, console, name), - } - fc.ReadConfig() - return fc -} - -func (fc *FalcoClient) GetName() string { return fc.name } - -func (fc *FalcoClient) AddDroppedRoute(wrk pkgutils.Worker) { - 
fc.RoutingHandler.AddDroppedRoute(wrk) -} - -func (fc *FalcoClient) AddDefaultRoute(wrk pkgutils.Worker) { - fc.RoutingHandler.AddDefaultRoute(wrk) -} - -func (fc *FalcoClient) SetLoggers(loggers []pkgutils.Worker) {} - -func (fc *FalcoClient) ReadConfig() { - fc.url = fc.config.Loggers.FalcoClient.URL -} - -func (fc *FalcoClient) ReloadConfig(config *pkgconfig.Config) { - fc.LogInfo("reload configuration!") - fc.configChan <- config -} - -func (fc *FalcoClient) GetInputChannel() chan dnsutils.DNSMessage { - return fc.inputChan -} - -func (fc *FalcoClient) LogInfo(msg string, v ...interface{}) { - fc.logger.Info(pkgutils.PrefixLogLogger+"["+fc.name+"] falco - "+msg, v...) -} - -func (fc *FalcoClient) LogError(msg string, v ...interface{}) { - fc.logger.Error(pkgutils.PrefixLogLogger+"["+fc.name+"] falco - "+msg, v...) -} - -func (fc *FalcoClient) Stop() { - fc.LogInfo("stopping logger...") - fc.RoutingHandler.Stop() - - fc.LogInfo("stopping to run...") - fc.stopRun <- true - <-fc.doneRun - - fc.LogInfo("stopping to process...") - fc.stopProcess <- true - <-fc.doneProcess -} - -func (fc *FalcoClient) Run() { - fc.LogInfo("running in background...") - - // prepare next channels - defaultRoutes, defaultNames := fc.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := fc.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, fc.outputChan) - subprocessors := transformers.NewTransforms(&fc.config.OutgoingTransformers, fc.logger, fc.name, listChannel, 0) - - // goroutine to process transformed dns messages - go fc.Process() - - // loop to process incoming messages -RUN_LOOP: - for { - select { - case <-fc.stopRun: - // cleanup transformers - subprocessors.Reset() - - fc.doneRun <- true - break RUN_LOOP - - case cfg, opened := <-fc.configChan: - if !opened { - return - } - fc.config = cfg - fc.ReadConfig() - subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, 
opened := <-fc.inputChan: - if !opened { - fc.LogInfo("input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - fc.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? - fc.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - fc.outputChan <- dm - } - } - fc.LogInfo("run terminated") -} - -func (fc *FalcoClient) Process() { - buffer := new(bytes.Buffer) - fc.LogInfo("ready to process") - -PROCESS_LOOP: - for { - select { - case <-fc.stopProcess: - fc.doneProcess <- true - break PROCESS_LOOP - - // incoming dns message to process - case dm, opened := <-fc.outputChan: - if !opened { - fc.LogInfo("output channel closed!") - return - } - - // encode - json.NewEncoder(buffer).Encode(dm) - - req, _ := http.NewRequest("POST", fc.url, buffer) - req.Header.Set("Content-Type", "application/json") - client := &http.Client{ - Timeout: 5 * time.Second, - } - _, err := client.Do(req) - if err != nil { - fc.LogError(err.Error()) - } - - // finally reset the buffer for next iter - buffer.Reset() - } - } - fc.LogInfo("processing terminated") -} diff --git a/loggers/fluentd.go b/loggers/fluentd.go deleted file mode 100644 index ccd31512..00000000 --- a/loggers/fluentd.go +++ /dev/null @@ -1,376 +0,0 @@ -package loggers - -import ( - "crypto/tls" - "errors" - "io" - "net" - "strconv" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" - "github.com/vmihailenco/msgpack" -) - -type FluentdClient struct { - stopProcess chan bool - doneProcess chan bool - 
stopRun chan bool - doneRun chan bool - stopRead chan bool - doneRead chan bool - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - transport string - transportConn net.Conn - transportReady chan bool - transportReconnect chan bool - writerReady bool - name string - RoutingHandler pkgutils.RoutingHandler -} - -func NewFluentdClient(config *pkgconfig.Config, logger *logger.Logger, name string) *FluentdClient { - logger.Info(pkgutils.PrefixLogLogger+"[%s] fluentd - enabled", name) - fc := &FluentdClient{ - stopProcess: make(chan bool), - doneProcess: make(chan bool), - stopRun: make(chan bool), - doneRun: make(chan bool), - stopRead: make(chan bool), - doneRead: make(chan bool), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.Fluentd.ChannelBufferSize), - outputChan: make(chan dnsutils.DNSMessage, config.Loggers.Fluentd.ChannelBufferSize), - transportReady: make(chan bool), - transportReconnect: make(chan bool), - logger: logger, - config: config, - configChan: make(chan *pkgconfig.Config), - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - } - - fc.ReadConfig() - return fc -} - -func (fc *FluentdClient) GetName() string { return fc.name } - -func (fc *FluentdClient) AddDroppedRoute(wrk pkgutils.Worker) { - fc.RoutingHandler.AddDroppedRoute(wrk) -} - -func (fc *FluentdClient) AddDefaultRoute(wrk pkgutils.Worker) { - fc.RoutingHandler.AddDefaultRoute(wrk) -} - -func (fc *FluentdClient) SetLoggers(loggers []pkgutils.Worker) {} - -func (fc *FluentdClient) ReadConfig() { - fc.transport = fc.config.Loggers.Fluentd.Transport - - // begin backward compatibility - if fc.config.Loggers.Fluentd.TLSSupport { - fc.transport = netlib.SocketTLS - } - if len(fc.config.Loggers.Fluentd.SockPath) > 0 { - fc.transport = netlib.SocketUnix - } -} - -func (fc *FluentdClient) ReloadConfig(config *pkgconfig.Config) { - fc.LogInfo("reload 
configuration!") - fc.configChan <- config -} - -func (fc *FluentdClient) LogInfo(msg string, v ...interface{}) { - fc.logger.Info(pkgutils.PrefixLogLogger+"["+fc.name+"] fluentd - "+msg, v...) -} - -func (fc *FluentdClient) LogError(msg string, v ...interface{}) { - fc.logger.Error(pkgutils.PrefixLogLogger+"["+fc.name+"] fluentd - "+msg, v...) -} - -func (fc *FluentdClient) GetInputChannel() chan dnsutils.DNSMessage { - return fc.inputChan -} - -func (fc *FluentdClient) Stop() { - fc.LogInfo("stopping logger...") - fc.RoutingHandler.Stop() - - fc.LogInfo("stopping to run...") - fc.stopRun <- true - <-fc.doneRun - - fc.LogInfo("stopping to read...") - fc.stopRead <- true - <-fc.doneRead - - fc.LogInfo("stopping to process...") - fc.stopProcess <- true - <-fc.doneProcess -} - -func (fc *FluentdClient) Disconnect() { - if fc.transportConn != nil { - fc.LogInfo("closing tcp connection") - fc.transportConn.Close() - } -} - -func (fc *FluentdClient) ReadFromConnection() { - buffer := make([]byte, 4096) - - go func() { - for { - _, err := fc.transportConn.Read(buffer) - if err != nil { - if errors.Is(err, io.EOF) || errors.Is(err, net.ErrClosed) { - fc.LogInfo("read from connection terminated") - break - } - fc.LogError("Error on reading: %s", err.Error()) - } - // We just discard the data - } - }() - - // block goroutine until receive true event in stopRead channel - <-fc.stopRead - fc.doneRead <- true - - fc.LogInfo("read goroutine terminated") -} - -func (fc *FluentdClient) ConnectToRemote() { - for { - if fc.transportConn != nil { - fc.transportConn.Close() - fc.transportConn = nil - } - - address := fc.config.Loggers.Fluentd.RemoteAddress + ":" + strconv.Itoa(fc.config.Loggers.Fluentd.RemotePort) - connTimeout := time.Duration(fc.config.Loggers.Fluentd.ConnectTimeout) * time.Second - - // make the connection - var conn net.Conn - var err error - - switch fc.transport { - case netlib.SocketUnix: - address = fc.config.Loggers.Fluentd.RemoteAddress - if 
len(fc.config.Loggers.Fluentd.SockPath) > 0 { - address = fc.config.Loggers.Fluentd.SockPath - } - fc.LogInfo("connecting to %s://%s", fc.transport, address) - conn, err = net.DialTimeout(fc.transport, address, connTimeout) - - case netlib.SocketTCP: - fc.LogInfo("connecting to %s://%s", fc.transport, address) - conn, err = net.DialTimeout(fc.transport, address, connTimeout) - - case netlib.SocketTLS: - fc.LogInfo("connecting to %s://%s", fc.transport, address) - - var tlsConfig *tls.Config - - tlsOptions := pkgconfig.TLSOptions{ - InsecureSkipVerify: fc.config.Loggers.Fluentd.TLSInsecure, - MinVersion: fc.config.Loggers.Fluentd.TLSMinVersion, - CAFile: fc.config.Loggers.Fluentd.CAFile, - CertFile: fc.config.Loggers.Fluentd.CertFile, - KeyFile: fc.config.Loggers.Fluentd.KeyFile, - } - - tlsConfig, err = pkgconfig.TLSClientConfig(tlsOptions) - if err == nil { - dialer := &net.Dialer{Timeout: connTimeout} - conn, err = tls.DialWithDialer(dialer, netlib.SocketTCP, address, tlsConfig) - } - default: - fc.logger.Fatal("logger=fluent - invalid transport:", fc.transport) - } - - // something is wrong during connection ? - if err != nil { - fc.LogError("connect error: %s", err) - fc.LogInfo("retry to connect in %d seconds", fc.config.Loggers.Fluentd.RetryInterval) - time.Sleep(time.Duration(fc.config.Loggers.Fluentd.RetryInterval) * time.Second) - continue - } - - fc.transportConn = conn - - // block until framestream is ready - fc.transportReady <- true - - // block until an error occurred, need to reconnect - fc.transportReconnect <- true - } -} - -func (fc *FluentdClient) FlushBuffer(buf *[]dnsutils.DNSMessage) { - - tag, _ := msgpack.Marshal(fc.config.Loggers.Fluentd.Tag) - - for _, dm := range *buf { - // prepare event - tm, _ := msgpack.Marshal(dm.DNSTap.TimeSec) - record, err := msgpack.Marshal(dm) - if err != nil { - fc.LogError("msgpack error:", err.Error()) - continue - } - - // Message ::= [ Tag, Time, Record, Option? 
] - encoded := []byte{} - // array, size 3 - encoded = append(encoded, 0x93) - // append tag, time and record - encoded = append(encoded, tag...) - encoded = append(encoded, tm...) - encoded = append(encoded, record...) - - // write event message - _, err = fc.transportConn.Write(encoded) - - // flusth the buffer - if err != nil { - fc.LogError("send transport error", err.Error()) - fc.writerReady = false - <-fc.transportReconnect - break - } - } -} - -func (fc *FluentdClient) Run() { - fc.LogInfo("running in background...") - - // prepare next channels - defaultRoutes, defaultNames := fc.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := fc.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, fc.outputChan) - subprocessors := transformers.NewTransforms(&fc.config.OutgoingTransformers, fc.logger, fc.name, listChannel, 0) - - // goroutine to process transformed dns messages - go fc.Process() - - // init remote conn - go fc.ConnectToRemote() - - // loop to process incoming messages -RUN_LOOP: - for { - select { - case <-fc.stopRun: - // cleanup transformers - subprocessors.Reset() - - fc.doneRun <- true - break RUN_LOOP - - case cfg, opened := <-fc.configChan: - if !opened { - return - } - fc.config = cfg - fc.ReadConfig() - subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, opened := <-fc.inputChan: - if !opened { - fc.LogInfo("input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - fc.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? 
- fc.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - fc.outputChan <- dm - } - } - fc.LogInfo("run terminated") -} - -func (fc *FluentdClient) Process() { - // init buffer - bufferDm := []dnsutils.DNSMessage{} - - // init flust timer for buffer - flushInterval := time.Duration(fc.config.Loggers.Fluentd.FlushInterval) * time.Second - flushTimer := time.NewTimer(flushInterval) - - fc.LogInfo("ready to process") - -PROCESS_LOOP: - for { - select { - case <-fc.stopProcess: - fc.doneProcess <- true - break PROCESS_LOOP - - case <-fc.transportReady: - fc.LogInfo("connected") - fc.writerReady = true - - // read from the connection until we stop - go fc.ReadFromConnection() - - // incoming dns message to process - case dm, opened := <-fc.outputChan: - if !opened { - fc.LogInfo("output channel closed!") - return - } - - // drop dns message if the connection is not ready to avoid memory leak or - // to block the channel - if !fc.writerReady { - continue - } - - // append dns message to buffer - bufferDm = append(bufferDm, dm) - - // buffer is full ? 
- if len(bufferDm) >= fc.config.Loggers.Fluentd.BufferSize { - fc.FlushBuffer(&bufferDm) - } - - // flush the buffer - case <-flushTimer.C: - if !fc.writerReady { - bufferDm = nil - } - - if len(bufferDm) > 0 { - fc.FlushBuffer(&bufferDm) - } - - // restart timer - flushTimer.Reset(flushInterval) - } - } - fc.LogInfo("processing terminated") -} diff --git a/loggers/fluentd_test.go b/loggers/fluentd_test.go deleted file mode 100644 index 1afe8a7d..00000000 --- a/loggers/fluentd_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package loggers - -import ( - "net" - "testing" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-logger" - "github.com/vmihailenco/msgpack" -) - -func Test_FluentdClient(t *testing.T) { - testcases := []struct { - transport string - address string - }{ - { - transport: netlib.SocketTCP, - address: ":24224", - }, - } - for _, tc := range testcases { - t.Run(tc.transport, func(t *testing.T) { - // init logger - cfg := pkgconfig.GetFakeConfig() - cfg.Loggers.Fluentd.FlushInterval = 1 - cfg.Loggers.Fluentd.BufferSize = 0 - g := NewFluentdClient(cfg, logger.New(false), "test") - - // fake msgpack receiver - fakeRcvr, err := net.Listen(tc.transport, tc.address) - if err != nil { - t.Fatal(err) - } - defer fakeRcvr.Close() - - // start the logger - go g.Run() - - // accept conn from logger - conn, err := fakeRcvr.Accept() - if err != nil { - return - } - defer conn.Close() - - // send fake dns message to logger - time.Sleep(time.Second) - dm := dnsutils.GetFakeDNSMessage() - g.GetInputChannel() <- dm - - // read data on fake server side - buf := make([]byte, 4096) - _, err = conn.Read(buf) - if err != nil { - t.Errorf("error to read msgpack: %s", err) - } - - // unpack msgpack - var dmRcv dnsutils.DNSMessage - err = msgpack.Unmarshal(buf[24:], &dmRcv) - if err != nil { - t.Errorf("error to 
unpack msgpack: %s", err) - } - if dm.DNS.Qname != dmRcv.DNS.Qname { - t.Errorf("qname error want %s, got %s", dm.DNS.Qname, dmRcv.DNS.Qname) - } - - // stop all - fakeRcvr.Close() - g.Stop() - }) - } -} diff --git a/loggers/influxdb.go b/loggers/influxdb.go deleted file mode 100644 index 724b4f91..00000000 --- a/loggers/influxdb.go +++ /dev/null @@ -1,224 +0,0 @@ -package loggers - -import ( - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" - - influxdb2 "github.com/influxdata/influxdb-client-go" - "github.com/influxdata/influxdb-client-go/api" -) - -type InfluxDBClient struct { - stopProcess chan bool - doneProcess chan bool - stopRun chan bool - doneRun chan bool - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - influxdbConn influxdb2.Client - writeAPI api.WriteAPI - name string - RoutingHandler pkgutils.RoutingHandler -} - -func NewInfluxDBClient(config *pkgconfig.Config, logger *logger.Logger, name string) *InfluxDBClient { - logger.Info(pkgutils.PrefixLogLogger+"[%s] influxdb - enabled", name) - - ic := &InfluxDBClient{ - stopProcess: make(chan bool), - doneProcess: make(chan bool), - stopRun: make(chan bool), - doneRun: make(chan bool), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.InfluxDB.ChannelBufferSize), - outputChan: make(chan dnsutils.DNSMessage, config.Loggers.InfluxDB.ChannelBufferSize), - logger: logger, - config: config, - configChan: make(chan *pkgconfig.Config), - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - } - - ic.ReadConfig() - - return ic -} - -func (ic *InfluxDBClient) GetName() string { return ic.name } - -func (ic 
*InfluxDBClient) AddDroppedRoute(wrk pkgutils.Worker) { - ic.RoutingHandler.AddDroppedRoute(wrk) -} - -func (ic *InfluxDBClient) AddDefaultRoute(wrk pkgutils.Worker) { - ic.RoutingHandler.AddDefaultRoute(wrk) -} - -func (ic *InfluxDBClient) SetLoggers(loggers []pkgutils.Worker) {} - -func (ic *InfluxDBClient) ReadConfig() {} - -func (ic *InfluxDBClient) ReloadConfig(config *pkgconfig.Config) { - ic.LogInfo("reload configuration!") - ic.configChan <- config -} - -func (ic *InfluxDBClient) LogInfo(msg string, v ...interface{}) { - ic.logger.Info(pkgutils.PrefixLogLogger+"["+ic.name+"] influxdb - "+msg, v...) -} - -func (ic *InfluxDBClient) LogError(msg string, v ...interface{}) { - ic.logger.Error(pkgutils.PrefixLogLogger+"["+ic.name+"] influxdb - "+msg, v...) -} - -func (ic *InfluxDBClient) GetInputChannel() chan dnsutils.DNSMessage { - return ic.inputChan -} - -func (ic *InfluxDBClient) Stop() { - ic.LogInfo("stopping logger...") - ic.RoutingHandler.Stop() - - ic.LogInfo("stopping to run...") - ic.stopRun <- true - <-ic.doneRun - - ic.LogInfo("stopping to process...") - ic.stopProcess <- true - <-ic.doneProcess -} - -func (ic *InfluxDBClient) Run() { - ic.LogInfo("running in background...") - - // prepare next channels - defaultRoutes, defaultNames := ic.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := ic.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, ic.outputChan) - subprocessors := transformers.NewTransforms(&ic.config.OutgoingTransformers, ic.logger, ic.name, listChannel, 0) - - // goroutine to process transformed dns messages - go ic.Process() - - // loop to process incoming messages -RUN_LOOP: - for { - select { - case <-ic.stopRun: - // cleanup transformers - subprocessors.Reset() - - ic.doneRun <- true - break RUN_LOOP - - case cfg, opened := <-ic.configChan: - if !opened { - return - } - ic.config = cfg - ic.ReadConfig() - 
subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, opened := <-ic.inputChan: - if !opened { - ic.LogInfo("input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - ic.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? - ic.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - ic.outputChan <- dm - } - } - ic.LogInfo("run terminated") -} - -func (ic *InfluxDBClient) Process() { - // prepare options for influxdb - opts := influxdb2.DefaultOptions() - opts.SetUseGZip(true) - if ic.config.Loggers.InfluxDB.TLSSupport { - tlsOptions := pkgconfig.TLSOptions{ - InsecureSkipVerify: ic.config.Loggers.InfluxDB.TLSInsecure, - MinVersion: ic.config.Loggers.InfluxDB.TLSMinVersion, - CAFile: ic.config.Loggers.InfluxDB.CAFile, - CertFile: ic.config.Loggers.InfluxDB.CertFile, - KeyFile: ic.config.Loggers.InfluxDB.KeyFile, - } - - tlsConfig, err := pkgconfig.TLSClientConfig(tlsOptions) - if err != nil { - ic.logger.Fatal("logger=influxdb - tls config failed:", err) - } - - opts.SetTLSConfig(tlsConfig) - } - // init the client - influxClient := influxdb2.NewClientWithOptions( - ic.config.Loggers.InfluxDB.ServerURL, - ic.config.Loggers.InfluxDB.AuthToken, - opts, - ) - - writeAPI := influxClient.WriteAPI( - ic.config.Loggers.InfluxDB.Organization, - ic.config.Loggers.InfluxDB.Bucket, - ) - - ic.influxdbConn = influxClient - ic.writeAPI = writeAPI - - ic.LogInfo("ready to process") -PROCESS_LOOP: - for { - select { - case <-ic.stopProcess: - // Force all unwritten data to be sent - ic.writeAPI.Flush() - // Ensures background processes finishes - ic.influxdbConn.Close() - ic.doneProcess <- true - break PROCESS_LOOP - // incoming dns message to process - case dm, opened := <-ic.outputChan: - if !opened { - ic.LogInfo("output channel 
closed!") - return - } - - p := influxdb2.NewPointWithMeasurement("dns"). - AddTag("Identity", dm.DNSTap.Identity). - AddTag("QueryIP", dm.NetworkInfo.QueryIP). - AddTag("Qname", dm.DNS.Qname). - AddField("Operation", dm.DNSTap.Operation). - AddField("Family", dm.NetworkInfo.Family). - AddField("Protocol", dm.NetworkInfo.Protocol). - AddField("Qtype", dm.DNS.Qtype). - AddField("Rcode", dm.DNS.Rcode). - SetTime(time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec))) - - // write asynchronously - ic.writeAPI.WritePoint(p) - } - } - ic.LogInfo("processing terminated") -} diff --git a/loggers/kafkaproducer.go b/loggers/kafkaproducer.go deleted file mode 100644 index 8c28dd66..00000000 --- a/loggers/kafkaproducer.go +++ /dev/null @@ -1,395 +0,0 @@ -package loggers - -import ( - "bytes" - "context" - "encoding/json" - "log" - "strconv" - "strings" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" - "github.com/segmentio/kafka-go" - "github.com/segmentio/kafka-go/compress" - "github.com/segmentio/kafka-go/sasl/plain" - "github.com/segmentio/kafka-go/sasl/scram" -) - -type KafkaProducer struct { - stopProcess chan bool - doneProcess chan bool - stopRun chan bool - doneRun chan bool - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - textFormat []string - name string - kafkaConn *kafka.Conn - kafkaReady chan bool - kafkaReconnect chan bool - kafkaConnected bool - compressCodec compress.Codec - RoutingHandler pkgutils.RoutingHandler -} - -func NewKafkaProducer(config *pkgconfig.Config, logger *logger.Logger, name string) *KafkaProducer { - logger.Info(pkgutils.PrefixLogLogger+"[%s] kafka - 
enabled", name) - k := &KafkaProducer{ - stopProcess: make(chan bool), - doneProcess: make(chan bool), - stopRun: make(chan bool), - doneRun: make(chan bool), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.KafkaProducer.ChannelBufferSize), - outputChan: make(chan dnsutils.DNSMessage, config.Loggers.KafkaProducer.ChannelBufferSize), - logger: logger, - config: config, - configChan: make(chan *pkgconfig.Config), - kafkaReady: make(chan bool), - kafkaReconnect: make(chan bool), - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - } - - k.ReadConfig() - return k -} - -func (k *KafkaProducer) GetName() string { return k.name } - -func (k *KafkaProducer) AddDroppedRoute(wrk pkgutils.Worker) { - k.RoutingHandler.AddDroppedRoute(wrk) -} - -func (k *KafkaProducer) AddDefaultRoute(wrk pkgutils.Worker) { - k.RoutingHandler.AddDefaultRoute(wrk) -} - -func (k *KafkaProducer) SetLoggers(loggers []pkgutils.Worker) {} - -func (k *KafkaProducer) ReadConfig() { - if len(k.config.Loggers.RedisPub.TextFormat) > 0 { - k.textFormat = strings.Fields(k.config.Loggers.RedisPub.TextFormat) - } else { - k.textFormat = strings.Fields(k.config.Global.TextFormat) - } - - if k.config.Loggers.KafkaProducer.Compression != pkgconfig.CompressNone { - switch k.config.Loggers.KafkaProducer.Compression { - case pkgconfig.CompressGzip: - k.compressCodec = &compress.GzipCodec - case pkgconfig.CompressLz4: - k.compressCodec = &compress.Lz4Codec - case pkgconfig.CompressSnappy: - k.compressCodec = &compress.SnappyCodec - case pkgconfig.CompressZstd: - k.compressCodec = &compress.ZstdCodec - case pkgconfig.CompressNone: - k.compressCodec = nil - default: - log.Fatal(pkgutils.PrefixLogLogger+"["+k.name+"] kafka - invalid compress mode: ", k.config.Loggers.KafkaProducer.Compression) - } - } -} - -func (k *KafkaProducer) ReloadConfig(config *pkgconfig.Config) { - k.LogInfo("reload configuration!") - k.configChan <- config -} - -func (k *KafkaProducer) LogInfo(msg 
string, v ...interface{}) { - k.logger.Info(pkgutils.PrefixLogLogger+"["+k.name+"] kafka - "+msg, v...) -} - -func (k *KafkaProducer) LogError(msg string, v ...interface{}) { - k.logger.Error(pkgutils.PrefixLogLogger+"["+k.name+"] kafka - "+msg, v...) -} - -func (k *KafkaProducer) GetInputChannel() chan dnsutils.DNSMessage { - return k.inputChan -} - -func (k *KafkaProducer) Stop() { - k.LogInfo("stopping logger...") - k.RoutingHandler.Stop() - - k.LogInfo("stopping to run...") - k.stopRun <- true - <-k.doneRun - - k.LogInfo("stopping to process...") - k.stopProcess <- true - <-k.doneProcess -} - -func (k *KafkaProducer) Disconnect() { - if k.kafkaConn != nil { - k.LogInfo("closing connection") - k.kafkaConn.Close() - } -} - -func (k *KafkaProducer) ConnectToKafka(ctx context.Context, readyTimer *time.Timer) { - for { - readyTimer.Reset(time.Duration(10) * time.Second) - - if k.kafkaConn != nil { - k.kafkaConn.Close() - k.kafkaConn = nil - } - - topic := k.config.Loggers.KafkaProducer.Topic - partition := k.config.Loggers.KafkaProducer.Partition - address := k.config.Loggers.KafkaProducer.RemoteAddress + ":" + strconv.Itoa(k.config.Loggers.KafkaProducer.RemotePort) - - k.LogInfo("connecting to kafka=%s partition=%d topic=%s", address, partition, topic) - - dialer := &kafka.Dialer{ - Timeout: time.Duration(k.config.Loggers.KafkaProducer.ConnectTimeout) * time.Second, - Deadline: time.Now().Add(5 * time.Second), - DualStack: true, - } - - // enable TLS - if k.config.Loggers.KafkaProducer.TLSSupport { - tlsOptions := pkgconfig.TLSOptions{ - InsecureSkipVerify: k.config.Loggers.KafkaProducer.TLSInsecure, - MinVersion: k.config.Loggers.KafkaProducer.TLSMinVersion, - CAFile: k.config.Loggers.KafkaProducer.CAFile, - CertFile: k.config.Loggers.KafkaProducer.CertFile, - KeyFile: k.config.Loggers.KafkaProducer.KeyFile, - } - - tlsConfig, err := pkgconfig.TLSClientConfig(tlsOptions) - if err != nil { - k.logger.Fatal("logger=kafka - tls config failed:", err) - } - dialer.TLS 
= tlsConfig - } - - // SASL Support - if k.config.Loggers.KafkaProducer.SaslSupport { - switch k.config.Loggers.KafkaProducer.SaslMechanism { - case pkgconfig.SASLMechanismPlain: - mechanism := plain.Mechanism{ - Username: k.config.Loggers.KafkaProducer.SaslUsername, - Password: k.config.Loggers.KafkaProducer.SaslPassword, - } - dialer.SASLMechanism = mechanism - case pkgconfig.SASLMechanismScram: - mechanism, err := scram.Mechanism( - scram.SHA512, - k.config.Loggers.KafkaProducer.SaslUsername, - k.config.Loggers.KafkaProducer.SaslPassword, - ) - if err != nil { - panic(err) - } - dialer.SASLMechanism = mechanism - } - - } - - // connect - conn, err := dialer.DialLeader(ctx, "tcp", address, topic, partition) - if err != nil { - k.LogError("%s", err) - k.LogInfo("retry to connect in %d seconds", k.config.Loggers.KafkaProducer.RetryInterval) - time.Sleep(time.Duration(k.config.Loggers.KafkaProducer.RetryInterval) * time.Second) - continue - } - - k.kafkaConn = conn - - // block until is ready - k.kafkaReady <- true - k.kafkaReconnect <- true - } -} - -func (k *KafkaProducer) FlushBuffer(buf *[]dnsutils.DNSMessage) { - msgs := []kafka.Message{} - buffer := new(bytes.Buffer) - strDm := "" - - for _, dm := range *buf { - switch k.config.Loggers.KafkaProducer.Mode { - case pkgconfig.ModeText: - strDm = dm.String(k.textFormat, k.config.Global.TextFormatDelimiter, k.config.Global.TextFormatBoundary) - case pkgconfig.ModeJSON: - json.NewEncoder(buffer).Encode(dm) - strDm = buffer.String() - buffer.Reset() - case pkgconfig.ModeFlatJSON: - flat, err := dm.Flatten() - if err != nil { - k.LogError("flattening DNS message failed: %e", err) - } - json.NewEncoder(buffer).Encode(flat) - strDm = buffer.String() - buffer.Reset() - } - - msg := kafka.Message{ - Key: []byte(dm.DNSTap.Identity), - Value: []byte(strDm), - } - msgs = append(msgs, msg) - - } - - // add support for msg compression - var err error - if k.config.Loggers.KafkaProducer.Compression == pkgconfig.CompressNone { - 
_, err = k.kafkaConn.WriteMessages(msgs...) - } else { - _, err = k.kafkaConn.WriteCompressedMessages(k.compressCodec, msgs...) - } - - if err != nil { - k.LogError("unable to write message", err.Error()) - k.kafkaConnected = false - <-k.kafkaReconnect - } - - // reset buffer - *buf = nil -} - -func (k *KafkaProducer) Run() { - k.LogInfo("running in background...") - - // prepare next channels - defaultRoutes, defaultNames := k.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := k.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, k.outputChan) - subprocessors := transformers.NewTransforms(&k.config.OutgoingTransformers, k.logger, k.name, listChannel, 0) - - // goroutine to process transformed dns messages - go k.Process() - - // loop to process incoming messages -RUN_LOOP: - for { - select { - case <-k.stopRun: - // cleanup transformers - subprocessors.Reset() - - k.doneRun <- true - break RUN_LOOP - - case cfg, opened := <-k.configChan: - if !opened { - return - } - k.config = cfg - k.ReadConfig() - subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, opened := <-k.inputChan: - if !opened { - k.LogInfo("input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - k.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? 
- k.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - k.outputChan <- dm - } - } - k.LogInfo("run terminated") -} - -func (k *KafkaProducer) Process() { - ctx, cancelKafka := context.WithCancel(context.Background()) - defer cancelKafka() // Libérez les ressources liées au contexte - - // init buffer - bufferDm := []dnsutils.DNSMessage{} - - // init flust timer for buffer - readyTimer := time.NewTimer(time.Duration(10) * time.Second) - - // init flust timer for buffer - flushInterval := time.Duration(k.config.Loggers.KafkaProducer.FlushInterval) * time.Second - flushTimer := time.NewTimer(flushInterval) - - go k.ConnectToKafka(ctx, readyTimer) - - k.LogInfo("ready to process") - -PROCESS_LOOP: - for { - select { - case <-k.stopProcess: - // closing kafka connection if exist - k.Disconnect() - k.doneProcess <- true - break PROCESS_LOOP - - case <-readyTimer.C: - k.LogError("failed to established connection") - cancelKafka() - - case <-k.kafkaReady: - k.LogInfo("connected with success") - readyTimer.Stop() - k.kafkaConnected = true - - // incoming dns message to process - case dm, opened := <-k.outputChan: - if !opened { - k.LogInfo("output channel closed!") - return - } - - // drop dns message if the connection is not ready to avoid memory leak or - // to block the channel - if !k.kafkaConnected { - continue - } - - // append dns message to buffer - bufferDm = append(bufferDm, dm) - - // buffer is full ? 
- if len(bufferDm) >= k.config.Loggers.KafkaProducer.BufferSize { - k.FlushBuffer(&bufferDm) - } - - // flush the buffer - case <-flushTimer.C: - if !k.kafkaConnected { - bufferDm = nil - } - - if len(bufferDm) > 0 { - k.FlushBuffer(&bufferDm) - } - - // restart timer - flushTimer.Reset(flushInterval) - } - } - k.LogInfo("processing terminated") -} diff --git a/loggers/logfile.go b/loggers/logfile.go deleted file mode 100644 index 5cefc8ed..00000000 --- a/loggers/logfile.go +++ /dev/null @@ -1,634 +0,0 @@ -package loggers - -import ( - "bufio" - "bytes" - "compress/gzip" - "encoding/json" - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" - "github.com/google/gopacket" - "github.com/google/gopacket/layers" - "github.com/google/gopacket/pcapgo" - - framestream "github.com/farsightsec/golang-framestream" -) - -const ( - compressSuffix = ".gz" -) - -func IsValidMode(mode string) bool { - switch mode { - case - pkgconfig.ModeText, - pkgconfig.ModeJSON, - pkgconfig.ModeFlatJSON, - pkgconfig.ModePCAP, - pkgconfig.ModeDNSTap: - return true - } - return false -} - -type LogFile struct { - stopProcess chan bool - doneProcess chan bool - stopRun chan bool - doneRun chan bool - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - writerPlain *bufio.Writer - writerPcap *pcapgo.Writer - writerDnstap *framestream.Encoder - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - fileFd *os.File - fileSize int64 - fileDir string - fileName string - fileExt string - filePrefix string - commpressTimer *time.Timer - textFormat []string - name string - RoutingHandler 
pkgutils.RoutingHandler -} - -func NewLogFile(config *pkgconfig.Config, logger *logger.Logger, name string) *LogFile { - logger.Info(pkgutils.PrefixLogLogger+"[%s] file - enabled", name) - lf := &LogFile{ - stopProcess: make(chan bool), - doneProcess: make(chan bool), - stopRun: make(chan bool), - doneRun: make(chan bool), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.LogFile.ChannelBufferSize), - outputChan: make(chan dnsutils.DNSMessage, config.Loggers.LogFile.ChannelBufferSize), - config: config, - configChan: make(chan *pkgconfig.Config), - logger: logger, - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - } - - lf.ReadConfig() - - if err := lf.OpenFile(); err != nil { - lf.logger.Fatal(pkgutils.PrefixLogLogger+"["+name+"] file - unable to open output file:", err) - } - - return lf -} - -func (lf *LogFile) GetName() string { return lf.name } - -func (lf *LogFile) AddDroppedRoute(wrk pkgutils.Worker) { - lf.RoutingHandler.AddDroppedRoute(wrk) -} - -func (lf *LogFile) AddDefaultRoute(wrk pkgutils.Worker) { - lf.RoutingHandler.AddDefaultRoute(wrk) -} - -func (lf *LogFile) SetLoggers(loggers []pkgutils.Worker) {} - -func (lf *LogFile) GetInputChannel() chan dnsutils.DNSMessage { - return lf.inputChan -} - -func (lf *LogFile) ReadConfig() { - if !IsValidMode(lf.config.Loggers.LogFile.Mode) { - lf.logger.Fatal("["+lf.name+"] logger=file - invalid mode: ", lf.config.Loggers.LogFile.Mode) - } - TextFormatSplitter := lf.config.Global.TextFormatSplitter - - lf.fileDir = filepath.Dir(lf.config.Loggers.LogFile.FilePath) - lf.fileName = filepath.Base(lf.config.Loggers.LogFile.FilePath) - lf.fileExt = filepath.Ext(lf.fileName) - lf.filePrefix = strings.TrimSuffix(lf.fileName, lf.fileExt) - - if len(lf.config.Loggers.LogFile.TextFormat) > 0 { - // lf.textFormat = strings.Fields(lf.config.Loggers.LogFile.TextFormat) - lf.textFormat = strings.Split(lf.config.Loggers.LogFile.TextFormat,TextFormatSplitter) - } else { - // 
lf.textFormat = strings.Fields(lf.config.Global.TextFormat) - lf.textFormat = strings.Split(lf.config.Global.TextFormat,TextFormatSplitter) - } - - lf.LogInfo("running in mode: %s", lf.config.Loggers.LogFile.Mode) -} - -func (lf *LogFile) ReloadConfig(config *pkgconfig.Config) { - lf.LogInfo("reload configuration!") - lf.configChan <- config -} - -func (lf *LogFile) LogInfo(msg string, v ...interface{}) { - lf.logger.Info(pkgutils.PrefixLogLogger+"["+lf.name+"] file - "+msg, v...) -} - -func (lf *LogFile) LogError(msg string, v ...interface{}) { - lf.logger.Error(pkgutils.PrefixLogLogger+"["+lf.name+"] file - "+msg, v...) -} - -func (lf *LogFile) Stop() { - lf.LogInfo("stopping logger...") - lf.RoutingHandler.Stop() - - lf.LogInfo("stopping to run...") - lf.stopRun <- true - <-lf.doneRun - - lf.LogInfo("stopping to process...") - lf.stopProcess <- true - <-lf.doneProcess -} - -func (lf *LogFile) Cleanup() error { - if lf.config.Loggers.LogFile.MaxFiles == 0 { - return nil - } - - // remove old files ? keep only max files number - entries, err := os.ReadDir(lf.fileDir) - if err != nil { - return err - } - - logFiles := []int{} - for _, entry := range entries { - if entry.IsDir() { - continue - } - - // extract timestamp from filename - re := regexp.MustCompile(`^` + lf.filePrefix + `-(?P\d+)` + lf.fileExt) - matches := re.FindStringSubmatch(entry.Name()) - - if len(matches) == 0 { - continue - } - - // convert timestamp to int - tsIndex := re.SubexpIndex("ts") - i, err := strconv.Atoi(matches[tsIndex]) - if err != nil { - continue - } - logFiles = append(logFiles, i) - } - sort.Ints(logFiles) - - // too much log files ? 
- diffNB := len(logFiles) - lf.config.Loggers.LogFile.MaxFiles - if diffNB > 0 { - for i := 0; i < diffNB; i++ { - filename := fmt.Sprintf("%s-%d%s", lf.filePrefix, logFiles[i], lf.fileExt) - f := filepath.Join(lf.fileDir, filename) - if _, err := os.Stat(f); os.IsNotExist(err) { - f = filepath.Join(lf.fileDir, filename+compressSuffix) - } - - // ignore errors on deletion - os.Remove(f) - } - } - - return nil -} - -func (lf *LogFile) OpenFile() error { - - fd, err := os.OpenFile(lf.config.Loggers.LogFile.FilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644) - if err != nil { - return err - } - lf.fileFd = fd - - fileinfo, err := os.Stat(lf.config.Loggers.LogFile.FilePath) - if err != nil { - return err - } - - lf.fileSize = fileinfo.Size() - - switch lf.config.Loggers.LogFile.Mode { - case pkgconfig.ModeText, pkgconfig.ModeJSON, pkgconfig.ModeFlatJSON: - bufferSize := 4096 - lf.writerPlain = bufio.NewWriterSize(fd, bufferSize) - - case pkgconfig.ModePCAP: - lf.writerPcap = pcapgo.NewWriter(fd) - if lf.fileSize == 0 { - if err := lf.writerPcap.WriteFileHeader(65536, layers.LinkTypeEthernet); err != nil { - return err - } - } - - case pkgconfig.ModeDNSTap: - fsOptions := &framestream.EncoderOptions{ContentType: []byte("protobuf:dnstap.Dnstap"), Bidirectional: false} - lf.writerDnstap, err = framestream.NewEncoder(fd, fsOptions) - if err != nil { - return err - } - - } - - lf.LogInfo("file opened with success: %s", lf.config.Loggers.LogFile.FilePath) - return nil -} - -func (lf *LogFile) GetMaxSize() int64 { - return int64(1024*1024) * int64(lf.config.Loggers.LogFile.MaxSize) -} - -func (lf *LogFile) CompressFile() { - entries, err := os.ReadDir(lf.fileDir) - if err != nil { - lf.LogError("unable to list all files: %s", err) - return - } - - for _, entry := range entries { - // ignore folder - if entry.IsDir() { - continue - } - - matched, _ := regexp.MatchString(`^`+lf.filePrefix+`-\d+`+lf.fileExt+`$`, entry.Name()) - if matched { - src := filepath.Join(lf.fileDir, 
entry.Name()) - dst := filepath.Join(lf.fileDir, entry.Name()+compressSuffix) - - fd, err := os.Open(src) - if err != nil { - lf.LogError("compress - failed to open file: ", err) - continue - } - defer fd.Close() - - fi, err := os.Stat(src) - if err != nil { - lf.LogError("compress - failed to stat file: ", err) - continue - } - - gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode()) - if err != nil { - lf.LogError("compress - failed to open compressed file: ", err) - continue - } - defer gzf.Close() - - gz := gzip.NewWriter(gzf) - - if _, err := io.Copy(gz, fd); err != nil { - lf.LogError("compress - failed to compress file: ", err) - os.Remove(dst) - continue - } - if err := gz.Close(); err != nil { - lf.LogError("compress - failed to close gz writer: ", err) - os.Remove(dst) - continue - } - if err := gzf.Close(); err != nil { - lf.LogError("compress - failed to close gz file: ", err) - os.Remove(dst) - continue - } - - if err := fd.Close(); err != nil { - lf.LogError("compress - failed to close log file: ", err) - os.Remove(dst) - continue - } - if err := os.Remove(src); err != nil { - lf.LogError("compress - failed to remove log file: ", err) - os.Remove(dst) - continue - } - - // post rotate command? 
- lf.CompressPostRotateCommand(dst) - } - } - - lf.commpressTimer.Reset(time.Duration(lf.config.Loggers.LogFile.CompressInterval) * time.Second) -} - -func (lf *LogFile) PostRotateCommand(filename string) { - if len(lf.config.Loggers.LogFile.PostRotateCommand) > 0 { - lf.LogInfo("execute postrotate command: %s", filename) - _, err := exec.Command(lf.config.Loggers.LogFile.PostRotateCommand, filename).Output() - if err != nil { - lf.LogError("postrotate command error: %s", err) - } else if lf.config.Loggers.LogFile.PostRotateDelete { - os.Remove(filename) - } - } -} - -func (lf *LogFile) CompressPostRotateCommand(filename string) { - if len(lf.config.Loggers.LogFile.CompressPostCommand) > 0 { - - lf.LogInfo("execute compress postrotate command: %s", filename) - _, err := exec.Command(lf.config.Loggers.LogFile.CompressPostCommand, filename).Output() - if err != nil { - lf.LogError("compress - postcommand error: %s", err) - } - } -} - -func (lf *LogFile) FlushWriters() { - switch lf.config.Loggers.LogFile.Mode { - case pkgconfig.ModeText, pkgconfig.ModeJSON, pkgconfig.ModeFlatJSON: - lf.writerPlain.Flush() - case pkgconfig.ModeDNSTap: - lf.writerDnstap.Flush() - } -} - -func (lf *LogFile) RotateFile() error { - // close writer and existing file - lf.FlushWriters() - - if lf.config.Loggers.LogFile.Mode == pkgconfig.ModeDNSTap { - lf.writerDnstap.Close() - } - - if err := lf.fileFd.Close(); err != nil { - return err - } - - // Rename current log file - bfpath := filepath.Join(lf.fileDir, fmt.Sprintf("%s-%d%s", lf.filePrefix, time.Now().UnixNano(), lf.fileExt)) - err := os.Rename(lf.config.Loggers.LogFile.FilePath, bfpath) - if err != nil { - return err - } - - // post rotate command? 
- lf.PostRotateCommand(bfpath) - - // keep only max files - err = lf.Cleanup() - if err != nil { - lf.LogError("unable to cleanup log files: %s", err) - return err - } - - // re-create new one - if err := lf.OpenFile(); err != nil { - lf.LogError("unable to re-create file: %s", err) - return err - } - - return nil -} - -func (lf *LogFile) WriteToPcap(dm dnsutils.DNSMessage, pkt []gopacket.SerializableLayer) { - // create the packet with the layers - buf := gopacket.NewSerializeBuffer() - opts := gopacket.SerializeOptions{ - FixLengths: true, - ComputeChecksums: true, - } - for _, layer := range pkt { - layer.SerializeTo(buf, opts) - } - - // rotate pcap file ? - bufSize := len(buf.Bytes()) - - if (lf.fileSize + int64(bufSize)) > lf.GetMaxSize() { - if err := lf.RotateFile(); err != nil { - lf.LogError("failed to rotate file: %s", err) - return - } - } - - ci := gopacket.CaptureInfo{ - Timestamp: time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec)), - CaptureLength: bufSize, - Length: bufSize, - } - - lf.writerPcap.WritePacket(ci, buf.Bytes()) - - // increase size file - lf.fileSize += int64(bufSize) -} - -func (lf *LogFile) WriteToPlain(data []byte) { - dataSize := int64(len(data)) - - // rotate file ? - if (lf.fileSize + dataSize) > lf.GetMaxSize() { - if err := lf.RotateFile(); err != nil { - lf.LogError("failed to rotate file: %s", err) - return - } - } - - // write log to file - n, _ := lf.writerPlain.Write(data) - - // increase size file - lf.fileSize += int64(n) -} - -func (lf *LogFile) WriteToDnstap(data []byte) { - dataSize := int64(len(data)) - - // rotate file ? 
- if (lf.fileSize + dataSize) > lf.GetMaxSize() { - if err := lf.RotateFile(); err != nil { - lf.LogError("failed to rotate file: %s", err) - return - } - } - - // write log to file - n, _ := lf.writerDnstap.Write(data) - - // increase size file - lf.fileSize += int64(n) -} - -func (lf *LogFile) Run() { - lf.LogInfo("running in background...") - - // prepare next channels - defaultRoutes, defaultNames := lf.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := lf.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, lf.outputChan) - subprocessors := transformers.NewTransforms(&lf.config.OutgoingTransformers, lf.logger, lf.name, listChannel, 0) - - // goroutine to process transformed dns messages - go lf.Process() - - // loop to process incoming messages -RUN_LOOP: - for { - select { - case <-lf.stopRun: - // cleanup transformers - subprocessors.Reset() - lf.doneRun <- true - break RUN_LOOP - - case cfg, opened := <-lf.configChan: - if !opened { - return - } - lf.config = cfg - lf.ReadConfig() - subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, opened := <-lf.inputChan: - if !opened { - lf.LogInfo("input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - lf.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? 
- lf.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - lf.outputChan <- dm - } - } - lf.LogInfo("run terminated") -} - -func (lf *LogFile) Process() { - // prepare some timers - flushInterval := time.Duration(lf.config.Loggers.LogFile.FlushInterval) * time.Second - flushTimer := time.NewTimer(flushInterval) - lf.commpressTimer = time.NewTimer(time.Duration(lf.config.Loggers.LogFile.CompressInterval) * time.Second) - - buffer := new(bytes.Buffer) - var data []byte - var err error - - lf.LogInfo("ready to process") -PROCESS_LOOP: - for { - select { - case <-lf.stopProcess: - // stop timer - flushTimer.Stop() - lf.commpressTimer.Stop() - - // flush writer - lf.FlushWriters() - - // closing file - lf.LogInfo("closing log file") - if lf.config.Loggers.LogFile.Mode == pkgconfig.ModeDNSTap { - lf.writerDnstap.Close() - } - lf.fileFd.Close() - - lf.doneProcess <- true - break PROCESS_LOOP - - case dm, opened := <-lf.outputChan: - if !opened { - lf.LogInfo("output channel closed!") - return - } - - // write to file - switch lf.config.Loggers.LogFile.Mode { - - // with basic text mode - case pkgconfig.ModeText: - lf.WriteToPlain(dm.Bytes(lf.textFormat, - lf.config.Global.TextFormatDelimiter, - lf.config.Global.TextFormatBoundary)) - - var delimiter bytes.Buffer - delimiter.WriteString("\n") - lf.WriteToPlain(delimiter.Bytes()) - - // with json mode - case pkgconfig.ModeFlatJSON: - flat, err := dm.Flatten() - if err != nil { - lf.LogError("flattening DNS message failed: %e", err) - } - json.NewEncoder(buffer).Encode(flat) - lf.WriteToPlain(buffer.Bytes()) - buffer.Reset() - - // with json mode - case pkgconfig.ModeJSON: - json.NewEncoder(buffer).Encode(dm) - lf.WriteToPlain(buffer.Bytes()) - buffer.Reset() - - // with dnstap mode - case pkgconfig.ModeDNSTap: - data, err = dm.ToDNSTap(lf.config.Loggers.LogFile.ExtendedSupport) - if err != nil { - lf.LogError("failed to encode to DNStap protobuf: %s", err) - continue - } - 
lf.WriteToDnstap(data) - - // with pcap mode - case pkgconfig.ModePCAP: - pkt, err := dm.ToPacketLayer() - if err != nil { - lf.LogError("failed to encode to packet layer: %s", err) - continue - } - - // write the packet - lf.WriteToPcap(dm, pkt) - } - - case <-flushTimer.C: - // flush writer - lf.FlushWriters() - - // reset flush timer and buffer - buffer.Reset() - flushTimer.Reset(flushInterval) - - case <-lf.commpressTimer.C: - if lf.config.Loggers.LogFile.Compress { - lf.CompressFile() - } - - } - } - lf.LogInfo("processing terminated") -} diff --git a/loggers/lokiclient.go b/loggers/lokiclient.go deleted file mode 100644 index c429dbcf..00000000 --- a/loggers/lokiclient.go +++ /dev/null @@ -1,477 +0,0 @@ -package loggers - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "os" - "strings" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" - "github.com/gogo/protobuf/proto" - "github.com/grafana/dskit/backoff" - "github.com/klauspost/compress/snappy" - - /* - install loki with tags - - go get github.com/grafana/loki@2535f9bedeae5f27abdbfaf0cc1a8e9f91b6c96d - https://github.com/grafana/loki/releases/tag/v2.9.3 - - go get github.com/grafana/loki/pkg/push@2535f9bedeae5f27abdbfaf0cc1a8e9f91b6c96d - - go get github.com/prometheus/prometheus@v0.43.1-0.20230419161410-69155c6ba1e9 - go mod tidy - */ - "github.com/grafana/loki/pkg/logproto" - "github.com/prometheus/prometheus/model/labels" - "github.com/prometheus/prometheus/model/relabel" -) - -type LokiStream struct { - labels labels.Labels - config *pkgconfig.Config - logger *logger.Logger - stream *logproto.Stream - pushrequest *logproto.PushRequest - 
sizeentries int -} - -func (o *LokiStream) Init() { - // prepare stream with label name - o.stream = &logproto.Stream{} - o.stream.Labels = o.labels.String() - - // creates push request - o.pushrequest = &logproto.PushRequest{ - Streams: make([]logproto.Stream, 0, 1), - } -} - -func (o *LokiStream) ResetEntries() { - o.stream.Entries = nil - o.sizeentries = 0 - o.pushrequest.Reset() -} - -func (o *LokiStream) Encode2Proto() ([]byte, error) { - o.pushrequest.Streams = append(o.pushrequest.Streams, *o.stream) - - buf, err := proto.Marshal(o.pushrequest) - if err != nil { - fmt.Println(err) - } - buf = snappy.Encode(nil, buf) - return buf, nil -} - -type LokiClient struct { - stopProcess chan bool - doneProcess chan bool - stopRun chan bool - doneRun chan bool - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - httpclient *http.Client - textFormat []string - streams map[string]*LokiStream - name string - RoutingHandler pkgutils.RoutingHandler -} - -func NewLokiClient(config *pkgconfig.Config, logger *logger.Logger, name string) *LokiClient { - logger.Info(pkgutils.PrefixLogLogger+"[%s] loki - enabled", name) - - s := &LokiClient{ - stopProcess: make(chan bool), - doneProcess: make(chan bool), - stopRun: make(chan bool), - doneRun: make(chan bool), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.LokiClient.ChannelBufferSize), - outputChan: make(chan dnsutils.DNSMessage, config.Loggers.LokiClient.ChannelBufferSize), - logger: logger, - config: config, - configChan: make(chan *pkgconfig.Config), - streams: make(map[string]*LokiStream), - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - } - - s.ReadConfig() - return s -} - -func (c *LokiClient) GetName() string { return c.name } - -func (c *LokiClient) AddDroppedRoute(wrk pkgutils.Worker) { - c.RoutingHandler.AddDroppedRoute(wrk) -} - -func (c *LokiClient) 
AddDefaultRoute(wrk pkgutils.Worker) { - c.RoutingHandler.AddDefaultRoute(wrk) -} - -func (c *LokiClient) SetLoggers(loggers []pkgutils.Worker) {} - -func (c *LokiClient) ReadConfig() { - if len(c.config.Loggers.LokiClient.TextFormat) > 0 { - c.textFormat = strings.Fields(c.config.Loggers.LokiClient.TextFormat) - } else { - c.textFormat = strings.Fields(c.config.Global.TextFormat) - } - - // tls client config - tlsOptions := pkgconfig.TLSOptions{ - InsecureSkipVerify: c.config.Loggers.LokiClient.TLSInsecure, - MinVersion: c.config.Loggers.LokiClient.TLSMinVersion, - CAFile: c.config.Loggers.LokiClient.CAFile, - CertFile: c.config.Loggers.LokiClient.CertFile, - KeyFile: c.config.Loggers.LokiClient.KeyFile, - } - - tlsConfig, err := pkgconfig.TLSClientConfig(tlsOptions) - if err != nil { - c.logger.Fatal(pkgutils.PrefixLogLogger+"["+c.name+"] loki - tls config failed:", err) - } - - // prepare http client - tr := &http.Transport{ - MaxIdleConns: 10, - IdleConnTimeout: 30 * time.Second, - DisableCompression: false, - TLSClientConfig: tlsConfig, - } - - // use proxy - if len(c.config.Loggers.LokiClient.ProxyURL) > 0 { - proxyURL, err := url.Parse(c.config.Loggers.LokiClient.ProxyURL) - if err != nil { - c.logger.Fatal("logger=loki - unable to parse proxy url: ", err) - } - tr.Proxy = http.ProxyURL(proxyURL) - } - - c.httpclient = &http.Client{Transport: tr} - - if c.config.Loggers.LokiClient.BasicAuthPwdFile != "" { - content, err := os.ReadFile(c.config.Loggers.LokiClient.BasicAuthPwdFile) - if err != nil { - c.logger.Fatal("logger=loki - unable to load password from file: ", err) - } - c.config.Loggers.LokiClient.BasicAuthPwd = string(content) - } -} - -func (c *LokiClient) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration!") - c.configChan <- config -} - -func (c *LokiClient) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogLogger+"["+c.name+"] loki - "+msg, v...) 
-} - -func (c *LokiClient) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogLogger+"["+c.name+"] loki - "+msg, v...) -} - -func (c *LokiClient) GetInputChannel() chan dnsutils.DNSMessage { - return c.inputChan -} - -func (c *LokiClient) Stop() { - c.LogInfo("stopping logger...") - c.RoutingHandler.Stop() - - c.LogInfo("stopping to run...") - c.stopRun <- true - <-c.doneRun - - c.LogInfo("stopping to process...") - c.stopProcess <- true - <-c.doneProcess -} - -func (c *LokiClient) Run() { - c.LogInfo("running in background...") - - // prepare next channels - defaultRoutes, defaultNames := c.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := c.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, c.outputChan) - subprocessors := transformers.NewTransforms(&c.config.OutgoingTransformers, c.logger, c.name, listChannel, 0) - - // goroutine to process transformed dns messages - go c.Process() - - // loop to process incoming messages -RUN_LOOP: - for { - select { - case <-c.stopRun: - // cleanup transformers - subprocessors.Reset() - - c.doneRun <- true - break RUN_LOOP - - case cfg, opened := <-c.configChan: - if !opened { - return - } - c.config = cfg - c.ReadConfig() - subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, opened := <-c.inputChan: - if !opened { - c.LogInfo("input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - c.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? 
- c.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - c.outputChan <- dm - } - } - c.LogInfo("run terminated") -} - -func (c *LokiClient) Process() { - // prepare buffer - buffer := new(bytes.Buffer) - var byteBuffer []byte - - // prepare timers - tflushInterval := time.Duration(c.config.Loggers.LokiClient.FlushInterval) * time.Second - tflush := time.NewTimer(tflushInterval) - - c.LogInfo("ready to process") -PROCESS_LOOP: - for { - select { - case <-c.stopProcess: - c.doneProcess <- true - break PROCESS_LOOP - - // incoming dns message to process - case dm, opened := <-c.outputChan: - if !opened { - c.LogInfo("output channel closed!") - return - } - - lbls := labels.Labels{ - labels.Label{Name: "identity", Value: dm.DNSTap.Identity}, - labels.Label{Name: "job", Value: c.config.Loggers.LokiClient.JobName}, - } - var err error - var flat map[string]interface{} - if len(c.config.Loggers.LokiClient.RelabelConfigs) > 0 { - // Save flattened JSON in case it's used when populating the message of the log entry. - // There is more room for improvement for reusing data though. Flatten() internally - // does a JSON encode of the DnsMessage, but it's not saved to use when the mode - // is JSON. - flat, err = dm.Flatten() - if err != nil { - c.LogError("flattening DNS message failed: %e", err) - } - sb := labels.NewScratchBuilder(len(lbls) + len(flat)) - sb.Assign(lbls) - for k, v := range flat { - sb.Add(fmt.Sprintf("__%s", strings.ReplaceAll(k, ".", "_")), fmt.Sprint(v)) - } - sb.Sort() - lbls, _ = relabel.Process(sb.Labels(), c.config.Loggers.LokiClient.RelabelConfigs...) - - // Drop all labels starting with __ from the map if a relabel config is used. - // These labels are just exposed to relabel for the user and should not be - // shipped to loki by default. 
- lb := labels.NewBuilder(lbls) - lbls.Range(func(l labels.Label) { - if l.Name[0:2] == "__" { - lb.Del(l.Name) - } - }) - lbls = lb.Labels() - - if len(lbls) == 0 { - c.LogInfo("dropping %v since it has no labels", dm) - continue - } - } - - // prepare entry - entry := logproto.Entry{} - entry.Timestamp = time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec)) - - switch c.config.Loggers.LokiClient.Mode { - case pkgconfig.ModeText: - entry.Line = string(dm.Bytes(c.textFormat, - c.config.Global.TextFormatDelimiter, - c.config.Global.TextFormatBoundary)) - case pkgconfig.ModeJSON: - json.NewEncoder(buffer).Encode(dm) - entry.Line = buffer.String() - buffer.Reset() - case pkgconfig.ModeFlatJSON: - if len(flat) == 0 { - flat, err = dm.Flatten() - if err != nil { - c.LogError("flattening DNS message failed: %e", err) - } - } - json.NewEncoder(buffer).Encode(flat) - entry.Line = buffer.String() - buffer.Reset() - } - key := string(lbls.Bytes(byteBuffer)) - ls, ok := c.streams[key] - if !ok { - ls = &LokiStream{config: c.config, logger: c.logger, labels: lbls} - ls.Init() - c.streams[key] = ls - } - ls.sizeentries += len(entry.Line) - - // append entry to the stream - ls.stream.Entries = append(ls.stream.Entries, entry) - - // flush ? 
- if ls.sizeentries >= c.config.Loggers.LokiClient.BatchSize { - // encode log entries - buf, err := ls.Encode2Proto() - if err != nil { - c.LogError("error encoding log entries - %v", err) - // reset push request and entries - ls.ResetEntries() - return - } - - // send all entries - c.SendEntries(buf) - - // reset entries and push request - ls.ResetEntries() - } - - case <-tflush.C: - for _, s := range c.streams { - if len(s.stream.Entries) > 0 { - // timeout - // encode log entries - buf, err := s.Encode2Proto() - if err != nil { - c.LogError("error encoding log entries - %v", err) - // reset push request and entries - s.ResetEntries() - // restart timer - tflush.Reset(tflushInterval) - return - } - - // send all entries - c.SendEntries(buf) - - // reset entries and push request - s.ResetEntries() - } - } - - // restart timer - tflush.Reset(tflushInterval) - } - } - c.LogInfo("processing terminated") -} - -func (c *LokiClient) SendEntries(buf []byte) { - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - MinBackoff := 500 * time.Millisecond - MaxBackoff := 5 * time.Minute - MaxRetries := 10 - - backoff := backoff.New(ctx, backoff.Config{ - MaxBackoff: MaxBackoff, - MaxRetries: MaxRetries, - MinBackoff: MinBackoff, - }) - - for { - // send post http - post, err := http.NewRequest("POST", c.config.Loggers.LokiClient.ServerURL, bytes.NewReader(buf)) - if err != nil { - c.LogError("new http error: %s", err) - return - } - post = post.WithContext(ctx) - post.Header.Set("Content-Type", "application/x-protobuf") - post.Header.Set("User-Agent", c.config.GetServerIdentity()) - if len(c.config.Loggers.LokiClient.TenantID) > 0 { - post.Header.Set("X-Scope-OrgID", c.config.Loggers.LokiClient.TenantID) - } - - post.SetBasicAuth( - c.config.Loggers.LokiClient.BasicAuthLogin, - c.config.Loggers.LokiClient.BasicAuthPwd, - ) - - // send post and read response - resp, err := c.httpclient.Do(post) - if err != nil { - c.LogError("do http error: %s", err) 
- return - } - - // success ? - if resp.StatusCode > 0 && resp.StatusCode != 429 && resp.StatusCode/100 != 5 { - break - } - - // something is wrong, retry ? - if resp.StatusCode/100 != 2 { - scanner := bufio.NewScanner(io.LimitReader(resp.Body, 1024)) - line := "" - if scanner.Scan() { - line = scanner.Text() - } - c.LogError("server returned HTTP status %s (%d): %s", resp.Status, resp.StatusCode, line) - } - - // wait before retry - backoff.Wait() - - // Make sure it sends at least once before checking for retry. - if !backoff.Ongoing() { - break - } - } -} diff --git a/loggers/prometheus.go b/loggers/prometheus.go deleted file mode 100644 index cf3d5741..00000000 --- a/loggers/prometheus.go +++ /dev/null @@ -1,1359 +0,0 @@ -package loggers - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "net" - "net/http" - "os" - "regexp" - "strconv" - "strings" - "sync" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" - "github.com/dmachard/go-topmap" - "github.com/hashicorp/golang-lru/v2/expirable" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/collectors" - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/prometheus/common/version" - // _ "net/http/pprof" -) - -var metricNameRegex = regexp.MustCompile(`_*[^0-9A-Za-z_]+_*`) - -/* -This is the list of available label values selectors. -Configuration may specify a list of lables to use for metrics. 
-Any label in this catalogueSelectors can be specidied in config (prometheus-labels stanza) -*/ -var catalogueSelectors map[string]func(*dnsutils.DNSMessage) string = map[string]func(*dnsutils.DNSMessage) string{ - "stream_id": GetStreamID, - "resolver": GetResolverIP, - "stream_global": GetStreamGlobal, -} - -/* -OpenMetrics and the Prometheus exposition format require the metric name -to consist only of alphanumericals and "_", ":" and they must not start -with digits. -*/ -func SanitizeMetricName(metricName string) string { - return metricNameRegex.ReplaceAllString(metricName, "_") -} - -/* -EpsCounters (Events Per Second) - is a set of metrics we calculate on per-second basis. -For others we rely on averaging by collector -*/ -type EpsCounters struct { - Eps uint64 - EpsMax uint64 - TotalEvents uint64 - TotalEventsPrev uint64 - - TotalRcodes map[string]float64 - TotalQtypes map[string]float64 - TotalIPVersion map[string]float64 - TotalIPProtocol map[string]float64 - TotalDNSMessages float64 - TotalQueries int - TotalReplies int - TotalBytesSent int - TotalBytesReceived int - TotalBytes int - - TotalTC float64 - TotalAA float64 - TotalRA float64 - TotalAD float64 - TotalMalformed float64 - TotalFragmented float64 - TotalReasembled float64 -} - -type PrometheusCountersCatalogue interface { - // Prometheus logger encapsulates stats counters (PrometheusCounterSet) inside - // PromCounterCatalogueContainer's. For each label the logger creates a nested level - // of containers. - // Containers and CounterSets must implemnent PrometheusCountersCatalogue interface - // to allow fetching a CounterSet by the list of metric/values by fetching values from - // the DNS message it logs. - // There is a schematic sample layout when there are 2 labels considered at the end of this file - GetCountersSet(*dnsutils.DNSMessage) PrometheusCountersCatalogue -} - -// This type represents a set of counters for a unique set of label name=value pairs. 
-// By default, we create a set per setream_id for backward compatibility -// However, we can allow slicing and dicing data using more dimensions. -// Each CounterSet is registered with Prometheus collection independently (wrapping label values) -type PrometheusCountersSet struct { - prom *Prometheus - - // LRU cache counters per domains and IP - requesters *expirable.LRU[string, int] // Requests number made by a specific requestor - allDomains *expirable.LRU[string, int] // Requests number made to find out about a specific domain - validDomains *expirable.LRU[string, int] // Requests number ended up in NOERROR - nxDomains *expirable.LRU[string, int] // Requests number ended up in NXDOMAIN - sfDomains *expirable.LRU[string, int] // Requests number ended up in SERVFAIL - tlds *expirable.LRU[string, int] // Requests number for a specific TLD - etldplusone *expirable.LRU[string, int] // Requests number for a specific eTLD+1 - suspicious *expirable.LRU[string, int] // Requests number for a specific name that looked suspicious - evicted *expirable.LRU[string, int] // Requests number for a specific name that timed out - - epsCounters EpsCounters - topRequesters *topmap.TopMap - topAllDomains *topmap.TopMap - topEvicted *topmap.TopMap - topValidDomains *topmap.TopMap - topSfDomains *topmap.TopMap - topNxDomains *topmap.TopMap - topTlds *topmap.TopMap - topETLDPlusOne *topmap.TopMap - topSuspicious *topmap.TopMap - - labels prometheus.Labels // Do we really need to keep that map outside of registration? 
- sync.Mutex // Each PrometheusCountersSet locks independently -} - -// PromCounterCatalogueContainer is the implementation of PrometheusCountersCatalogue interface -// That maps a single label into other Containers or CounterSet -// The 'chain' of nested Containers keep track of labelNames requested by the config -// to figure out whether nested Container should be created, or, if all labels but the last one -// already considered at the upper levels, it is time to create individual CounterSet -type PromCounterCatalogueContainer struct { - prom *Prometheus - - // labelNames - is a list of label *names* for PromCounterCatalogueContainer's in stats - // map to use to get proper selectors. - // The topmost instance of PromCounterCatalogueContainer has the full list of all names to - // consider (the one provided by the config). Whenver it needs to create a new item in - // it's stats map, it suplies labelNames[1:] to the constructor for the lower level - // container to get the selector for the next level - labelNames []string // This is list of label names for nested containers - - // This is the unique set of label-value pairs for this catalogue element. - // The topmost Catalog has it empty, when it creates a new entry it provides the pair of - // labelNames[0]->selector(message) to the constructor. Lower levels get these pair - // collected. Ultimately, when all label names in labelNames is exausted, Catalogue creates - // an instance of newPrometheusCounterSet and provides it with labels map to properly wrap - // in Prometheus registry. 
- // The goal is to separate label/values pairs construction and individual counters collection - labels map[string]string // This is the set of label=value pairs we collected to this level - stats map[string]PrometheusCountersCatalogue - - // selector is a function that obtains a value for a label considering DNS Message data - // in most cases - just a field of that message - selector func(*dnsutils.DNSMessage) string - - sync.RWMutex -} - -/* -Selectors -*/ -func GetStreamGlobal(dm *dnsutils.DNSMessage) string { - return "enabled" -} - -func GetStreamID(dm *dnsutils.DNSMessage) string { - return dm.DNSTap.Identity -} - -func GetResolverIP(dm *dnsutils.DNSMessage) string { - return dm.NetworkInfo.ResponseIP -} - -type Prometheus struct { - doneAPI chan bool - stopProcess chan bool - doneProcess chan bool - stopRun chan bool - doneRun chan bool - httpServer *http.Server - netListener net.Listener - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - promRegistry *prometheus.Registry - - sync.Mutex - catalogueLabels []string - counters *PromCounterCatalogueContainer - - // All metrics use these descriptions when regestering - gaugeTopDomains *prometheus.Desc - gaugeTopNoerrDomains *prometheus.Desc - gaugeTopNxDomains *prometheus.Desc - gaugeTopSfDomains *prometheus.Desc - gaugeTopRequesters *prometheus.Desc - gaugeTopTlds *prometheus.Desc - gaugeTopETldsPlusOne *prometheus.Desc - gaugeTopSuspicious *prometheus.Desc - gaugeTopEvicted *prometheus.Desc - - gaugeDomainsAll *prometheus.Desc - gaugeDomainsValid *prometheus.Desc - gaugeDomainsNx *prometheus.Desc - gaugeDomainsSf *prometheus.Desc - gaugeRequesters *prometheus.Desc - gaugeTlds *prometheus.Desc - gaugeETldPlusOne *prometheus.Desc - gaugeSuspicious *prometheus.Desc - gaugeEvicted *prometheus.Desc - - gaugeEps *prometheus.Desc - gaugeEpsMax *prometheus.Desc - - counterQtypes *prometheus.Desc - 
counterRcodes *prometheus.Desc - counterIPProtocol *prometheus.Desc - counterIPVersion *prometheus.Desc - counterDNSMessages *prometheus.Desc - counterDNSQueries *prometheus.Desc - counterDNSReplies *prometheus.Desc - - counterFlagsTC *prometheus.Desc - counterFlagsAA *prometheus.Desc - counterFlagsRA *prometheus.Desc - counterFlagsAD *prometheus.Desc - counterFlagsMalformed *prometheus.Desc - counterFlagsFragmented *prometheus.Desc - counterFlagsReassembled *prometheus.Desc - - totalBytes *prometheus.Desc - totalReceivedBytes *prometheus.Desc - totalSentBytes *prometheus.Desc - - // Histograms are heavy and expensive, turned off - // by default in configuration - histogramQueriesLength *prometheus.HistogramVec - histogramRepliesLength *prometheus.HistogramVec - histogramQnamesLength *prometheus.HistogramVec - histogramLatencies *prometheus.HistogramVec - - name string - RoutingHandler pkgutils.RoutingHandler -} - -func newPrometheusCounterSet(p *Prometheus, labels prometheus.Labels) *PrometheusCountersSet { - pcs := &PrometheusCountersSet{ - prom: p, - labels: labels, - requesters: expirable.NewLRU[string, int](p.config.Loggers.Prometheus.RequestersCacheSize, nil, time.Second*time.Duration(p.config.Loggers.Prometheus.RequestersCacheTTL)), - allDomains: expirable.NewLRU[string, int](p.config.Loggers.Prometheus.DomainsCacheSize, nil, time.Second*time.Duration(p.config.Loggers.Prometheus.DomainsCacheTTL)), - validDomains: expirable.NewLRU[string, int](p.config.Loggers.Prometheus.NoErrorDomainsCacheSize, nil, time.Second*time.Duration(p.config.Loggers.Prometheus.NoErrorDomainsCacheTTL)), - nxDomains: expirable.NewLRU[string, int](p.config.Loggers.Prometheus.NXDomainsCacheSize, nil, time.Second*time.Duration(p.config.Loggers.Prometheus.NXDomainsCacheTTL)), - sfDomains: expirable.NewLRU[string, int](p.config.Loggers.Prometheus.ServfailDomainsCacheSize, nil, time.Second*time.Duration(p.config.Loggers.Prometheus.ServfailDomainsCacheTTL)), - tlds: expirable.NewLRU[string, 
int](p.config.Loggers.Prometheus.DefaultDomainsCacheSize, nil, time.Second*time.Duration(p.config.Loggers.Prometheus.DefaultDomainsCacheTTL)), - etldplusone: expirable.NewLRU[string, int](p.config.Loggers.Prometheus.DefaultDomainsCacheSize, nil, time.Second*time.Duration(p.config.Loggers.Prometheus.DefaultDomainsCacheTTL)), - suspicious: expirable.NewLRU[string, int](p.config.Loggers.Prometheus.DefaultDomainsCacheSize, nil, time.Second*time.Duration(p.config.Loggers.Prometheus.DefaultDomainsCacheTTL)), - evicted: expirable.NewLRU[string, int](p.config.Loggers.Prometheus.DefaultDomainsCacheSize, nil, time.Second*time.Duration(p.config.Loggers.Prometheus.DefaultDomainsCacheTTL)), - - epsCounters: EpsCounters{ - TotalRcodes: make(map[string]float64), - TotalQtypes: make(map[string]float64), - TotalIPVersion: make(map[string]float64), - TotalIPProtocol: make(map[string]float64), - }, - - topRequesters: topmap.NewTopMap(p.config.Loggers.Prometheus.TopN), - topEvicted: topmap.NewTopMap(p.config.Loggers.Prometheus.TopN), - topAllDomains: topmap.NewTopMap(p.config.Loggers.Prometheus.TopN), - topValidDomains: topmap.NewTopMap(p.config.Loggers.Prometheus.TopN), - topSfDomains: topmap.NewTopMap(p.config.Loggers.Prometheus.TopN), - topNxDomains: topmap.NewTopMap(p.config.Loggers.Prometheus.TopN), - topTlds: topmap.NewTopMap(p.config.Loggers.Prometheus.TopN), - topETLDPlusOne: topmap.NewTopMap(p.config.Loggers.Prometheus.TopN), - topSuspicious: topmap.NewTopMap(p.config.Loggers.Prometheus.TopN), - } - prometheus.WrapRegistererWith(labels, p.promRegistry).MustRegister(pcs) - return pcs -} - -func (c *PrometheusCountersSet) GetCountersSet(dm *dnsutils.DNSMessage) PrometheusCountersCatalogue { - return c -} - -// each CounterSet has the same list of timeseries descriptors, -// so it uses descriptros from the Prometheus instance the set belongs to. 
-func (c *PrometheusCountersSet) Describe(ch chan<- *prometheus.Desc) { - // Gauge metrcis - c.Lock() - defer c.Unlock() - ch <- c.prom.gaugeTopDomains - ch <- c.prom.gaugeTopNoerrDomains - ch <- c.prom.gaugeTopNxDomains - ch <- c.prom.gaugeTopSfDomains - ch <- c.prom.gaugeTopRequesters - ch <- c.prom.gaugeTopTlds - ch <- c.prom.gaugeTopETldsPlusOne - ch <- c.prom.gaugeTopSuspicious - ch <- c.prom.gaugeTopEvicted - - // Counter metrics - ch <- c.prom.gaugeDomainsAll - ch <- c.prom.gaugeDomainsValid - ch <- c.prom.gaugeDomainsNx - ch <- c.prom.gaugeDomainsSf - ch <- c.prom.gaugeRequesters - ch <- c.prom.gaugeTlds - ch <- c.prom.gaugeETldPlusOne - ch <- c.prom.gaugeSuspicious - ch <- c.prom.gaugeEvicted - - ch <- c.prom.gaugeEps - ch <- c.prom.gaugeEpsMax - - ch <- c.prom.counterQtypes - ch <- c.prom.counterRcodes - ch <- c.prom.counterIPProtocol - ch <- c.prom.counterIPVersion - ch <- c.prom.counterDNSMessages - ch <- c.prom.counterDNSQueries - ch <- c.prom.counterDNSReplies - - ch <- c.prom.counterFlagsTC - ch <- c.prom.counterFlagsAA - ch <- c.prom.counterFlagsRA - ch <- c.prom.counterFlagsAD - ch <- c.prom.counterFlagsMalformed - ch <- c.prom.counterFlagsFragmented - ch <- c.prom.counterFlagsReassembled - - ch <- c.prom.totalBytes - ch <- c.prom.totalReceivedBytes - ch <- c.prom.totalSentBytes -} - -// Updates all counters for a specific set of labelName=labelValue -func (c *PrometheusCountersSet) Record(dm dnsutils.DNSMessage) { - c.Lock() - defer c.Unlock() - - // count all uniq requesters if enabled - if c.prom.config.Loggers.Prometheus.RequestersMetricsEnabled { - count, _ := c.requesters.Get(dm.NetworkInfo.QueryIP) - c.requesters.Add(dm.NetworkInfo.QueryIP, count+1) - c.topRequesters.Record(dm.NetworkInfo.QueryIP, count+1) - } - - // count all uniq domains if enabled - if c.prom.config.Loggers.Prometheus.DomainsMetricsEnabled { - count, _ := c.allDomains.Get(dm.DNS.Qname) - c.allDomains.Add(dm.DNS.Qname, count+1) - c.topAllDomains.Record(dm.DNS.Qname, 
count+1) - } - - // top domains - switch { - case dm.DNS.Rcode == dnsutils.DNSRcodeTimeout && c.prom.config.Loggers.Prometheus.TimeoutMetricsEnabled: - count, _ := c.evicted.Get(dm.DNS.Qname) - c.evicted.Add(dm.DNS.Qname, count+1) - c.topEvicted.Record(dm.DNS.Qname, count+1) - - case dm.DNS.Rcode == dnsutils.DNSRcodeServFail && c.prom.config.Loggers.Prometheus.ServfailMetricsEnabled: - count, _ := c.sfDomains.Get(dm.DNS.Qname) - c.sfDomains.Add(dm.DNS.Qname, count+1) - c.topSfDomains.Record(dm.DNS.Qname, count+1) - - case dm.DNS.Rcode == dnsutils.DNSRcodeNXDomain && c.prom.config.Loggers.Prometheus.NonExistentMetricsEnabled: - count, _ := c.nxDomains.Get(dm.DNS.Qname) - c.nxDomains.Add(dm.DNS.Qname, count+1) - c.topNxDomains.Record(dm.DNS.Qname, count+1) - - case dm.DNS.Rcode == dnsutils.DNSRcodeNoError && c.prom.config.Loggers.Prometheus.NoErrorMetricsEnabled: - count, _ := c.validDomains.Get(dm.DNS.Qname) - c.validDomains.Add(dm.DNS.Qname, count+1) - c.topValidDomains.Record(dm.DNS.Qname, count+1) - } - - // count and top tld - if dm.PublicSuffix != nil && dm.PublicSuffix.QnamePublicSuffix != "-" { - count, _ := c.tlds.Get(dm.PublicSuffix.QnamePublicSuffix) - c.tlds.Add(dm.PublicSuffix.QnamePublicSuffix, count+1) - c.topTlds.Record(dm.PublicSuffix.QnamePublicSuffix, count+1) - } - - // count TLD+1 if it is set - if dm.PublicSuffix != nil && dm.PublicSuffix.QnameEffectiveTLDPlusOne != "-" { - count, _ := c.etldplusone.Get(dm.PublicSuffix.QnameEffectiveTLDPlusOne) - c.etldplusone.Add(dm.PublicSuffix.QnameEffectiveTLDPlusOne, count+1) - c.topETLDPlusOne.Record(dm.PublicSuffix.QnameEffectiveTLDPlusOne, count+1) - } - - // suspicious domains - if dm.Suspicious != nil && dm.Suspicious.Score > 0.0 { - count, _ := c.suspicious.Get(dm.DNS.Qname) - c.suspicious.Add(dm.DNS.Qname, count+1) - c.topSuspicious.Record(dm.DNS.Qname, count+1) - } - - // compute histograms, no more enabled by default to avoid to hurt performance. 
- if c.prom.config.Loggers.Prometheus.HistogramMetricsEnabled { - c.prom.histogramQnamesLength.With(c.labels).Observe(float64(len(dm.DNS.Qname))) - - if dm.DNSTap.Latency > 0.0 { - c.prom.histogramLatencies.With(c.labels).Observe(dm.DNSTap.Latency) - } - - if dm.DNS.Type == dnsutils.DNSQuery { - c.prom.histogramQueriesLength.With(c.labels).Observe(float64(dm.DNS.Length)) - } else { - c.prom.histogramRepliesLength.With(c.labels).Observe(float64(dm.DNS.Length)) - } - } - - // Record EPS related data - c.epsCounters.TotalEvents++ - c.epsCounters.TotalBytes += dm.DNS.Length - c.epsCounters.TotalDNSMessages++ - - if _, exists := c.epsCounters.TotalIPVersion[dm.NetworkInfo.Family]; !exists { - c.epsCounters.TotalIPVersion[dm.NetworkInfo.Family] = 1 - } else { - c.epsCounters.TotalIPVersion[dm.NetworkInfo.Family]++ - } - - if _, exists := c.epsCounters.TotalIPProtocol[dm.NetworkInfo.Protocol]; !exists { - c.epsCounters.TotalIPProtocol[dm.NetworkInfo.Protocol] = 1 - } else { - c.epsCounters.TotalIPProtocol[dm.NetworkInfo.Protocol]++ - } - - if _, exists := c.epsCounters.TotalQtypes[dm.DNS.Qtype]; !exists { - c.epsCounters.TotalQtypes[dm.DNS.Qtype] = 1 - } else { - c.epsCounters.TotalQtypes[dm.DNS.Qtype]++ - } - - if _, exists := c.epsCounters.TotalRcodes[dm.DNS.Rcode]; !exists { - c.epsCounters.TotalRcodes[dm.DNS.Rcode] = 1 - } else { - c.epsCounters.TotalRcodes[dm.DNS.Rcode]++ - } - - if dm.DNS.Type == dnsutils.DNSQuery { - c.epsCounters.TotalBytesReceived += dm.DNS.Length - c.epsCounters.TotalQueries++ - } - if dm.DNS.Type == dnsutils.DNSReply { - c.epsCounters.TotalBytesSent += dm.DNS.Length - c.epsCounters.TotalReplies++ - } - - // flags - if dm.DNS.Flags.TC { - c.epsCounters.TotalTC++ - } - if dm.DNS.Flags.AA { - c.epsCounters.TotalAA++ - } - if dm.DNS.Flags.RA { - c.epsCounters.TotalRA++ - } - if dm.DNS.Flags.AD { - c.epsCounters.TotalAD++ - } - if dm.DNS.MalformedPacket { - c.epsCounters.TotalMalformed++ - } - if dm.NetworkInfo.IPDefragmented { - 
c.epsCounters.TotalFragmented++ - } - if dm.NetworkInfo.TCPReassembled { - c.epsCounters.TotalReasembled++ - } - -} - -func (c *PrometheusCountersSet) Collect(ch chan<- prometheus.Metric) { - c.Lock() - defer c.Unlock() - // Update number of all domains - ch <- prometheus.MustNewConstMetric(c.prom.gaugeDomainsAll, prometheus.GaugeValue, - float64(c.allDomains.Len()), - ) - // Update number of valid domains (noerror) - ch <- prometheus.MustNewConstMetric(c.prom.gaugeDomainsValid, prometheus.GaugeValue, - float64(c.validDomains.Len()), - ) - // Count NX domains - ch <- prometheus.MustNewConstMetric(c.prom.gaugeDomainsNx, prometheus.GaugeValue, - float64(c.nxDomains.Len()), - ) - // Count SERVFAIL domains - ch <- prometheus.MustNewConstMetric(c.prom.gaugeDomainsSf, prometheus.GaugeValue, - float64(c.sfDomains.Len()), - ) - // Requesters counter - ch <- prometheus.MustNewConstMetric(c.prom.gaugeRequesters, prometheus.GaugeValue, - float64(c.requesters.Len()), - ) - - // Count number of unique TLDs - ch <- prometheus.MustNewConstMetric(c.prom.gaugeTlds, prometheus.GaugeValue, - float64(c.tlds.Len()), - ) - - ch <- prometheus.MustNewConstMetric(c.prom.gaugeETldPlusOne, prometheus.GaugeValue, - float64(c.etldplusone.Len()), - ) - - // Count number of unique suspicious names - ch <- prometheus.MustNewConstMetric(c.prom.gaugeSuspicious, prometheus.GaugeValue, - float64(c.suspicious.Len()), - ) - - // Count number of unique unanswered (timedout) names - ch <- prometheus.MustNewConstMetric(c.prom.gaugeEvicted, prometheus.GaugeValue, - float64(c.evicted.Len()), - ) - - // Count for all top domains - for _, r := range c.topAllDomains.Get() { - ch <- prometheus.MustNewConstMetric(c.prom.gaugeTopDomains, prometheus.GaugeValue, - float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) - } - - for _, r := range c.topValidDomains.Get() { - ch <- prometheus.MustNewConstMetric(c.prom.gaugeTopNoerrDomains, prometheus.GaugeValue, - float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) - } - - for 
_, r := range c.topNxDomains.Get() { - ch <- prometheus.MustNewConstMetric(c.prom.gaugeTopNxDomains, prometheus.GaugeValue, - float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) - } - - for _, r := range c.topSfDomains.Get() { - ch <- prometheus.MustNewConstMetric(c.prom.gaugeTopSfDomains, prometheus.GaugeValue, - float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) - } - - for _, r := range c.topRequesters.Get() { - ch <- prometheus.MustNewConstMetric(c.prom.gaugeTopRequesters, prometheus.GaugeValue, - float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) - } - - for _, r := range c.topTlds.Get() { - ch <- prometheus.MustNewConstMetric(c.prom.gaugeTopTlds, prometheus.GaugeValue, - float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) - } - - for _, r := range c.topETLDPlusOne.Get() { - ch <- prometheus.MustNewConstMetric(c.prom.gaugeTopETldsPlusOne, prometheus.GaugeValue, - float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) - } - - for _, r := range c.topSuspicious.Get() { - ch <- prometheus.MustNewConstMetric(c.prom.gaugeTopSuspicious, prometheus.GaugeValue, - float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) - } - - for _, r := range c.topEvicted.Get() { - ch <- prometheus.MustNewConstMetric(c.prom.gaugeTopEvicted, prometheus.GaugeValue, - float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) - } - - ch <- prometheus.MustNewConstMetric(c.prom.gaugeEps, prometheus.GaugeValue, - float64(c.epsCounters.Eps), - ) - ch <- prometheus.MustNewConstMetric(c.prom.gaugeEpsMax, prometheus.GaugeValue, - float64(c.epsCounters.EpsMax), - ) - - // Update qtypes counter - for k, v := range c.epsCounters.TotalQtypes { - ch <- prometheus.MustNewConstMetric(c.prom.counterQtypes, prometheus.CounterValue, - v, k, - ) - } - - // Update Return Codes counter - for k, v := range c.epsCounters.TotalRcodes { - ch <- prometheus.MustNewConstMetric(c.prom.counterRcodes, prometheus.CounterValue, - v, k, - ) - } - - // Update IP protocol counter - for k, v := range c.epsCounters.TotalIPProtocol { - ch <- 
prometheus.MustNewConstMetric(c.prom.counterIPProtocol, prometheus.CounterValue, - v, k, - ) - } - - // Update IP version counter - for k, v := range c.epsCounters.TotalIPVersion { - ch <- prometheus.MustNewConstMetric(c.prom.counterIPVersion, prometheus.CounterValue, - v, k, - ) - } - - // Update global number of dns messages - ch <- prometheus.MustNewConstMetric(c.prom.counterDNSMessages, prometheus.CounterValue, - c.epsCounters.TotalDNSMessages) - - // Update number of dns queries - ch <- prometheus.MustNewConstMetric(c.prom.counterDNSQueries, prometheus.CounterValue, - float64(c.epsCounters.TotalQueries)) - - // Update number of dns replies - ch <- prometheus.MustNewConstMetric(c.prom.counterDNSReplies, prometheus.CounterValue, - float64(c.epsCounters.TotalReplies)) - - // Update flags - ch <- prometheus.MustNewConstMetric(c.prom.counterFlagsTC, prometheus.CounterValue, - c.epsCounters.TotalTC) - ch <- prometheus.MustNewConstMetric(c.prom.counterFlagsAA, prometheus.CounterValue, - c.epsCounters.TotalAA) - ch <- prometheus.MustNewConstMetric(c.prom.counterFlagsRA, prometheus.CounterValue, - c.epsCounters.TotalRA) - ch <- prometheus.MustNewConstMetric(c.prom.counterFlagsAD, prometheus.CounterValue, - c.epsCounters.TotalAD) - ch <- prometheus.MustNewConstMetric(c.prom.counterFlagsMalformed, prometheus.CounterValue, - c.epsCounters.TotalMalformed) - ch <- prometheus.MustNewConstMetric(c.prom.counterFlagsFragmented, prometheus.CounterValue, - c.epsCounters.TotalFragmented) - ch <- prometheus.MustNewConstMetric(c.prom.counterFlagsReassembled, prometheus.CounterValue, - c.epsCounters.TotalReasembled) - - ch <- prometheus.MustNewConstMetric(c.prom.totalBytes, - prometheus.CounterValue, float64(c.epsCounters.TotalBytes), - ) - ch <- prometheus.MustNewConstMetric(c.prom.totalReceivedBytes, prometheus.CounterValue, - float64(c.epsCounters.TotalBytesReceived), - ) - ch <- prometheus.MustNewConstMetric(c.prom.totalSentBytes, prometheus.CounterValue, - 
float64(c.epsCounters.TotalBytesSent)) - -} - -func (c *PrometheusCountersSet) ComputeEventsPerSecond() { - c.Lock() - defer c.Unlock() - if c.epsCounters.TotalEvents > 0 && c.epsCounters.TotalEventsPrev > 0 { - c.epsCounters.Eps = c.epsCounters.TotalEvents - c.epsCounters.TotalEventsPrev - } - c.epsCounters.TotalEventsPrev = c.epsCounters.TotalEvents - if c.epsCounters.Eps > c.epsCounters.EpsMax { - c.epsCounters.EpsMax = c.epsCounters.Eps - } -} - -func NewPromCounterCatalogueContainer(p *Prometheus, selLabels []string, l map[string]string) *PromCounterCatalogueContainer { - if len(selLabels) == 0 { - panic("Cannot create a new PromCounterCatalogueContainer with empty list of selLabels") - } - sel, ok := catalogueSelectors[selLabels[0]] - if !ok { - panic(fmt.Sprintf("No selector for %v label", selLabels[0])) - } - - // copy all the data over, to make sure this container does not share memory with other containers - r := &PromCounterCatalogueContainer{ - prom: p, - stats: make(map[string]PrometheusCountersCatalogue), - selector: sel, - labelNames: make([]string, len(selLabels)), - labels: make(map[string]string), - } - for k, v := range l { - r.labels[k] = v - } - copy(r.labelNames, selLabels) - return r -} - -// Returns a slice of all PrometheusCountersSet in a Container -func (c *PromCounterCatalogueContainer) GetAllCounterSets() []*PrometheusCountersSet { - ret := []*PrometheusCountersSet{} - c.RLock() - for _, v := range c.stats { - switch elem := v.(type) { - case *PrometheusCountersSet: - ret = append(ret, elem) - case *PromCounterCatalogueContainer: - ret = append(ret, elem.GetAllCounterSets()...) 
- default: - panic(fmt.Sprintf("Unexpected element in PromCounterCatalogueContainer of %T: %v", v, v)) - } - } - c.RUnlock() - return ret -} - -// Searches for an existing element for a label value, creating one if not found -func (c *PromCounterCatalogueContainer) GetCountersSet(dm *dnsutils.DNSMessage) PrometheusCountersCatalogue { - if c.selector == nil { - panic(fmt.Sprintf("%v: nil selector", c)) - } - - // c.selector fetches the value for the label *this* Catalogue Element considers. - // Check if we alreday have item for it, and return it if we do (it is either catalogue or counter set) - lbl := c.selector(dm) - c.Lock() - defer c.Unlock() - if r, ok := c.stats[lbl]; ok { - return r.GetCountersSet(dm) - } - - // there is no existing element in the catalogue. We need to create a new entry. - // Entry may be a new Catalogue, or PrometheusCounterSet. - // If selector_labels consists of single element, we need to create a PrometheusCounterSet. - // Otherwise, there is another layer of labels. - var newElem PrometheusCountersCatalogue - // Prepare labels for the new element (needed for ether CatalogueContainer and CounterSet) - newLables := map[string]string{ - c.labelNames[0]: lbl, - } - for k, v := range c.labels { - newLables[k] = v - } - if len(c.labelNames) > 1 { - newElem = NewPromCounterCatalogueContainer( - c.prom, - c.labelNames[1:], - newLables, // Here we'll do an extra map copy... - ) - } else { - newElem = newPrometheusCounterSet( - c.prom, - prometheus.Labels(newLables), - ) - - } - c.stats[lbl] = newElem - - // GetCountersSet of the newly created element may take some time, and we will be holding the lock - // of the current Container until it is done. This may be improved if we separate c.stats[lbl] - // update and calling GetCountersSet on the new element. 
- return c.stats[lbl].GetCountersSet(dm) -} - -// This function checks the configuration, to determine which label dimensions were requested -// by configuration, and returns correct implementation of Catalogue. -func CreateSystemCatalogue(o *Prometheus) ([]string, *PromCounterCatalogueContainer) { - lbls := o.config.Loggers.Prometheus.LabelsList - - // Default configuration is label with stream_id, to keep us backward compatible - if len(lbls) == 0 { - lbls = []string{"stream_id"} - } - return lbls, NewPromCounterCatalogueContainer( - o, - lbls, - make(map[string]string), - ) -} - -func NewPrometheus(config *pkgconfig.Config, logger *logger.Logger, name string) *Prometheus { - logger.Info(pkgutils.PrefixLogLogger+"[%s] prometheus - enabled", name) - o := &Prometheus{ - doneAPI: make(chan bool), - stopProcess: make(chan bool), - doneProcess: make(chan bool), - stopRun: make(chan bool), - doneRun: make(chan bool), - config: config, - configChan: make(chan *pkgconfig.Config), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.Prometheus.ChannelBufferSize), - outputChan: make(chan dnsutils.DNSMessage, config.Loggers.Prometheus.ChannelBufferSize), - logger: logger, - promRegistry: prometheus.NewPedanticRegistry(), - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - } - - // This will create a catalogue of counters indexed by fileds requested by config - o.catalogueLabels, o.counters = CreateSystemCatalogue(o) - - // init prometheus - o.InitProm() - - // midleware to add basic authentication - authMiddleware := func(handler http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - username, password, ok := r.BasicAuth() - if !ok || username != o.config.Loggers.Prometheus.BasicAuthLogin || password != o.config.Loggers.Prometheus.BasicAuthPwd { - w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`) - w.WriteHeader(http.StatusUnauthorized) - fmt.Fprintf(w, "Unauthorized\n") - 
return - } - - handler.ServeHTTP(w, r) - }) - } - - mux := http.NewServeMux() - mux.Handle("/metrics", promhttp.HandlerFor(o.promRegistry, promhttp.HandlerOpts{})) - - handler := authMiddleware(mux) - - o.httpServer = &http.Server{} - if o.config.Loggers.Prometheus.BasicAuthEnabled { - o.httpServer.Handler = handler - } else { - o.httpServer.Handler = mux - } - - o.httpServer.ErrorLog = o.logger.ErrorLogger() - return o -} - -func (c *Prometheus) GetName() string { return c.name } - -func (c *Prometheus) AddDroppedRoute(wrk pkgutils.Worker) { - c.RoutingHandler.AddDroppedRoute(wrk) -} - -func (c *Prometheus) AddDefaultRoute(wrk pkgutils.Worker) { - c.RoutingHandler.AddDefaultRoute(wrk) -} - -func (c *Prometheus) SetLoggers(loggers []pkgutils.Worker) {} - -func (c *Prometheus) InitProm() { - - promPrefix := SanitizeMetricName(c.config.Loggers.Prometheus.PromPrefix) - - // register metric about current version information. - c.promRegistry.MustRegister(version.NewCollector(promPrefix)) - - // export Go runtime metrics - c.promRegistry.MustRegister( - collectors.NewGoCollector(collectors.WithGoCollectorMemStatsMetricsDisabled()), - collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), - ) - // also try collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), - - // Metric description created in Prometheus object, but used in Describe method of PrometheusCounterSet - // Prometheus class itself reports signle metric - BuildInfo. 
- c.gaugeTopDomains = prometheus.NewDesc( - fmt.Sprintf("%s_top_domains", promPrefix), - "Number of hit per domain topN, partitioned by qname", - []string{"domain"}, nil, - ) - - c.gaugeTopNoerrDomains = prometheus.NewDesc( - fmt.Sprintf("%s_top_noerror_domains", promPrefix), - "Number of hit per domain topN, partitioned by qname", - []string{"domain"}, nil, - ) - - c.gaugeTopNxDomains = prometheus.NewDesc( - fmt.Sprintf("%s_top_nonexistent_domains", promPrefix), - "Number of hit per nx domain topN, partitioned by qname", - []string{"domain"}, nil, - ) - - c.gaugeTopSfDomains = prometheus.NewDesc( - fmt.Sprintf("%s_top_servfail_domains", promPrefix), - "Number of hit per servfail domain topN, partitioned by stream and qname", - []string{"domain"}, nil, - ) - - c.gaugeTopRequesters = prometheus.NewDesc( - fmt.Sprintf("%s_top_requesters", promPrefix), - "Number of hit per requester topN, partitioned by client IP", - []string{"ip"}, nil, - ) - - c.gaugeTopTlds = prometheus.NewDesc( - fmt.Sprintf("%s_top_tlds", promPrefix), - "Number of hit per tld - topN", - []string{"suffix"}, nil, - ) - // etldplusone_top_total - c.gaugeTopETldsPlusOne = prometheus.NewDesc( - fmt.Sprintf("%s_top_etlds_plusone", promPrefix), - "Number of hit per eTLD+1 - topN", - []string{"suffix"}, nil, - ) - - c.gaugeTopSuspicious = prometheus.NewDesc( - fmt.Sprintf("%s_top_suspicious", promPrefix), - "Number of hit per suspicious domain - topN", - []string{"domain"}, nil, - ) - - c.gaugeTopEvicted = prometheus.NewDesc( - fmt.Sprintf("%s_top_unanswered", promPrefix), - "Number of hit per unanswered domain - topN", - []string{"domain"}, nil, - ) - - c.gaugeEps = prometheus.NewDesc( - fmt.Sprintf("%s_throughput_ops", promPrefix), - "Number of ops per second received, partitioned by stream", - nil, nil, - ) - - c.gaugeEpsMax = prometheus.NewDesc( - fmt.Sprintf("%s_throughput_ops_max", promPrefix), - "Max number of ops per second observed, partitioned by stream", - nil, nil, - ) - - // Counter metrics 
- c.gaugeDomainsAll = prometheus.NewDesc( - fmt.Sprintf("%s_total_domains_lru", promPrefix), - "Total number of uniq domains most recently observed per stream identity ", - nil, nil, - ) - - c.gaugeDomainsValid = prometheus.NewDesc( - fmt.Sprintf("%s_total_noerror_domains_lru", promPrefix), - "Total number of NOERROR domains most recently observed per stream identity ", - nil, nil, - ) - - c.gaugeDomainsNx = prometheus.NewDesc( - fmt.Sprintf("%s_total_nonexistent_domains_lru", promPrefix), - "Total number of NX domains most recently observed per stream identity", - nil, nil, - ) - - c.gaugeDomainsSf = prometheus.NewDesc( - fmt.Sprintf("%s_total_servfail_domains_lru", promPrefix), - "Total number of SERVFAIL domains most recently observed per stream identity", - nil, nil, - ) - - c.gaugeRequesters = prometheus.NewDesc( - fmt.Sprintf("%s_total_requesters_lru", promPrefix), - "Total number of DNS clients most recently observed per stream identity.", - nil, nil, - ) - - c.gaugeTlds = prometheus.NewDesc( - fmt.Sprintf("%s_total_tlds_lru", promPrefix), - "Total number of tld most recently observed per stream identity", - nil, nil, - ) - - c.gaugeETldPlusOne = prometheus.NewDesc( - fmt.Sprintf("%s_total_etlds_plusone_lru", promPrefix), - "Total number of etld+one most recently observed per stream identity", - nil, nil, - ) - - c.gaugeSuspicious = prometheus.NewDesc( - fmt.Sprintf("%s_total_suspicious_lru", promPrefix), - "Total number of suspicious domains most recently observed per stream identity", - nil, nil, - ) - - c.gaugeEvicted = prometheus.NewDesc( - fmt.Sprintf("%s_total_unanswered_lru", promPrefix), - "Total number of unanswered domains most recently observed per stream identity", - nil, nil, - ) - - c.counterQtypes = prometheus.NewDesc( - fmt.Sprintf("%s_qtypes_total", promPrefix), - "Counter of queries per qtypes", - []string{"query_type"}, nil, - ) - - c.counterRcodes = prometheus.NewDesc( - fmt.Sprintf("%s_rcodes_total", promPrefix), - "Counter of replies 
per return codes", - []string{"return_code"}, nil, - ) - - c.counterIPProtocol = prometheus.NewDesc( - fmt.Sprintf("%s_ipprotocol_total", promPrefix), - "Counter of packets per IP protocol", - []string{"net_transport"}, nil, - ) - - c.counterIPVersion = prometheus.NewDesc( - fmt.Sprintf("%s_ipversion_total", promPrefix), - "Counter of packets per IP version", - []string{"net_family"}, nil, - ) - - c.counterDNSMessages = prometheus.NewDesc( - fmt.Sprintf("%s_dnsmessages_total", promPrefix), - "Counter of DNS messages per stream", - nil, nil, - ) - - c.counterDNSQueries = prometheus.NewDesc( - fmt.Sprintf("%s_queries_total", promPrefix), - "Counter of DNS queries per stream", - nil, nil, - ) - - c.counterDNSReplies = prometheus.NewDesc( - fmt.Sprintf("%s_replies_total", promPrefix), - "Counter of DNS replies per stream", - nil, nil, - ) - - c.counterFlagsTC = prometheus.NewDesc( - fmt.Sprintf("%s_flag_tc_total", promPrefix), - "Number of packet with flag TC", - nil, nil, - ) - - c.counterFlagsAA = prometheus.NewDesc( - fmt.Sprintf("%s_flag_aa_total", promPrefix), - "Number of packet with flag AA", - nil, nil, - ) - - c.counterFlagsRA = prometheus.NewDesc( - fmt.Sprintf("%s_flag_ra_total", promPrefix), - "Number of packet with flag RA", - nil, nil, - ) - - c.counterFlagsAD = prometheus.NewDesc( - fmt.Sprintf("%s_flag_ad_total", promPrefix), - "Number of packet with flag AD", - nil, nil, - ) - - c.counterFlagsMalformed = prometheus.NewDesc( - fmt.Sprintf("%s_malformed_total", promPrefix), - "Number of malformed packets", - nil, nil, - ) - - c.counterFlagsFragmented = prometheus.NewDesc( - fmt.Sprintf("%s_fragmented_total", promPrefix), - "Number of IP fragmented packets", - nil, nil, - ) - - c.counterFlagsReassembled = prometheus.NewDesc( - fmt.Sprintf("%s_reassembled_total", promPrefix), - "Number of TCP reassembled packets", - nil, nil, - ) - - c.totalBytes = prometheus.NewDesc( - fmt.Sprintf("%s_bytes_total", promPrefix), - "The total bytes received and sent", - 
nil, nil, - ) - - c.totalReceivedBytes = prometheus.NewDesc( - fmt.Sprintf("%s_received_bytes_total", promPrefix), - "The total bytes received", - nil, nil, - ) - - c.totalSentBytes = prometheus.NewDesc( - fmt.Sprintf("%s_sent_bytes_total", promPrefix), - "The total bytes sent", - nil, nil, - ) - - c.histogramQueriesLength = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: fmt.Sprintf("%s_queries_size_bytes", promPrefix), - Help: "Size of the queries in bytes.", - Buckets: []float64{50, 100, 250, 500}, - }, - c.catalogueLabels, - ) - c.promRegistry.MustRegister(c.histogramQueriesLength) - - c.histogramRepliesLength = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: fmt.Sprintf("%s_replies_size_bytes", promPrefix), - Help: "Size of the replies in bytes.", - Buckets: []float64{50, 100, 250, 500}, - }, - c.catalogueLabels, - ) - c.promRegistry.MustRegister(c.histogramRepliesLength) - - c.histogramQnamesLength = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: fmt.Sprintf("%s_qnames_size_bytes", promPrefix), - Help: "Size of the qname in bytes.", - Buckets: []float64{10, 20, 40, 60, 100}, - }, - c.catalogueLabels, - ) - c.promRegistry.MustRegister(c.histogramQnamesLength) - - c.histogramLatencies = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: fmt.Sprintf("%s_latencies", promPrefix), - Help: "Latency between query and reply", - Buckets: []float64{0.001, 0.010, 0.050, 0.100, 0.5, 1.0}, - }, - c.catalogueLabels, - ) - c.promRegistry.MustRegister(c.histogramLatencies) -} - -func (c *Prometheus) ReadConfig() { - if !pkgconfig.IsValidTLS(c.config.Loggers.Prometheus.TLSMinVersion) { - c.logger.Fatal(pkgutils.PrefixLogLogger + "[" + c.name + "] prometheus - invalid tls min version") - } -} - -func (c *Prometheus) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration!") - c.configChan <- config -} - -func (c *Prometheus) LogInfo(msg string, v ...interface{}) { - 
c.logger.Info(pkgutils.PrefixLogLogger+"["+c.name+"] prometheus - "+msg, v...) -} - -func (c *Prometheus) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogLogger+"["+c.name+"] prometheus - "+msg, v...) -} - -func (c *Prometheus) GetInputChannel() chan dnsutils.DNSMessage { - return c.inputChan -} - -func (c *Prometheus) Stop() { - c.LogInfo("stopping logger...") - c.RoutingHandler.Stop() - - c.LogInfo("stopping to run...") - c.stopRun <- true - <-c.doneRun - - c.LogInfo("stopping to process...") - c.stopProcess <- true - <-c.doneProcess - - c.LogInfo("stopping http server...") - c.netListener.Close() - <-c.doneAPI -} - -func (c *Prometheus) Record(dm dnsutils.DNSMessage) { - // record stream identity - c.Lock() - - // count number of dns messages per network family (ipv4 or v6) - v := c.counters.GetCountersSet(&dm) - counterSet, ok := v.(*PrometheusCountersSet) - c.Unlock() - if !ok { - c.LogError(fmt.Sprintf("GetCountersSet returned an invalid value of %T, expected *PrometheusCountersSet", v)) - } else { - counterSet.Record(dm) - } - -} - -func (c *Prometheus) ComputeEventsPerSecond() { - // for each stream compute the number of events per second - c.Lock() - defer c.Unlock() - for _, cntrSet := range c.counters.GetAllCounterSets() { - cntrSet.ComputeEventsPerSecond() - } -} - -func (c *Prometheus) ListenAndServe() { - c.LogInfo("starting http server...") - - var err error - var listener net.Listener - addrlisten := c.config.Loggers.Prometheus.ListenIP + ":" + strconv.Itoa(c.config.Loggers.Prometheus.ListenPort) - // listening with tls enabled ? 
- if c.config.Loggers.Prometheus.TLSSupport { - c.LogInfo("tls support enabled") - var cer tls.Certificate - cer, err = tls.LoadX509KeyPair(c.config.Loggers.Prometheus.CertFile, c.config.Loggers.Prometheus.KeyFile) - if err != nil { - c.logger.Fatal("loading certificate failed:", err) - } - - // prepare tls configuration - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{cer}, - MinVersion: tls.VersionTLS12, - } - - // update tls min version according to the user config - tlsConfig.MinVersion = pkgconfig.TLSVersion[c.config.Loggers.Prometheus.TLSMinVersion] - - if c.config.Loggers.Prometheus.TLSMutual { - - // Create a CA certificate pool and add cert.pem to it - var caCert []byte - caCert, err = os.ReadFile(c.config.Loggers.Prometheus.CertFile) - if err != nil { - c.logger.Fatal(err) - } - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(caCert) - - tlsConfig.ClientCAs = caCertPool - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - } - - listener, err = tls.Listen(netlib.SocketTCP, addrlisten, tlsConfig) - - } else { - // basic listening - listener, err = net.Listen(netlib.SocketTCP, addrlisten) - } - - // something wrong ? 
- if err != nil { - c.logger.Fatal("http server listening failed:", err) - } - - c.netListener = listener - c.LogInfo("is listening on %s", listener.Addr()) - - c.httpServer.Serve(c.netListener) - - c.LogInfo("http server terminated") - c.doneAPI <- true -} - -func (c *Prometheus) Run() { - c.LogInfo("running in background...") - - // prepare next channels - defaultRoutes, defaultNames := c.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := c.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, c.outputChan) - subprocessors := transformers.NewTransforms(&c.config.OutgoingTransformers, c.logger, c.name, listChannel, 0) - - // start http server - go c.ListenAndServe() - - // goroutine to process transformed dns messages - go c.Process() - - // loop to process incoming messages -RUN_LOOP: - for { - select { - case <-c.stopRun: - // cleanup transformers - subprocessors.Reset() - c.doneRun <- true - break RUN_LOOP - - case cfg, opened := <-c.configChan: - if !opened { - return - } - c.config = cfg - c.ReadConfig() - subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, opened := <-c.inputChan: - if !opened { - c.LogInfo("input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - c.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? 
- c.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - c.outputChan <- dm - } - } - c.LogInfo("run terminated") -} - -func (c *Prometheus) Process() { - // init timer to compute qps - t1Interval := 1 * time.Second - t1 := time.NewTimer(t1Interval) - - c.LogInfo("ready to process") -PROCESS_LOOP: - for { - select { - case <-c.stopProcess: - c.doneProcess <- true - break PROCESS_LOOP - case dm, opened := <-c.outputChan: - if !opened { - c.LogInfo("output channel closed!") - return - } - - // record the dnstap message - c.Record(dm) - - case <-t1.C: - // compute eps each second - c.ComputeEventsPerSecond() - - // reset the timer - t1.Reset(t1Interval) - } - } - c.LogInfo("processing terminated") -} - -/* -This is an implementation of variadic dimensions map of label values. -Having nested structure offers the fastest operations, compared to super-flexibile approach that prom client -uses with arbitrary set of labels. - -Label values are obtained by the means of 'selectors' - functions that fetch a specific field of a DNS Message -offering fast operations. - -Example of conterSet/Container for 2 labels - -+----------------------------------------------------------------------------------------------------------+ -| Container for label1 | -| Container maps different values of label1 to other containers | -| until the chain for all required label names is built. | -| | -| Label1 values: | -| value11 value12 | -| +---------------------------------------------------------------------------+ +-------------------------+| -| | Container for label2 | | Container for label2 || -| | in this container ALL elements | | all elemenens share || -| | have the same value for label1 | | the same value of label1|| -| | | | || -| | Label2 values: | | +----------++----------+|| -| | value21 value22 | | | .... 
|| ,,,,,, ||| -| | +-----------------------------------++-----------------------------------+| | | || ||| -| | | CounterSet || CounterSet || | | || ||| -| | | In this set all metrics share the || In this set all metrics share the || | +----------++----------+|| -| | | same values for both labels, so || same values for both labels, so || | || -| | | no need to keep label values here || no need to keep label values here || | || -| | | || || | || -| | | metric1 || metric1 || | || -| | | metric2 || metric2 || | || -| | +-----------------------------------++-----------------------------------+| | || -| +---------------------------------------------------------------------------+ +-------------------------+| - -*/ diff --git a/loggers/redispub.go b/loggers/redispub.go deleted file mode 100644 index 391d14dd..00000000 --- a/loggers/redispub.go +++ /dev/null @@ -1,405 +0,0 @@ -package loggers - -import ( - "bufio" - "bytes" - "crypto/tls" - "encoding/json" - "errors" - "io" - "net" - "strconv" - "strings" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" -) - -type RedisPub struct { - stopProcess chan bool - doneProcess chan bool - stopRun chan bool - doneRun chan bool - stopRead chan bool - doneRead chan bool - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - textFormat []string - name string - transport string - transportWriter *bufio.Writer - transportConn net.Conn - transportReady chan bool - transportReconnect chan bool - writerReady bool - RoutingHandler pkgutils.RoutingHandler -} - -func NewRedisPub(config *pkgconfig.Config, logger *logger.Logger, name string) *RedisPub 
{ - logger.Info(pkgutils.PrefixLogLogger+"[%s] redispub - enabled", name) - s := &RedisPub{ - stopProcess: make(chan bool), - doneProcess: make(chan bool), - stopRun: make(chan bool), - doneRun: make(chan bool), - stopRead: make(chan bool), - doneRead: make(chan bool), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.RedisPub.ChannelBufferSize), - outputChan: make(chan dnsutils.DNSMessage, config.Loggers.RedisPub.ChannelBufferSize), - transportReady: make(chan bool), - transportReconnect: make(chan bool), - logger: logger, - config: config, - configChan: make(chan *pkgconfig.Config), - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - } - - s.ReadConfig() - - return s -} - -func (c *RedisPub) GetName() string { return c.name } - -func (c *RedisPub) AddDroppedRoute(wrk pkgutils.Worker) { - c.RoutingHandler.AddDroppedRoute(wrk) -} - -func (c *RedisPub) AddDefaultRoute(wrk pkgutils.Worker) { - c.RoutingHandler.AddDefaultRoute(wrk) -} - -func (c *RedisPub) SetLoggers(loggers []pkgutils.Worker) {} - -func (c *RedisPub) ReadConfig() { - - c.transport = c.config.Loggers.RedisPub.Transport - - // begin backward compatibility - if c.config.Loggers.RedisPub.TLSSupport { - c.transport = netlib.SocketTLS - } - if len(c.config.Loggers.RedisPub.SockPath) > 0 { - c.transport = netlib.SocketUnix - } - // end - - if len(c.config.Loggers.RedisPub.TextFormat) > 0 { - c.textFormat = strings.Fields(c.config.Loggers.RedisPub.TextFormat) - } else { - c.textFormat = strings.Fields(c.config.Global.TextFormat) - } -} - -func (c *RedisPub) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration!") - c.configChan <- config -} - -func (c *RedisPub) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogLogger+"["+c.name+"] redispub - "+msg, v...) -} - -func (c *RedisPub) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogLogger+"["+c.name+"] redispub - "+msg, v...) 
-} - -func (c *RedisPub) GetInputChannel() chan dnsutils.DNSMessage { - return c.inputChan -} - -func (c *RedisPub) Stop() { - c.LogInfo("stopping logger...") - c.RoutingHandler.Stop() - - c.LogInfo("stopping to run...") - c.stopRun <- true - <-c.doneRun - - c.LogInfo("stopping to receive...") - c.stopRead <- true - <-c.doneRead - - c.LogInfo("stopping to process...") - c.stopProcess <- true - <-c.doneProcess -} - -func (c *RedisPub) Disconnect() { - if c.transportConn != nil { - c.LogInfo("closing redispub connection") - c.transportConn.Close() - } -} - -func (c *RedisPub) ReadFromConnection() { - buffer := make([]byte, 4096) - - go func() { - for { - _, err := c.transportConn.Read(buffer) - if err != nil { - if errors.Is(err, io.EOF) || errors.Is(err, net.ErrClosed) { - c.LogInfo("read from connection terminated") - break - } - c.LogError("Error on reading: %s", err.Error()) - } - // We just discard the data - } - }() - - // block goroutine until receive true event in stopRead channel - <-c.stopRead - c.doneRead <- true - - c.LogInfo("read goroutine terminated") -} - -func (c *RedisPub) ConnectToRemote() { - for { - if c.transportConn != nil { - c.transportConn.Close() - c.transportConn = nil - } - - address := c.config.Loggers.RedisPub.RemoteAddress + ":" + strconv.Itoa(c.config.Loggers.RedisPub.RemotePort) - connTimeout := time.Duration(c.config.Loggers.RedisPub.ConnectTimeout) * time.Second - - var conn net.Conn - var err error - - switch c.transport { - case netlib.SocketUnix: - address = c.config.Loggers.RedisPub.RemoteAddress - if len(c.config.Loggers.RedisPub.SockPath) > 0 { - address = c.config.Loggers.RedisPub.SockPath - } - c.LogInfo("connecting to %s://%s", c.transport, address) - conn, err = net.DialTimeout(c.transport, address, connTimeout) - - case netlib.SocketTCP: - c.LogInfo("connecting to %s://%s", c.transport, address) - conn, err = net.DialTimeout(c.transport, address, connTimeout) - - case netlib.SocketTLS: - c.LogInfo("connecting to 
%s://%s", c.transport, address) - - var tlsConfig *tls.Config - - tlsOptions := pkgconfig.TLSOptions{ - InsecureSkipVerify: c.config.Loggers.RedisPub.TLSInsecure, - MinVersion: c.config.Loggers.RedisPub.TLSMinVersion, - CAFile: c.config.Loggers.RedisPub.CAFile, - CertFile: c.config.Loggers.RedisPub.CertFile, - KeyFile: c.config.Loggers.RedisPub.KeyFile, - } - - tlsConfig, err = pkgconfig.TLSClientConfig(tlsOptions) - if err == nil { - dialer := &net.Dialer{Timeout: connTimeout} - conn, err = tls.DialWithDialer(dialer, netlib.SocketTCP, address, tlsConfig) - } - - default: - c.logger.Fatal("logger=redispub - invalid transport:", c.transport) - } - - // something is wrong during connection ? - if err != nil { - c.LogError("%s", err) - c.LogInfo("retry to connect in %d seconds", c.config.Loggers.RedisPub.RetryInterval) - time.Sleep(time.Duration(c.config.Loggers.RedisPub.RetryInterval) * time.Second) - continue - } - - c.transportConn = conn - - // block until framestream is ready - c.transportReady <- true - - // block until an error occurred, need to reconnect - c.transportReconnect <- true - } -} - -func (c *RedisPub) FlushBuffer(buf *[]dnsutils.DNSMessage) { - // create escaping buffer - escapeBuffer := new(bytes.Buffer) - // create a new encoder that writes to the buffer - encoder := json.NewEncoder(escapeBuffer) - - for _, dm := range *buf { - escapeBuffer.Reset() - - cmd := "PUBLISH " + strconv.Quote(c.config.Loggers.RedisPub.RedisChannel) + " " - c.transportWriter.WriteString(cmd) - - if c.config.Loggers.RedisPub.Mode == pkgconfig.ModeText { - c.transportWriter.WriteString(strconv.Quote(dm.String(c.textFormat, c.config.Global.TextFormatDelimiter, c.config.Global.TextFormatBoundary))) - c.transportWriter.WriteString(c.config.Loggers.RedisPub.PayloadDelimiter) - } - - if c.config.Loggers.RedisPub.Mode == pkgconfig.ModeJSON { - encoder.Encode(dm) - c.transportWriter.WriteString(strconv.Quote(escapeBuffer.String())) - 
c.transportWriter.WriteString(c.config.Loggers.RedisPub.PayloadDelimiter) - } - - if c.config.Loggers.RedisPub.Mode == pkgconfig.ModeFlatJSON { - flat, err := dm.Flatten() - if err != nil { - c.LogError("flattening DNS message failed: %e", err) - continue - } - encoder.Encode(flat) - c.transportWriter.WriteString(strconv.Quote(escapeBuffer.String())) - c.transportWriter.WriteString(c.config.Loggers.RedisPub.PayloadDelimiter) - } - - // flush the transport buffer - err := c.transportWriter.Flush() - if err != nil { - c.LogError("send frame error", err.Error()) - c.writerReady = false - <-c.transportReconnect - break - } - } - - // reset buffer - *buf = nil -} - -func (c *RedisPub) Run() { - c.LogInfo("running in background...") - - // prepare next channels - defaultRoutes, defaultNames := c.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := c.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, c.outputChan) - subprocessors := transformers.NewTransforms(&c.config.OutgoingTransformers, c.logger, c.name, listChannel, 0) - - // goroutine to process transformed dns messages - go c.Process() - - // loop to process incoming messages -RUN_LOOP: - for { - select { - case <-c.stopRun: - // cleanup transformers - subprocessors.Reset() - - c.doneRun <- true - break RUN_LOOP - - case cfg, opened := <-c.configChan: - if !opened { - return - } - c.config = cfg - c.ReadConfig() - subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, opened := <-c.inputChan: - if !opened { - c.LogInfo("input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - c.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? 
- c.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - c.outputChan <- dm - } - } - c.LogInfo("run terminated") -} - -func (c *RedisPub) Process() { - // init buffer - bufferDm := []dnsutils.DNSMessage{} - - // init flust timer for buffer - flushInterval := time.Duration(c.config.Loggers.RedisPub.FlushInterval) * time.Second - flushTimer := time.NewTimer(flushInterval) - - // init remote conn - go c.ConnectToRemote() - - c.LogInfo("ready to process") -PROCESS_LOOP: - for { - select { - case <-c.stopProcess: - // closing remote connection if exist - c.Disconnect() - c.doneProcess <- true - break PROCESS_LOOP - - case <-c.transportReady: - c.LogInfo("transport connected with success") - c.transportWriter = bufio.NewWriter(c.transportConn) - c.writerReady = true - // read from the connection until we stop - go c.ReadFromConnection() - - // incoming dns message to process - case dm, opened := <-c.outputChan: - if !opened { - c.LogInfo("output channel closed!") - return - } - - // drop dns message if the connection is not ready to avoid memory leak or - // to block the channel - if !c.writerReady { - continue - } - - // append dns message to buffer - bufferDm = append(bufferDm, dm) - - // buffer is full ? 
- if len(bufferDm) >= c.config.Loggers.RedisPub.BufferSize { - c.FlushBuffer(&bufferDm) - } - - // flush the buffer - case <-flushTimer.C: - if !c.writerReady { - bufferDm = nil - } - - if len(bufferDm) > 0 { - c.FlushBuffer(&bufferDm) - } - - // restart timer - flushTimer.Reset(flushInterval) - - } - } - c.LogInfo("processing terminated") -} diff --git a/loggers/restapi.go b/loggers/restapi.go deleted file mode 100644 index d38d883d..00000000 --- a/loggers/restapi.go +++ /dev/null @@ -1,791 +0,0 @@ -package loggers - -import ( - "crypto/tls" - "encoding/json" - "net" - "net/http" - "strconv" - "sync" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" - "github.com/dmachard/go-topmap" -) - -type HitsRecord struct { - TotalHits int `json:"total-hits"` - Hits map[string]int `json:"hits"` -} - -type SearchBy struct { - Clients map[string]*HitsRecord - Domains map[string]*HitsRecord -} - -type HitsStream struct { - Streams map[string]SearchBy -} - -type HitsUniq struct { - Clients map[string]int - Domains map[string]int - NxDomains map[string]int - SfDomains map[string]int - PublicSuffixes map[string]int - Suspicious map[string]*dnsutils.TransformSuspicious -} - -type KeyHit struct { - Key string `json:"key"` - Hit int `json:"hit"` -} - -type RestAPI struct { - doneAPI chan bool - stopProcess chan bool - doneProcess chan bool - stopRun chan bool - doneRun chan bool - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - httpserver net.Listener - httpmux *http.ServeMux - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - name string - RoutingHandler pkgutils.RoutingHandler - - HitsStream HitsStream - HitsUniq HitsUniq - - Streams 
map[string]int `json:"streams"` - - TopQnames *topmap.TopMap - TopClients *topmap.TopMap - TopTLDs *topmap.TopMap - TopNonExistent *topmap.TopMap - TopServFail *topmap.TopMap - - sync.RWMutex -} - -func NewRestAPI(config *pkgconfig.Config, logger *logger.Logger, name string) *RestAPI { - logger.Info(pkgutils.PrefixLogLogger+"[%s] restapi - enabled", name) - o := &RestAPI{ - doneAPI: make(chan bool), - stopProcess: make(chan bool), - doneProcess: make(chan bool), - stopRun: make(chan bool), - doneRun: make(chan bool), - config: config, - configChan: make(chan *pkgconfig.Config), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.RestAPI.ChannelBufferSize), - outputChan: make(chan dnsutils.DNSMessage, config.Loggers.RestAPI.ChannelBufferSize), - logger: logger, - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - - HitsStream: HitsStream{ - Streams: make(map[string]SearchBy), - }, - HitsUniq: HitsUniq{ - Clients: make(map[string]int), - Domains: make(map[string]int), - NxDomains: make(map[string]int), - SfDomains: make(map[string]int), - PublicSuffixes: make(map[string]int), - Suspicious: make(map[string]*dnsutils.TransformSuspicious), - }, - - Streams: make(map[string]int), - - TopQnames: topmap.NewTopMap(config.Loggers.RestAPI.TopN), - TopClients: topmap.NewTopMap(config.Loggers.RestAPI.TopN), - TopTLDs: topmap.NewTopMap(config.Loggers.RestAPI.TopN), - TopNonExistent: topmap.NewTopMap(config.Loggers.RestAPI.TopN), - TopServFail: topmap.NewTopMap(config.Loggers.RestAPI.TopN), - } - return o -} - -func (c *RestAPI) GetName() string { return c.name } - -func (c *RestAPI) AddDroppedRoute(wrk pkgutils.Worker) { - c.RoutingHandler.AddDroppedRoute(wrk) -} - -func (c *RestAPI) AddDefaultRoute(wrk pkgutils.Worker) { - c.RoutingHandler.AddDefaultRoute(wrk) -} - -func (c *RestAPI) SetLoggers(loggers []pkgutils.Worker) {} - -func (c *RestAPI) ReadConfig() { - if !pkgconfig.IsValidTLS(c.config.Loggers.RestAPI.TLSMinVersion) { - 
c.logger.Fatal(pkgutils.PrefixLogLogger + "[" + c.name + "]restapi - invalid tls min version") - } -} - -func (c *RestAPI) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration!") - c.configChan <- config -} - -func (c *RestAPI) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogLogger+"["+c.name+"] restapi - "+msg, v...) -} - -func (c *RestAPI) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogLogger+"["+c.name+"] restapi - "+msg, v...) -} - -func (c *RestAPI) GetInputChannel() chan dnsutils.DNSMessage { - return c.inputChan -} - -func (c *RestAPI) Stop() { - c.LogInfo("stopping logger...") - c.RoutingHandler.Stop() - - c.LogInfo("stopping to run...") - c.stopRun <- true - <-c.doneRun - - c.LogInfo("stopping to process...") - c.stopProcess <- true - <-c.doneProcess - - c.LogInfo("stopping http server...") - c.httpserver.Close() - <-c.doneAPI -} - -func (c *RestAPI) BasicAuth(w http.ResponseWriter, r *http.Request) bool { - login, password, authOK := r.BasicAuth() - if !authOK { - return false - } - - return (login == c.config.Loggers.RestAPI.BasicAuthLogin) && - (password == c.config.Loggers.RestAPI.BasicAuthPwd) -} - -func (c *RestAPI) DeleteResetHandler(w http.ResponseWriter, r *http.Request) { - c.RLock() - defer c.RUnlock() - - if !c.BasicAuth(w, r) { - http.Error(w, "Not authorized", http.StatusUnauthorized) - return - } - - switch r.Method { - case http.MethodDelete: - - c.HitsUniq.Clients = make(map[string]int) - c.HitsUniq.Domains = make(map[string]int) - c.HitsUniq.NxDomains = make(map[string]int) - c.HitsUniq.SfDomains = make(map[string]int) - c.HitsUniq.PublicSuffixes = make(map[string]int) - c.HitsUniq.Suspicious = make(map[string]*dnsutils.TransformSuspicious) - - c.Streams = make(map[string]int) - - c.TopQnames = topmap.NewTopMap(c.config.Loggers.RestAPI.TopN) - c.TopClients = topmap.NewTopMap(c.config.Loggers.RestAPI.TopN) - c.TopTLDs = 
topmap.NewTopMap(c.config.Loggers.RestAPI.TopN) - c.TopNonExistent = topmap.NewTopMap(c.config.Loggers.RestAPI.TopN) - c.TopServFail = topmap.NewTopMap(c.config.Loggers.RestAPI.TopN) - - c.HitsStream.Streams = make(map[string]SearchBy) - - w.Header().Set("Content-Type", "application/text") - w.Write([]byte("OK")) - default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } -} - -func (c *RestAPI) GetTopTLDsHandler(w http.ResponseWriter, r *http.Request) { - c.RLock() - defer c.RUnlock() - - if !c.BasicAuth(w, r) { - http.Error(w, "Not authorized", http.StatusUnauthorized) - return - } - - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case http.MethodGet: - json.NewEncoder(w).Encode(c.TopTLDs.Get()) - default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } -} - -func (c *RestAPI) GetTopClientsHandler(w http.ResponseWriter, r *http.Request) { - c.RLock() - defer c.RUnlock() - - if !c.BasicAuth(w, r) { - http.Error(w, "Not authorized", http.StatusUnauthorized) - return - } - - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case http.MethodGet: - json.NewEncoder(w).Encode(c.TopClients.Get()) - default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } -} - -func (c *RestAPI) GetTopDomainsHandler(w http.ResponseWriter, r *http.Request) { - c.RLock() - defer c.RUnlock() - - if !c.BasicAuth(w, r) { - http.Error(w, "Not authorized", http.StatusUnauthorized) - return - } - - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case http.MethodGet: - json.NewEncoder(w).Encode(c.TopQnames.Get()) - default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } -} - -func (c *RestAPI) GetTopNxDomainsHandler(w http.ResponseWriter, r *http.Request) { - c.RLock() - defer c.RUnlock() - - if !c.BasicAuth(w, r) { - http.Error(w, "Not authorized", http.StatusUnauthorized) - return - } - - w.Header().Set("Content-Type", 
"application/json") - - switch r.Method { - case http.MethodGet: - json.NewEncoder(w).Encode(c.TopNonExistent.Get()) - default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } -} - -func (c *RestAPI) GetTopSfDomainsHandler(w http.ResponseWriter, r *http.Request) { - c.RLock() - defer c.RUnlock() - - if !c.BasicAuth(w, r) { - http.Error(w, "Not authorized", http.StatusUnauthorized) - return - } - - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case http.MethodGet: - json.NewEncoder(w).Encode(c.TopServFail.Get()) - default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } -} - -func (c *RestAPI) GetTLDsHandler(w http.ResponseWriter, r *http.Request) { - c.RLock() - defer c.RUnlock() - - if !c.BasicAuth(w, r) { - http.Error(w, "Not authorized", http.StatusUnauthorized) - return - } - - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case http.MethodGet: - // return as array - dataArray := []KeyHit{} - for tld, hit := range c.HitsUniq.PublicSuffixes { - dataArray = append(dataArray, KeyHit{Key: tld, Hit: hit}) - } - - // encode - json.NewEncoder(w).Encode(dataArray) - default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } -} - -func (c *RestAPI) GetClientsHandler(w http.ResponseWriter, r *http.Request) { - c.RLock() - defer c.RUnlock() - - if !c.BasicAuth(w, r) { - http.Error(w, "Not authorized", http.StatusUnauthorized) - return - } - - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case http.MethodGet: - // return as array - dataArray := []KeyHit{} - for address, hit := range c.HitsUniq.Clients { - dataArray = append(dataArray, KeyHit{Key: address, Hit: hit}) - } - // encode - json.NewEncoder(w).Encode(dataArray) - default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } -} - -func (c *RestAPI) GetDomainsHandler(w http.ResponseWriter, r *http.Request) { - c.RLock() - defer c.RUnlock() - - if 
!c.BasicAuth(w, r) { - http.Error(w, "Not authorized", http.StatusUnauthorized) - return - } - - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case http.MethodGet: - // return as array - dataArray := []KeyHit{} - for domain, hit := range c.HitsUniq.Domains { - dataArray = append(dataArray, KeyHit{Key: domain, Hit: hit}) - } - - // encode - json.NewEncoder(w).Encode(dataArray) - default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } -} - -func (c *RestAPI) GetNxDomainsHandler(w http.ResponseWriter, r *http.Request) { - c.RLock() - defer c.RUnlock() - - if !c.BasicAuth(w, r) { - http.Error(w, "Not authorized", http.StatusUnauthorized) - return - } - - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case http.MethodGet: - // convert to array - dataArray := []KeyHit{} - for domain, hit := range c.HitsUniq.NxDomains { - dataArray = append(dataArray, KeyHit{Key: domain, Hit: hit}) - } - - // encode - json.NewEncoder(w).Encode(dataArray) - - default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } -} - -func (c *RestAPI) GetSfDomainsHandler(w http.ResponseWriter, r *http.Request) { - c.RLock() - defer c.RUnlock() - - if !c.BasicAuth(w, r) { - http.Error(w, "Not authorized", http.StatusUnauthorized) - return - } - - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case http.MethodGet: - // return as array - dataArray := []KeyHit{} - for domain, hit := range c.HitsUniq.SfDomains { - dataArray = append(dataArray, KeyHit{Key: domain, Hit: hit}) - } - - // encode - json.NewEncoder(w).Encode(dataArray) - default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } -} - -func (c *RestAPI) GetSuspiciousHandler(w http.ResponseWriter, r *http.Request) { - c.RLock() - defer c.RUnlock() - - if !c.BasicAuth(w, r) { - http.Error(w, "Not authorized", http.StatusUnauthorized) - return - } - - w.Header().Set("Content-Type", 
"application/json") - - switch r.Method { - case http.MethodGet: - // return as array - dataArray := []*dnsutils.TransformSuspicious{} - for domain, suspicious := range c.HitsUniq.Suspicious { - suspicious.Domain = domain - dataArray = append(dataArray, suspicious) - } - - // encode - json.NewEncoder(w).Encode(dataArray) - default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } -} - -func (c *RestAPI) GetSearchHandler(w http.ResponseWriter, r *http.Request) { - c.RLock() - defer c.RUnlock() - - if !c.BasicAuth(w, r) { - http.Error(w, "Not authorized", http.StatusUnauthorized) - return - } - - switch r.Method { - case http.MethodGet: - - filter := r.URL.Query()["filter"] - if len(filter) == 0 { - http.Error(w, "Arguments are missing", http.StatusBadRequest) - } - - dataArray := []KeyHit{} - - // search by IP - for _, search := range c.HitsStream.Streams { - userHits, clientExists := search.Clients[filter[0]] - if clientExists { - for domain, hit := range userHits.Hits { - dataArray = append(dataArray, KeyHit{Key: domain, Hit: hit}) - } - } - } - - // search by domain - if len(dataArray) == 0 { - for _, search := range c.HitsStream.Streams { - domainHists, domainExists := search.Domains[filter[0]] - if domainExists { - for addr, hit := range domainHists.Hits { - dataArray = append(dataArray, KeyHit{Key: addr, Hit: hit}) - } - } - } - } - - // encode to json - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(dataArray) - - default: - http.Error(w, "{\"error\": \"Method not allowed\"}", http.StatusMethodNotAllowed) - } -} - -func (c *RestAPI) GetStreamsHandler(w http.ResponseWriter, r *http.Request) { - c.RLock() - defer c.RUnlock() - - if !c.BasicAuth(w, r) { - http.Error(w, "Not authorized", http.StatusUnauthorized) - return - } - - w.Header().Set("Content-Type", "application/json") - - switch r.Method { - case http.MethodGet: - - dataArray := []KeyHit{} - for stream, hit := range c.Streams { - dataArray = 
append(dataArray, KeyHit{Key: stream, Hit: hit}) - } - - json.NewEncoder(w).Encode(dataArray) - default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } -} - -func (c *RestAPI) RecordDNSMessage(dm dnsutils.DNSMessage) { - c.Lock() - defer c.Unlock() - - if _, exists := c.Streams[dm.DNSTap.Identity]; !exists { - c.Streams[dm.DNSTap.Identity] = 1 - } else { - c.Streams[dm.DNSTap.Identity] += 1 - } - - // record suspicious domains only is enabled - if dm.Suspicious != nil { - if dm.Suspicious.Score > 0.0 { - if _, exists := c.HitsUniq.Suspicious[dm.DNS.Qname]; !exists { - c.HitsUniq.Suspicious[dm.DNS.Qname] = dm.Suspicious - } - } - } - - // uniq record for tld - // record public suffix only if enabled - if dm.PublicSuffix != nil { - if dm.PublicSuffix.QnamePublicSuffix != "-" { - if _, ok := c.HitsUniq.PublicSuffixes[dm.PublicSuffix.QnamePublicSuffix]; !ok { - c.HitsUniq.PublicSuffixes[dm.PublicSuffix.QnamePublicSuffix] = 1 - } else { - c.HitsUniq.PublicSuffixes[dm.PublicSuffix.QnamePublicSuffix]++ - } - } - } - - // uniq record for domains - if _, exists := c.HitsUniq.Domains[dm.DNS.Qname]; !exists { - c.HitsUniq.Domains[dm.DNS.Qname] = 1 - } else { - c.HitsUniq.Domains[dm.DNS.Qname] += 1 - } - - if dm.DNS.Rcode == dnsutils.DNSRcodeNXDomain { - if _, exists := c.HitsUniq.NxDomains[dm.DNS.Qname]; !exists { - c.HitsUniq.NxDomains[dm.DNS.Qname] = 1 - } else { - c.HitsUniq.NxDomains[dm.DNS.Qname] += 1 - } - } - - if dm.DNS.Rcode == dnsutils.DNSRcodeServFail { - if _, exists := c.HitsUniq.SfDomains[dm.DNS.Qname]; !exists { - c.HitsUniq.SfDomains[dm.DNS.Qname] = 1 - } else { - c.HitsUniq.SfDomains[dm.DNS.Qname] += 1 - } - } - - // uniq record for queries - if _, exists := c.HitsUniq.Clients[dm.NetworkInfo.QueryIP]; !exists { - c.HitsUniq.Clients[dm.NetworkInfo.QueryIP] = 1 - } else { - c.HitsUniq.Clients[dm.NetworkInfo.QueryIP] += 1 - } - - // uniq top qnames and clients - c.TopQnames.Record(dm.DNS.Qname, c.HitsUniq.Domains[dm.DNS.Qname]) - 
c.TopClients.Record(dm.NetworkInfo.QueryIP, c.HitsUniq.Clients[dm.NetworkInfo.QueryIP]) - if dm.PublicSuffix != nil { - c.TopTLDs.Record(dm.PublicSuffix.QnamePublicSuffix, c.HitsUniq.PublicSuffixes[dm.PublicSuffix.QnamePublicSuffix]) - } - if dm.DNS.Rcode == dnsutils.DNSRcodeNXDomain { - c.TopNonExistent.Record(dm.DNS.Qname, c.HitsUniq.NxDomains[dm.DNS.Qname]) - } - if dm.DNS.Rcode == dnsutils.DNSRcodeServFail { - c.TopServFail.Record(dm.DNS.Qname, c.HitsUniq.SfDomains[dm.DNS.Qname]) - } - - // record dns message per client source ip and domain - if _, exists := c.HitsStream.Streams[dm.DNSTap.Identity]; !exists { - c.HitsStream.Streams[dm.DNSTap.Identity] = SearchBy{Clients: make(map[string]*HitsRecord), - Domains: make(map[string]*HitsRecord)} - } - - // continue with the query IP - if _, exists := c.HitsStream.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP]; !exists { - c.HitsStream.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP] = &HitsRecord{Hits: make(map[string]int), TotalHits: 1} - } else { - c.HitsStream.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP].TotalHits += 1 - } - - // continue with Qname - if _, exists := c.HitsStream.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP].Hits[dm.DNS.Qname]; !exists { - c.HitsStream.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP].Hits[dm.DNS.Qname] = 1 - } else { - c.HitsStream.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP].Hits[dm.DNS.Qname] += 1 - } - - // domain doesn't exists in domains map? - if _, exists := c.HitsStream.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname]; !exists { - c.HitsStream.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname] = &HitsRecord{Hits: make(map[string]int), TotalHits: 1} - } else { - c.HitsStream.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname].TotalHits += 1 - } - - // domain doesn't exists in domains map? 
- if _, exists := c.HitsStream.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname].Hits[dm.NetworkInfo.QueryIP]; !exists { - c.HitsStream.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname].Hits[dm.NetworkInfo.QueryIP] = 1 - } else { - c.HitsStream.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname].Hits[dm.NetworkInfo.QueryIP] += 1 - } -} - -func (c *RestAPI) ListenAndServe() { - c.LogInfo("starting server...") - - mux := http.NewServeMux() - mux.HandleFunc("/tlds", c.GetTLDsHandler) - mux.HandleFunc("/tlds/top", c.GetTopTLDsHandler) - mux.HandleFunc("/streams", c.GetStreamsHandler) - mux.HandleFunc("/clients", c.GetClientsHandler) - mux.HandleFunc("/clients/top", c.GetTopClientsHandler) - mux.HandleFunc("/domains", c.GetDomainsHandler) - mux.HandleFunc("/domains/servfail", c.GetSfDomainsHandler) - mux.HandleFunc("/domains/top", c.GetTopDomainsHandler) - mux.HandleFunc("/domains/nx/top", c.GetTopNxDomainsHandler) - mux.HandleFunc("/domains/servfail/top", c.GetTopSfDomainsHandler) - mux.HandleFunc("/suspicious", c.GetSuspiciousHandler) - mux.HandleFunc("/search", c.GetSearchHandler) - mux.HandleFunc("/reset", c.DeleteResetHandler) - - var err error - var listener net.Listener - addrlisten := c.config.Loggers.RestAPI.ListenIP + ":" + strconv.Itoa(c.config.Loggers.RestAPI.ListenPort) - - // listening with tls enabled ? 
- if c.config.Loggers.RestAPI.TLSSupport { - c.LogInfo("tls support enabled") - var cer tls.Certificate - cer, err = tls.LoadX509KeyPair(c.config.Loggers.RestAPI.CertFile, c.config.Loggers.RestAPI.KeyFile) - if err != nil { - c.logger.Fatal("loading certificate failed:", err) - } - - // prepare tls configuration - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{cer}, - MinVersion: tls.VersionTLS12, - } - - // update tls min version according to the user config - tlsConfig.MinVersion = pkgconfig.TLSVersion[c.config.Loggers.RestAPI.TLSMinVersion] - - listener, err = tls.Listen(netlib.SocketTCP, addrlisten, tlsConfig) - - } else { - // basic listening - listener, err = net.Listen(netlib.SocketTCP, addrlisten) - } - - // something wrong ? - if err != nil { - c.logger.Fatal("listening failed:", err) - } - - c.httpserver = listener - c.httpmux = mux - c.LogInfo("is listening on %s", listener.Addr()) - - http.Serve(c.httpserver, c.httpmux) - - c.LogInfo("http server terminated") - c.doneAPI <- true -} - -func (c *RestAPI) Run() { - // prepare next channels - defaultRoutes, defaultNames := c.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := c.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, c.outputChan) - subprocessors := transformers.NewTransforms(&c.config.OutgoingTransformers, c.logger, c.name, listChannel, 0) - - // start http server - go c.ListenAndServe() - - // goroutine to process transformed dns messages - go c.Process() - - // loop to process incoming messages - c.LogInfo("ready to process") -RUN_LOOP: - for { - select { - case <-c.stopRun: - // cleanup transformers - subprocessors.Reset() - c.doneRun <- true - break RUN_LOOP - - case cfg, opened := <-c.configChan: - if !opened { - return - } - c.config = cfg - c.ReadConfig() - subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, opened := <-c.inputChan: - if !opened { - 
c.LogInfo("input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - c.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? - c.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - c.outputChan <- dm - } - } - c.LogInfo("run terminated") -} - -func (c *RestAPI) Process() { - c.LogInfo("processing...") - -PROCESS_LOOP: - for { - select { - case <-c.stopProcess: - c.doneProcess <- true - break PROCESS_LOOP - - case dm, opened := <-c.outputChan: - if !opened { - c.LogInfo("output channel closed!") - return - } - // record the dnstap message - c.RecordDNSMessage(dm) - } - } - c.LogInfo("processing terminated") -} diff --git a/loggers/scalyr.go b/loggers/scalyr.go deleted file mode 100644 index 6c0691ff..00000000 --- a/loggers/scalyr.go +++ /dev/null @@ -1,468 +0,0 @@ -package loggers - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" - - "github.com/google/uuid" - "github.com/grafana/dskit/backoff" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" -) - -// ScalyrClient is a client for Scalyr(https://www.dataset.com/) -// This client is using the addEvents endpoint, described here: https://app.scalyr.com/help/api#addEvents -type ScalyrClient struct { - stopProcess chan bool - doneProcess chan bool - stopRun chan bool - doneRun chan bool - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - logger *logger.Logger - name string - config *pkgconfig.Config - configChan chan *pkgconfig.Config - 
RoutingHandler pkgutils.RoutingHandler - - mode string - textFormat []string - - session string // Session ID, used by scalyr, see API docs - - httpclient *http.Client - endpoint string // Where to send the data - apikey string // API Token to use for authorizing requests - parser string // Parser used by Scalyr - flush *time.Ticker // Timer that allows us to flush events periodically - - submissions chan []byte // Marshalled JSON to send to Scalyr - - submitterDone chan bool // Will be written to when the HTTP submitter is done -} - -func NewScalyrClient(config *pkgconfig.Config, console *logger.Logger, name string) *ScalyrClient { - console.Info(pkgutils.PrefixLogLogger+"[%s] scalyr - starting", name) - c := &ScalyrClient{ - stopProcess: make(chan bool), - doneProcess: make(chan bool), - stopRun: make(chan bool), - doneRun: make(chan bool), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.ScalyrClient.ChannelBufferSize), - outputChan: make(chan dnsutils.DNSMessage, config.Loggers.ScalyrClient.ChannelBufferSize), - logger: console, - name: name, - config: config, - configChan: make(chan *pkgconfig.Config), - RoutingHandler: pkgutils.NewRoutingHandler(config, console, name), - - mode: pkgconfig.ModeText, - - endpoint: makeEndpoint("app.scalyr.com"), - flush: time.NewTicker(30 * time.Second), - - session: uuid.NewString(), - - submissions: make(chan []byte, 25), - submitterDone: make(chan bool), - } - c.ReadConfig() - return c -} - -func makeEndpoint(host string) string { - return fmt.Sprintf("https://%s/api/addEvents", host) -} - -func (c *ScalyrClient) ReadConfig() { - if len(c.config.Loggers.ScalyrClient.APIKey) == 0 { - c.logger.Fatal("No API Key configured for Scalyr Client") - } - c.apikey = c.config.Loggers.ScalyrClient.APIKey - - if len(c.config.Loggers.ScalyrClient.Mode) != 0 { - c.mode = c.config.Loggers.ScalyrClient.Mode - } - - if len(c.config.Loggers.ScalyrClient.Parser) == 0 && (c.mode == pkgconfig.ModeText || c.mode == pkgconfig.ModeJSON) { 
- c.logger.Fatal(fmt.Sprintf("No Scalyr parser configured for Scalyr Client in %s mode", c.mode)) - } - c.parser = c.config.Loggers.ScalyrClient.Parser - - if len(c.config.Loggers.ScalyrClient.TextFormat) > 0 { - c.textFormat = strings.Fields(c.config.Loggers.ScalyrClient.TextFormat) - } else { - c.textFormat = strings.Fields(c.config.Global.TextFormat) - } - - if host := c.config.Loggers.ScalyrClient.ServerURL; host != "" { - c.endpoint = makeEndpoint(host) - } - - if flushInterval := c.config.Loggers.ScalyrClient.FlushInterval; flushInterval != 0 { - c.flush = time.NewTicker(time.Duration(flushInterval) * time.Second) - } - - // tls client config - tlsOptions := pkgconfig.TLSOptions{ - InsecureSkipVerify: c.config.Loggers.ScalyrClient.TLSInsecure, - MinVersion: c.config.Loggers.ScalyrClient.TLSMinVersion, - CAFile: c.config.Loggers.ScalyrClient.CAFile, - CertFile: c.config.Loggers.ScalyrClient.CertFile, - KeyFile: c.config.Loggers.ScalyrClient.KeyFile, - } - - tlsConfig, err := pkgconfig.TLSClientConfig(tlsOptions) - if err != nil { - c.logger.Fatal("unable to parse tls confgi: ", err) - } - - // prepare http client - tr := &http.Transport{ - MaxIdleConns: 10, - IdleConnTimeout: 30 * time.Second, - DisableCompression: false, - TLSClientConfig: tlsConfig, - } - - // use proxy - if len(c.config.Loggers.ScalyrClient.ProxyURL) > 0 { - proxyURL, err := url.Parse(c.config.Loggers.ScalyrClient.ProxyURL) - if err != nil { - c.logger.Fatal("unable to parse proxy url: ", err) - } - tr.Proxy = http.ProxyURL(proxyURL) - } - - c.httpclient = &http.Client{Transport: tr} -} - -func (c *ScalyrClient) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration!") - c.configChan <- config -} - -func (c *ScalyrClient) Run() { - c.LogInfo("running in background...") - - // prepare next channels - defaultRoutes, defaultNames := c.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := c.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - 
listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, c.outputChan) - subprocessors := transformers.NewTransforms(&c.config.OutgoingTransformers, c.logger, c.name, listChannel, 0) - - // goroutine to process transformed dns messages - go c.Process() - - // loop to process incoming messages -RUN_LOOP: - for { - select { - case <-c.stopRun: - // cleanup transformers - subprocessors.Reset() - - c.doneRun <- true - break RUN_LOOP - - case cfg, opened := <-c.configChan: - if !opened { - return - } - c.config = cfg - c.ReadConfig() - subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, opened := <-c.inputChan: - if !opened { - c.LogInfo("input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - c.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? - c.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - c.outputChan <- dm - } - } - c.LogInfo("run terminated") -} - -func (c *ScalyrClient) Process() { - - sInfo := c.config.Loggers.ScalyrClient.SessionInfo - if sInfo == nil { - sInfo = make(map[string]string) - } - attrs := make(map[string]interface{}) - for k, v := range c.config.Loggers.ScalyrClient.Attrs { - attrs[k] = v - } - if len(c.parser) != 0 { - attrs["parser"] = c.parser - } - var events []event - - if host, ok := sInfo["serverHost"]; !ok || len(host) == 0 { - hostname, err := os.Hostname() - if err != nil { - hostname = "unknown-hostname" - } - sInfo["serverHost"] = hostname - } - - c.runSubmitter() - - c.LogInfo("ready to process") -PROCESS_LOOP: - for { - select { - case <-c.stopProcess: - - if len(events) > 0 { - c.submitEventRecord(sInfo, events) - } - close(c.submissions) - - // Block until both threads are done - <-c.submitterDone - - c.doneProcess <- true - break PROCESS_LOOP - 
// incoming dns message to process - case dm, opened := <-c.outputChan: - if !opened { - c.LogInfo("output channel closed!") - return - } - - switch c.mode { - case pkgconfig.ModeText: - attrs["message"] = string(dm.Bytes(c.textFormat, - c.config.Global.TextFormatDelimiter, - c.config.Global.TextFormatBoundary)) - case pkgconfig.ModeJSON: - attrs["message"] = dm - case pkgconfig.ModeFlatJSON: - var err error - if attrs, err = dm.Flatten(); err != nil { - c.LogError("unable to flatten: %e", err) - break - } - // Add user's attrs without overwriting flattened ones - for k, v := range c.config.Loggers.ScalyrClient.Attrs { - if _, ok := attrs[k]; !ok { - attrs[k] = v - } - } - } - events = append(events, event{ - TS: strconv.FormatInt(time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec)).UnixNano(), 10), - Sev: SeverityInfo, - Attrs: attrs, - }) - if len(events) >= 400 { - // Maximum size of a POST is 6MB. 400 events would mean that each dnstap entry - // can be a little over 15 kB in JSON, which should be plenty. - c.submitEventRecord(sInfo, events) - events = []event{} - } - case <-c.flush.C: - if len(events) > 0 { - c.submitEventRecord(sInfo, events) - events = []event{} - } - } - } - c.LogInfo("processing terminated") -} - -func (c ScalyrClient) Stop() { - c.LogInfo("stopping logger...") - c.RoutingHandler.Stop() - - c.LogInfo("stopping to run...") - c.stopRun <- true - <-c.doneRun - - c.LogInfo("stopping to process...") - c.stopProcess <- true - <-c.doneProcess -} - -func (c *ScalyrClient) submitEventRecord(sessionInfo map[string]string, events []event) { - er := eventRecord{ - Session: c.session, - SessionInfo: sessionInfo, - Events: events, - } - buf, err := json.Marshal(er) - if err != nil { - // TODO should this panic? 
- c.LogError("Unable to create JSON from events: %e", err) - } - c.submissions <- buf -} - -func (c *ScalyrClient) runSubmitter() { - go func() { - for m := range c.submissions { - c.send(m) - } - c.submitterDone <- true - }() - c.LogInfo("HTTP Submitter started") -} - -func (c *ScalyrClient) send(buf []byte) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - MinBackoff := 500 * time.Millisecond - MaxBackoff := 5 * time.Minute - MaxRetries := 10 - - backoff := backoff.New(ctx, backoff.Config{ - MaxBackoff: MaxBackoff, - MaxRetries: MaxRetries, - MinBackoff: MinBackoff, - }) - - for { - post, err := http.NewRequest("POST", c.endpoint, bytes.NewReader(buf)) - if err != nil { - c.LogError("new http error: %s", err) - return - } - post = post.WithContext(ctx) - post.Header.Set("Content-Type", "application/json") - post.Header.Set("User-Agent", "dnscollector") - post.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.apikey)) - - // send post and read response - resp, err := c.httpclient.Do(post) - if err != nil { - c.LogError("do http error: %s", err) - return - } - - // success ? - if resp.StatusCode > 0 && resp.StatusCode != 429 && resp.StatusCode/100 != 5 { - break - } - - // something is wrong, retry ? - if resp.StatusCode/100 != 2 { - response, err := parseServerResponse(resp.Body) - if err != nil { - c.LogError("server returned HTTP status %s (%d), unable to decode response: %e", resp.Status, resp.StatusCode, err) - } else { - c.LogError("server returned HTTP status %s (%d), %s", resp.Status, resp.StatusCode, response.Message) - } - } - - // wait before retry - backoff.Wait() - - // Make sure it sends at least once before checking for retry. 
- if !backoff.Ongoing() { - break - } - } -} - -func parseServerResponse(body io.ReadCloser) (response, error) { - var response response - b, err := io.ReadAll(body) - if err != nil { - return response, err - } - err = json.Unmarshal(b, &response) - return response, err -} - -func (c *ScalyrClient) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogLogger+"["+c.name+"] scalyr - "+msg, v...) -} - -func (c *ScalyrClient) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogLogger+"["+c.name+"] scalyr - "+msg, v...) -} - -// Models -type scalyrSeverity uint - -const ( - SeverityFinest scalyrSeverity = iota - SeverityFiner - SeverityFine - SeverityInfo - SeverityWarning - SeverityError - SeverityFatal -) - -type event struct { - Thread string `json:"thread,omitempty"` - TS string `json:"ts"` - Sev scalyrSeverity `json:"sev,omitempty"` - Attrs map[string]interface{} `json:"attrs"` -} - -type thread struct { - ID string `json:"id"` - Name string `json:"name"` -} - -type eventRecord struct { - Token string `json:"token,omitempty"` - Session string `json:"session"` - SessionInfo map[string]string `json:"sessionInfo"` - Events []event `json:"events"` - Threads []thread `json:"threads,omitempty"` -} - -type response struct { - Status string `json:"status"` - Message string `json:"message"` -} - -func (c *ScalyrClient) GetName() string { return c.name } - -func (c *ScalyrClient) AddDroppedRoute(wrk pkgutils.Worker) { - c.RoutingHandler.AddDroppedRoute(wrk) -} - -func (c *ScalyrClient) AddDefaultRoute(wrk pkgutils.Worker) { - c.RoutingHandler.AddDefaultRoute(wrk) -} - -func (c *ScalyrClient) SetLoggers(loggers []pkgutils.Worker) {} - -func (c *ScalyrClient) GetInputChannel() chan dnsutils.DNSMessage { - return c.inputChan -} diff --git a/loggers/statsd.go b/loggers/statsd.go deleted file mode 100644 index 036b0a55..00000000 --- a/loggers/statsd.go +++ /dev/null @@ -1,418 +0,0 @@ -package loggers - -import ( - "bufio" - "crypto/tls" 
- "fmt" - "net" - "strconv" - "sync" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" - "github.com/dmachard/go-topmap" -) - -type StatsPerStream struct { - TotalPackets int - TotalSentBytes int - TotalReceivedBytes int - - Clients map[string]int - Domains map[string]int - Nxdomains map[string]int - - RRtypes map[string]int - Rcodes map[string]int - Operations map[string]int - Transports map[string]int - IPproto map[string]int - - TopRcodes *topmap.TopMap - TopOperations *topmap.TopMap - TopIPproto *topmap.TopMap - TopTransport *topmap.TopMap - TopRRtypes *topmap.TopMap -} - -type StreamStats struct { - Streams map[string]*StatsPerStream -} - -type StatsdClient struct { - stopProcess chan bool - doneProcess chan bool - stopRun chan bool - doneRun chan bool - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - name string - RoutingHandler pkgutils.RoutingHandler - - Stats StreamStats - sync.RWMutex -} - -func NewStatsdClient(config *pkgconfig.Config, logger *logger.Logger, name string) *StatsdClient { - logger.Info(pkgutils.PrefixLogLogger+"[%s] statsd - enabled", name) - - s := &StatsdClient{ - stopProcess: make(chan bool), - doneProcess: make(chan bool), - stopRun: make(chan bool), - doneRun: make(chan bool), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.Statsd.ChannelBufferSize), - outputChan: make(chan dnsutils.DNSMessage, config.Loggers.Statsd.ChannelBufferSize), - logger: logger, - config: config, - configChan: make(chan *pkgconfig.Config), - name: name, - Stats: StreamStats{Streams: make(map[string]*StatsPerStream)}, - RoutingHandler: 
pkgutils.NewRoutingHandler(config, logger, name), - } - - // check config - s.ReadConfig() - - return s -} - -func (c *StatsdClient) GetName() string { return c.name } - -func (c *StatsdClient) AddDroppedRoute(wrk pkgutils.Worker) { - c.RoutingHandler.AddDroppedRoute(wrk) -} - -func (c *StatsdClient) AddDefaultRoute(wrk pkgutils.Worker) { - c.RoutingHandler.AddDefaultRoute(wrk) -} - -func (c *StatsdClient) SetLoggers(loggers []pkgutils.Worker) {} - -func (c *StatsdClient) ReadConfig() { - if !pkgconfig.IsValidTLS(c.config.Loggers.Statsd.TLSMinVersion) { - c.logger.Fatal(pkgutils.PrefixLogLogger + "[" + c.name + "]statd - invalid tls min version") - } -} - -func (c *StatsdClient) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration!") - c.configChan <- config -} - -func (c *StatsdClient) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogLogger+"["+c.name+"] statsd - "+msg, v...) -} - -func (c *StatsdClient) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogLogger+"["+c.name+"] statsd - "+msg, v...) 
-} - -func (c *StatsdClient) GetInputChannel() chan dnsutils.DNSMessage { - return c.inputChan -} - -func (c *StatsdClient) Stop() { - c.LogInfo("stopping logger...") - c.RoutingHandler.Stop() - - c.LogInfo("stopping to run...") - c.stopRun <- true - <-c.doneRun - - c.LogInfo("stopping to process...") - c.stopProcess <- true - <-c.doneProcess -} - -func (c *StatsdClient) RecordDNSMessage(dm dnsutils.DNSMessage) { - c.Lock() - defer c.Unlock() - - // add stream - if _, exists := c.Stats.Streams[dm.DNSTap.Identity]; !exists { - c.Stats.Streams[dm.DNSTap.Identity] = &StatsPerStream{ - Clients: make(map[string]int), - Domains: make(map[string]int), - Nxdomains: make(map[string]int), - - RRtypes: make(map[string]int), - Rcodes: make(map[string]int), - Operations: make(map[string]int), - Transports: make(map[string]int), - IPproto: make(map[string]int), - - TopRcodes: topmap.NewTopMap(50), - TopOperations: topmap.NewTopMap(50), - TopIPproto: topmap.NewTopMap(50), - TopRRtypes: topmap.NewTopMap(50), - TopTransport: topmap.NewTopMap(50), - - TotalPackets: 0, - TotalSentBytes: 0, - TotalReceivedBytes: 0, - } - } - - // global number of packets - c.Stats.Streams[dm.DNSTap.Identity].TotalPackets++ - - if dm.DNS.Type == dnsutils.DNSQuery { - c.Stats.Streams[dm.DNSTap.Identity].TotalReceivedBytes += dm.DNS.Length - } else { - c.Stats.Streams[dm.DNSTap.Identity].TotalSentBytes += dm.DNS.Length - } - - // count client and domains - if _, exists := c.Stats.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname]; !exists { - c.Stats.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname] = 1 - } else { - c.Stats.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname] += 1 - } - if dm.DNS.Rcode == dnsutils.DNSRcodeNXDomain { - if _, exists := c.Stats.Streams[dm.DNSTap.Identity].Nxdomains[dm.DNS.Qname]; !exists { - c.Stats.Streams[dm.DNSTap.Identity].Nxdomains[dm.DNS.Qname] = 1 - } else { - c.Stats.Streams[dm.DNSTap.Identity].Nxdomains[dm.DNS.Qname] += 1 - } - } - if _, exists := 
c.Stats.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP]; !exists { - c.Stats.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP] = 1 - } else { - c.Stats.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP] += 1 - } - - // record ip proto - if _, ok := c.Stats.Streams[dm.DNSTap.Identity].IPproto[dm.NetworkInfo.Family]; !ok { - c.Stats.Streams[dm.DNSTap.Identity].IPproto[dm.NetworkInfo.Family] = 1 - } else { - c.Stats.Streams[dm.DNSTap.Identity].IPproto[dm.NetworkInfo.Family]++ - } - c.Stats.Streams[dm.DNSTap.Identity].TopIPproto.Record( - dm.NetworkInfo.Family, - c.Stats.Streams[dm.DNSTap.Identity].IPproto[dm.NetworkInfo.Family], - ) - - // record transports - if _, ok := c.Stats.Streams[dm.DNSTap.Identity].Transports[dm.NetworkInfo.Protocol]; !ok { - c.Stats.Streams[dm.DNSTap.Identity].Transports[dm.NetworkInfo.Protocol] = 1 - } else { - c.Stats.Streams[dm.DNSTap.Identity].Transports[dm.NetworkInfo.Protocol]++ - } - c.Stats.Streams[dm.DNSTap.Identity].TopTransport.Record( - dm.NetworkInfo.Protocol, - c.Stats.Streams[dm.DNSTap.Identity].Transports[dm.NetworkInfo.Protocol], - ) - - // record rrtypes - if _, ok := c.Stats.Streams[dm.DNSTap.Identity].RRtypes[dm.DNS.Qtype]; !ok { - c.Stats.Streams[dm.DNSTap.Identity].RRtypes[dm.DNS.Qtype] = 1 - } else { - c.Stats.Streams[dm.DNSTap.Identity].RRtypes[dm.DNS.Qtype]++ - } - c.Stats.Streams[dm.DNSTap.Identity].TopRRtypes.Record( - dm.DNS.Qtype, - c.Stats.Streams[dm.DNSTap.Identity].RRtypes[dm.DNS.Qtype], - ) - - // record rcodes - if _, ok := c.Stats.Streams[dm.DNSTap.Identity].Rcodes[dm.DNS.Rcode]; !ok { - c.Stats.Streams[dm.DNSTap.Identity].Rcodes[dm.DNS.Rcode] = 1 - } else { - c.Stats.Streams[dm.DNSTap.Identity].Rcodes[dm.DNS.Rcode]++ - } - c.Stats.Streams[dm.DNSTap.Identity].TopRcodes.Record( - dm.DNS.Rcode, - c.Stats.Streams[dm.DNSTap.Identity].Rcodes[dm.DNS.Rcode], - ) - - // record operations - if _, ok := c.Stats.Streams[dm.DNSTap.Identity].Operations[dm.DNSTap.Operation]; !ok { - 
c.Stats.Streams[dm.DNSTap.Identity].Operations[dm.DNSTap.Operation] = 1 - } else { - c.Stats.Streams[dm.DNSTap.Identity].Operations[dm.DNSTap.Operation]++ - } - c.Stats.Streams[dm.DNSTap.Identity].TopOperations.Record( - dm.DNSTap.Operation, - c.Stats.Streams[dm.DNSTap.Identity].Operations[dm.DNSTap.Operation], - ) -} - -func (c *StatsdClient) Run() { - c.LogInfo("running in background...") - - // prepare next channels - defaultRoutes, defaultNames := c.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := c.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, c.outputChan) - subprocessors := transformers.NewTransforms(&c.config.OutgoingTransformers, c.logger, c.name, listChannel, 0) - - // goroutine to process transformed dns messages - go c.Process() - - // loop to process incoming messages -RUN_LOOP: - for { - select { - case <-c.stopRun: - // cleanup transformers - subprocessors.Reset() - - c.doneRun <- true - break RUN_LOOP - - case cfg, opened := <-c.configChan: - if !opened { - return - } - c.config = cfg - c.ReadConfig() - subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, opened := <-c.inputChan: - if !opened { - c.LogInfo("input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - c.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? 
- c.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - c.outputChan <- dm - } - } - c.LogInfo("run terminated") -} - -func (c *StatsdClient) Process() { - // statd timer to push data - t2Interval := time.Duration(c.config.Loggers.Statsd.FlushInterval) * time.Second - t2 := time.NewTimer(t2Interval) - - c.LogInfo("ready to process") -PROCESS_LOOP: - for { - select { - case <-c.stopProcess: - c.doneProcess <- true - break PROCESS_LOOP - // incoming dns message to process - case dm, opened := <-c.outputChan: - if !opened { - c.LogInfo("output channel closed!") - return - } - - // record the dnstap message - c.RecordDNSMessage(dm) - - case <-t2.C: - address := c.config.Loggers.Statsd.RemoteAddress + ":" + strconv.Itoa(c.config.Loggers.Statsd.RemotePort) - connTimeout := time.Duration(c.config.Loggers.Statsd.ConnectTimeout) * time.Second - - // make the connection - var conn net.Conn - var err error - - switch c.config.Loggers.Statsd.Transport { - case netlib.SocketTCP, netlib.SocketUDP: - c.LogInfo("connecting to %s://%s", c.config.Loggers.Statsd.Transport, address) - conn, err = net.DialTimeout(c.config.Loggers.Statsd.Transport, address, connTimeout) - - case netlib.SocketTLS: - c.LogInfo("connecting to %s://%s", c.config.Loggers.Statsd.Transport, address) - - var tlsConfig *tls.Config - - tlsOptions := pkgconfig.TLSOptions{ - InsecureSkipVerify: c.config.Loggers.Statsd.TLSInsecure, - MinVersion: c.config.Loggers.Statsd.TLSMinVersion, - CAFile: c.config.Loggers.Statsd.CAFile, - CertFile: c.config.Loggers.Statsd.CertFile, - KeyFile: c.config.Loggers.Statsd.KeyFile, - } - - tlsConfig, err = pkgconfig.TLSClientConfig(tlsOptions) - if err == nil { - dialer := &net.Dialer{Timeout: connTimeout} - conn, err = tls.DialWithDialer(dialer, netlib.SocketTCP, address, tlsConfig) - } - default: - c.logger.Fatal("logger=statsd - invalid transport:", c.config.Loggers.Statsd.Transport) - } - - // something is wrong during connection ? 
- if err != nil { - c.LogError("dial error: %s", err) - } - - if conn != nil { - c.LogInfo("dialing with success, continue...") - - b := bufio.NewWriter(conn) - - prefix := c.config.Loggers.Statsd.Prefix - for streamID, stream := range c.Stats.Streams { - b.WriteString(fmt.Sprintf("%s_%s_total_bytes_received:%d|c\n", prefix, streamID, stream.TotalReceivedBytes)) - b.WriteString(fmt.Sprintf("%s_%s_total_bytes_sent:%d|c\n", prefix, streamID, stream.TotalSentBytes)) - - b.WriteString(fmt.Sprintf("%s_%s_total_requesters:%d|c\n", prefix, streamID, len(stream.Clients))) - - b.WriteString(fmt.Sprintf("%s_%s_total_domains:%d|c\n", prefix, streamID, len(stream.Domains))) - b.WriteString(fmt.Sprintf("%s_%s_total_domains_nx:%d|c\n", prefix, streamID, len(stream.Nxdomains))) - - b.WriteString(fmt.Sprintf("%s_%s_total_packets:%d|c\n", prefix, streamID, stream.TotalPackets)) - - // transport repartition - for _, v := range stream.TopTransport.Get() { - b.WriteString(fmt.Sprintf("%s_%s_total_packets_%s:%d|c\n", prefix, streamID, v.Name, v.Hit)) - } - - // ip proto repartition - for _, v := range stream.TopIPproto.Get() { - b.WriteString(fmt.Sprintf("%s_%s_total_packets_%s:%d|c\n", prefix, streamID, v.Name, v.Hit)) - } - - // qtypes repartition - for _, v := range stream.TopRRtypes.Get() { - b.WriteString(fmt.Sprintf("%s_%s_total_replies_rrtype_%s:%d|c\n", prefix, streamID, v.Name, v.Hit)) - } - - // top rcodes - for _, v := range stream.TopRcodes.Get() { - b.WriteString(fmt.Sprintf("%s_%s_total_replies_rcode_%s:%d|c\n", prefix, streamID, v.Name, v.Hit)) - } - } - - // send data - err = b.Flush() - if err != nil { - c.LogError("sent data error:", err.Error()) - } - } - - // reset the timer - t2.Reset(t2Interval) - } - } - c.LogInfo("processing terminated") -} diff --git a/loggers/stdout.go b/loggers/stdout.go deleted file mode 100644 index 0042024b..00000000 --- a/loggers/stdout.go +++ /dev/null @@ -1,275 +0,0 @@ -package loggers - -import ( - "bytes" - "encoding/json" - "io" - 
"log" - "os" - "strings" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" - "github.com/google/gopacket" - "github.com/google/gopacket/layers" - "github.com/google/gopacket/pcapgo" -) - -func IsStdoutValidMode(mode string) bool { - switch mode { - case - pkgconfig.ModeText, - pkgconfig.ModeJSON, - pkgconfig.ModeFlatJSON, - pkgconfig.ModePCAP: - return true - } - return false -} - -type StdOut struct { - stopProcess chan bool - doneProcess chan bool - stopRun chan bool - doneRun chan bool - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - textFormat []string - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - writerText *log.Logger - writerPcap *pcapgo.Writer - name string - RoutingHandler pkgutils.RoutingHandler -} - -func NewStdOut(config *pkgconfig.Config, console *logger.Logger, name string) *StdOut { - console.Info(pkgutils.PrefixLogLogger+"[%s] stdout - enabled", name) - so := &StdOut{ - stopProcess: make(chan bool), - doneProcess: make(chan bool), - stopRun: make(chan bool), - doneRun: make(chan bool), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.Stdout.ChannelBufferSize), - outputChan: make(chan dnsutils.DNSMessage, config.Loggers.Stdout.ChannelBufferSize), - logger: console, - config: config, - configChan: make(chan *pkgconfig.Config), - writerText: log.New(os.Stdout, "", 0), - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, console, name), - } - so.ReadConfig() - return so -} - -func (so *StdOut) GetName() string { return so.name } - -func (so *StdOut) AddDroppedRoute(wrk pkgutils.Worker) { - so.RoutingHandler.AddDroppedRoute(wrk) -} - -func (so *StdOut) AddDefaultRoute(wrk pkgutils.Worker) { - 
so.RoutingHandler.AddDefaultRoute(wrk) -} - -func (so *StdOut) SetLoggers(loggers []pkgutils.Worker) {} - -func (so *StdOut) ReadConfig() { - if !IsStdoutValidMode(so.config.Loggers.Stdout.Mode) { - so.logger.Fatal("["+so.name+"] logger=stdout - invalid mode: ", so.config.Loggers.Stdout.Mode) - } - - if len(so.config.Loggers.Stdout.TextFormat) > 0 { - // so.textFormat = strings.Fields(so.config.Loggers.Stdout.TextFormat) - so.textFormat = strings.Split(so.config.Loggers.Stdout.TextFormat,so.config.Global.TextFormatSplitter) - } else { - // so.textFormat = strings.Fields(so.config.Global.TextFormat) - so.textFormat = strings.Split(so.config.Global.TextFormat,so.config.Global.TextFormatSplitter) - } - so.logger.Info("textFormat = "+so.config.Global.TextFormat) -} - -func (so *StdOut) ReloadConfig(config *pkgconfig.Config) { - so.LogInfo("reload configuration!") - so.configChan <- config -} - -func (so *StdOut) LogInfo(msg string, v ...interface{}) { - so.logger.Info(pkgutils.PrefixLogLogger+"["+so.name+"] stdout - "+msg, v...) -} - -func (so *StdOut) LogError(msg string, v ...interface{}) { - so.logger.Error(pkgutils.PrefixLogLogger+"["+so.name+"] stdout - "+msg, v...) 
-} - -func (so *StdOut) SetTextWriter(b *bytes.Buffer) { - so.writerText = log.New(os.Stdout, "", 0) - so.writerText.SetOutput(b) -} - -func (so *StdOut) SetPcapWriter(w io.Writer) { - so.LogInfo("init pcap writer") - - so.writerPcap = pcapgo.NewWriter(w) - if err := so.writerPcap.WriteFileHeader(65536, layers.LinkTypeEthernet); err != nil { - so.logger.Fatal("["+so.name+"] logger=stdout - pcap init error: %e", err) - } -} - -func (so *StdOut) GetInputChannel() chan dnsutils.DNSMessage { - return so.inputChan -} - -func (so *StdOut) Stop() { - so.LogInfo("stopping logger...") - so.RoutingHandler.Stop() - - so.LogInfo("stopping to run...") - so.stopRun <- true - <-so.doneRun - - so.LogInfo("stopping to process...") - so.stopProcess <- true - <-so.doneProcess -} - -func (so *StdOut) Run() { - so.LogInfo("running in background...") - - // prepare next channels - defaultRoutes, defaultNames := so.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := so.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, so.outputChan) - subprocessors := transformers.NewTransforms(&so.config.OutgoingTransformers, so.logger, so.name, listChannel, 0) - - // goroutine to process transformed dns messages - go so.Process() - - // loop to process incoming messages -RUN_LOOP: - for { - select { - case <-so.stopRun: - // cleanup transformers - subprocessors.Reset() - so.doneRun <- true - break RUN_LOOP - - // new config provided? 
- case cfg, opened := <-so.configChan: - if !opened { - return - } - so.config = cfg - so.ReadConfig() - subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, opened := <-so.inputChan: - if !opened { - so.LogInfo("run: input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - so.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? - so.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - so.outputChan <- dm - } - } - so.LogInfo("run terminated") -} - -func (so *StdOut) Process() { - - // standard output buffer - buffer := new(bytes.Buffer) - - if so.config.Loggers.Stdout.Mode == pkgconfig.ModePCAP && so.writerPcap == nil { - so.SetPcapWriter(os.Stdout) - } - - so.LogInfo("ready to process") -PROCESS_LOOP: - for { - select { - case <-so.stopProcess: - so.doneProcess <- true - break PROCESS_LOOP - - case dm, opened := <-so.outputChan: - if !opened { - so.LogInfo("process: output channel closed!") - return - } - - switch so.config.Loggers.Stdout.Mode { - case pkgconfig.ModePCAP: - if len(dm.DNS.Payload) == 0 { - so.LogError("process: no dns payload to encode, drop it") - continue - } - - pkt, err := dm.ToPacketLayer() - if err != nil { - so.LogError("unable to pack layer: %s", err) - continue - } - - buf := gopacket.NewSerializeBuffer() - opts := gopacket.SerializeOptions{ - FixLengths: true, - ComputeChecksums: true, - } - for _, l := range pkt { - l.SerializeTo(buf, opts) - } - - bufSize := len(buf.Bytes()) - ci := gopacket.CaptureInfo{ - Timestamp: time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec)), - CaptureLength: bufSize, - Length: bufSize, - } - - so.writerPcap.WritePacket(ci, buf.Bytes()) - - case pkgconfig.ModeText: - so.writerText.Print(dm.String(so.textFormat, - 
so.config.Global.TextFormatDelimiter, - so.config.Global.TextFormatBoundary)) - - case pkgconfig.ModeJSON: - json.NewEncoder(buffer).Encode(dm) - so.writerText.Print(buffer.String()) - buffer.Reset() - - case pkgconfig.ModeFlatJSON: - flat, err := dm.Flatten() - if err != nil { - so.LogError("process: flattening DNS message failed: %e", err) - } - json.NewEncoder(buffer).Encode(flat) - so.writerText.Print(buffer.String()) - buffer.Reset() - } - } - } - so.LogInfo("processing terminated") -} diff --git a/loggers/syslog.go b/loggers/syslog.go deleted file mode 100644 index 5bda6029..00000000 --- a/loggers/syslog.go +++ /dev/null @@ -1,448 +0,0 @@ -package loggers - -import ( - "bytes" - "crypto/tls" - "encoding/json" - "fmt" - "time" - - "strings" - - syslog "github.com/dmachard/go-clientsyslog" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" -) - -func GetPriority(facility string) (syslog.Priority, error) { - facility = strings.ToUpper(facility) - switch facility { - // level - case "WARNING": - return syslog.LOG_WARNING, nil - case "NOTICE": - return syslog.LOG_NOTICE, nil - case "INFO": - return syslog.LOG_INFO, nil - case "DEBUG": - return syslog.LOG_DEBUG, nil - // facility - case "DAEMON": - return syslog.LOG_DAEMON, nil - case "LOCAL0": - return syslog.LOG_LOCAL0, nil - case "LOCAL1": - return syslog.LOG_LOCAL1, nil - case "LOCAL2": - return syslog.LOG_LOCAL2, nil - case "LOCAL3": - return syslog.LOG_LOCAL3, nil - case "LOCAL4": - return syslog.LOG_LOCAL4, nil - case "LOCAL5": - return syslog.LOG_LOCAL5, nil - case "LOCAL6": - return syslog.LOG_LOCAL6, nil - case "LOCAL7": - return syslog.LOG_LOCAL7, nil - default: - return 0, fmt.Errorf("invalid syslog priority: %s", 
facility) - } -} - -type Syslog struct { - stopProcess chan bool - doneProcess chan bool - stopRun chan bool - doneRun chan bool - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - severity syslog.Priority - facility syslog.Priority - syslogWriter *syslog.Writer - syslogReady bool - transportReady chan bool - transportReconnect chan bool - textFormat []string - name string - RoutingHandler pkgutils.RoutingHandler -} - -func NewSyslog(config *pkgconfig.Config, console *logger.Logger, name string) *Syslog { - console.Info(pkgutils.PrefixLogLogger+"[%s] syslog - enabled", name) - s := &Syslog{ - stopProcess: make(chan bool), - doneProcess: make(chan bool), - stopRun: make(chan bool), - doneRun: make(chan bool), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.Syslog.ChannelBufferSize), - outputChan: make(chan dnsutils.DNSMessage, config.Loggers.Syslog.ChannelBufferSize), - transportReady: make(chan bool), - transportReconnect: make(chan bool), - logger: console, - config: config, - configChan: make(chan *pkgconfig.Config), - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, console, name), - } - s.ReadConfig() - return s -} - -func (s *Syslog) GetName() string { return s.name } - -func (s *Syslog) AddDroppedRoute(wrk pkgutils.Worker) { - s.RoutingHandler.AddDroppedRoute(wrk) -} - -func (s *Syslog) AddDefaultRoute(wrk pkgutils.Worker) { - s.RoutingHandler.AddDefaultRoute(wrk) -} - -func (s *Syslog) SetLoggers(loggers []pkgutils.Worker) {} - -func (s *Syslog) ReadConfig() { - if !pkgconfig.IsValidTLS(s.config.Loggers.Syslog.TLSMinVersion) { - s.logger.Fatal(pkgutils.PrefixLogLogger + "[" + s.name + "] syslog - invalid tls min version") - } - - if !pkgconfig.IsValidMode(s.config.Loggers.Syslog.Mode) { - s.logger.Fatal(pkgutils.PrefixLogLogger + "[" + s.name + "] syslog - invalid mode text or json expected") - } - severity, err := 
GetPriority(s.config.Loggers.Syslog.Severity) - if err != nil { - s.logger.Fatal(pkgutils.PrefixLogLogger + "[" + s.name + "] syslog - invalid severity") - } - s.severity = severity - - facility, err := GetPriority(s.config.Loggers.Syslog.Facility) - if err != nil { - s.logger.Fatal(pkgutils.PrefixLogLogger + "[" + s.name + "] syslog - invalid facility") - } - s.facility = facility - - if len(s.config.Loggers.Syslog.TextFormat) > 0 { - s.textFormat = strings.Fields(s.config.Loggers.Syslog.TextFormat) - } else { - s.textFormat = strings.Fields(s.config.Global.TextFormat) - } -} - -func (s *Syslog) ReloadConfig(config *pkgconfig.Config) { - s.LogInfo("reload configuration!") - s.configChan <- config -} - -func (s *Syslog) GetInputChannel() chan dnsutils.DNSMessage { - return s.inputChan -} - -func (s *Syslog) LogInfo(msg string, v ...interface{}) { - s.logger.Info(pkgutils.PrefixLogLogger+"["+s.name+"] syslog - "+msg, v...) -} - -func (s *Syslog) LogError(msg string, v ...interface{}) { - s.logger.Error(pkgutils.PrefixLogLogger+"["+s.name+"] syslog - "+msg, v...) 
-} - -func (s *Syslog) Stop() { - s.LogInfo("stopping logger...") - s.RoutingHandler.Stop() - - s.LogInfo("stopping to run...") - s.stopRun <- true - <-s.doneRun - - s.LogInfo("stopping to process...") - s.stopProcess <- true - <-s.doneProcess -} - -func (s *Syslog) ConnectToRemote() { - for { - if s.syslogWriter != nil { - s.syslogWriter.Close() - s.syslogWriter = nil - } - - var logWriter *syslog.Writer - var tlsConfig *tls.Config - var err error - - switch s.config.Loggers.Syslog.Transport { - case "local": - s.LogInfo("connecting to local syslog...") - logWriter, err = syslog.New(s.facility|s.severity, "") - case netlib.SocketUnix: - s.LogInfo("connecting to %s://%s ...", - s.config.Loggers.Syslog.Transport, - s.config.Loggers.Syslog.RemoteAddress) - logWriter, err = syslog.Dial("", - s.config.Loggers.Syslog.RemoteAddress, s.facility|s.severity, - s.config.Loggers.Syslog.Tag) - case netlib.SocketUDP, netlib.SocketTCP: - s.LogInfo("connecting to %s://%s ...", - s.config.Loggers.Syslog.Transport, - s.config.Loggers.Syslog.RemoteAddress) - logWriter, err = syslog.Dial(s.config.Loggers.Syslog.Transport, - s.config.Loggers.Syslog.RemoteAddress, s.facility|s.severity, - s.config.Loggers.Syslog.Tag) - case netlib.SocketTLS: - s.LogInfo("connecting to %s://%s ...", - s.config.Loggers.Syslog.Transport, - s.config.Loggers.Syslog.RemoteAddress) - - tlsOptions := pkgconfig.TLSOptions{ - InsecureSkipVerify: s.config.Loggers.Syslog.TLSInsecure, - MinVersion: s.config.Loggers.Syslog.TLSMinVersion, - CAFile: s.config.Loggers.Syslog.CAFile, - CertFile: s.config.Loggers.Syslog.CertFile, - KeyFile: s.config.Loggers.Syslog.KeyFile, - } - - tlsConfig, err = pkgconfig.TLSClientConfig(tlsOptions) - if err == nil { - logWriter, err = syslog.DialWithTLSConfig(s.config.Loggers.Syslog.Transport, - s.config.Loggers.Syslog.RemoteAddress, s.facility|s.severity, - s.config.Loggers.Syslog.Tag, - tlsConfig) - } - default: - s.logger.Fatal("invalid syslog transport: ", 
s.config.Loggers.Syslog.Transport) - } - - // something is wrong during connection ? - if err != nil { - s.LogError("%s", err) - s.LogInfo("retry to connect in %d seconds", s.config.Loggers.Syslog.RetryInterval) - time.Sleep(time.Duration(s.config.Loggers.Syslog.RetryInterval) * time.Second) - continue - } - - s.syslogWriter = logWriter - - // set syslog format - switch strings.ToLower(s.config.Loggers.Syslog.Formatter) { - case "unix": - s.syslogWriter.SetFormatter(syslog.UnixFormatter) - case "rfc3164": - s.syslogWriter.SetFormatter(syslog.RFC3164Formatter) - case "rfc5424", "": - s.syslogWriter.SetFormatter(syslog.RFC5424Formatter) - } - - // set syslog framer - switch strings.ToLower(s.config.Loggers.Syslog.Framer) { - case "none", "": - s.syslogWriter.SetFramer(syslog.DefaultFramer) - case "rfc5425": - s.syslogWriter.SetFramer(syslog.RFC5425MessageLengthFramer) - } - - // custom hostname - if len(s.config.Loggers.Syslog.Hostname) > 0 { - s.syslogWriter.SetHostname(s.config.Loggers.Syslog.Hostname) - } - // custom program name - if len(s.config.Loggers.Syslog.AppName) > 0 { - s.syslogWriter.SetProgram(s.config.Loggers.Syslog.AppName) - } - - // notify process that the transport is ready - // block the loop until a reconnect is needed - s.transportReady <- true - s.transportReconnect <- true - } -} - -func (s *Syslog) Run() { - s.LogInfo("running in background...") - - // prepare next channels - defaultRoutes, defaultNames := s.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := s.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, s.outputChan) - subprocessors := transformers.NewTransforms(&s.config.OutgoingTransformers, s.logger, s.name, listChannel, 0) - - // goroutine to process transformed dns messages - go s.Process() - - // init remote conn - go s.ConnectToRemote() - - // loop to process incoming messages -RUN_LOOP: - for { - select { - case 
<-s.stopRun: - // cleanup transformers - subprocessors.Reset() - - s.doneRun <- true - break RUN_LOOP - - // new config provided? - case cfg, opened := <-s.configChan: - if !opened { - return - } - s.config = cfg - s.ReadConfig() - subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, opened := <-s.inputChan: - if !opened { - s.LogInfo("input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - s.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? - s.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - s.outputChan <- dm - } - } - s.LogInfo("run terminated") -} - -func (s *Syslog) FlushBuffer(buf *[]dnsutils.DNSMessage) { - buffer := new(bytes.Buffer) - var err error - - for _, dm := range *buf { - switch s.config.Loggers.Syslog.Mode { - case pkgconfig.ModeText: - // write the text line to the buffer - buffer.Write(dm.Bytes(s.textFormat, - s.config.Global.TextFormatDelimiter, - s.config.Global.TextFormatBoundary)) - - // replace NULL char from text line directly in the buffer - // because the NULL is a end of log in syslog - for i := 0; i < buffer.Len(); i++ { - if buffer.Bytes()[i] == 0 { - buffer.Bytes()[i] = s.config.Loggers.Syslog.ReplaceNullChar[0] - } - } - - // ensure it ends in a \n - buffer.WriteString("\n") - - // write the modified content of the buffer to s.syslogWriter - // and reset the buffer - _, err = buffer.WriteTo(s.syslogWriter) - - case pkgconfig.ModeJSON: - // encode to json the dns message - json.NewEncoder(buffer).Encode(dm) - - // write the content of the buffer to s.syslogWriter - // and reset the buffer - _, err = buffer.WriteTo(s.syslogWriter) - - case pkgconfig.ModeFlatJSON: - // get flatten object - flat, errflat := dm.Flatten() - if errflat != nil { - s.LogError("flattening DNS 
message failed: %e", err) - continue - } - - // encode to json - json.NewEncoder(buffer).Encode(flat) - - // write the content of the buffer to s.syslogWriter - // and reset the buffer - _, err = buffer.WriteTo(s.syslogWriter) - } - - if err != nil { - s.LogError("write error %s", err) - s.syslogReady = false - <-s.transportReconnect - break - } - } - - // reset buffer - *buf = nil -} - -func (s *Syslog) Process() { - // init buffer - bufferDm := []dnsutils.DNSMessage{} - - // init flust timer for buffer - flushInterval := time.Duration(s.config.Loggers.Syslog.FlushInterval) * time.Second - flushTimer := time.NewTimer(flushInterval) - - s.LogInfo("processing dns messages...") -PROCESS_LOOP: - for { - select { - case <-s.stopProcess: - // close connection - if s.syslogWriter != nil { - s.syslogWriter.Close() - } - s.doneProcess <- true - break PROCESS_LOOP - - case <-s.transportReady: - s.LogInfo("syslog transport is ready") - s.syslogReady = true - - // incoming dns message to process - case dm, opened := <-s.outputChan: - if !opened { - s.LogInfo("output channel closed!") - return - } - - // discar dns message if the connection is not ready - if !s.syslogReady { - continue - } - // append dns message to buffer - bufferDm = append(bufferDm, dm) - - // buffer is full ? 
- if len(bufferDm) >= s.config.Loggers.Syslog.BufferSize { - s.FlushBuffer(&bufferDm) - } - - // flush the buffer - case <-flushTimer.C: - if !s.syslogReady { - bufferDm = nil - } - - if len(bufferDm) > 0 { - s.FlushBuffer(&bufferDm) - } - - // restart timer - flushTimer.Reset(flushInterval) - } - } - s.LogInfo("processing terminated") -} diff --git a/loggers/tcpclient.go b/loggers/tcpclient.go deleted file mode 100644 index 1543e25b..00000000 --- a/loggers/tcpclient.go +++ /dev/null @@ -1,394 +0,0 @@ -package loggers - -import ( - "bufio" - "crypto/tls" - "encoding/json" - "errors" - "io" - "net" - "strconv" - "strings" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" -) - -type TCPClient struct { - stopProcess chan bool - doneProcess chan bool - stopRun chan bool - doneRun chan bool - stopRead chan bool - doneRead chan bool - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - textFormat []string - name string - transport string - transportWriter *bufio.Writer - transportConn net.Conn - transportReady chan bool - transportReconnect chan bool - writerReady bool - RoutingHandler pkgutils.RoutingHandler -} - -func NewTCPClient(config *pkgconfig.Config, logger *logger.Logger, name string) *TCPClient { - logger.Info(pkgutils.PrefixLogLogger+"[%s] tcpclient - enabled", name) - s := &TCPClient{ - stopProcess: make(chan bool), - doneProcess: make(chan bool), - stopRun: make(chan bool), - doneRun: make(chan bool), - stopRead: make(chan bool), - doneRead: make(chan bool), - inputChan: make(chan dnsutils.DNSMessage, config.Loggers.TCPClient.ChannelBufferSize), - outputChan: 
make(chan dnsutils.DNSMessage, config.Loggers.TCPClient.ChannelBufferSize), - transportReady: make(chan bool), - transportReconnect: make(chan bool), - logger: logger, - config: config, - configChan: make(chan *pkgconfig.Config), - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - } - - s.ReadConfig() - - return s -} - -func (c *TCPClient) GetName() string { return c.name } - -func (c *TCPClient) AddDroppedRoute(wrk pkgutils.Worker) { - c.RoutingHandler.AddDroppedRoute(wrk) -} - -func (c *TCPClient) AddDefaultRoute(wrk pkgutils.Worker) { - c.RoutingHandler.AddDefaultRoute(wrk) -} - -func (c *TCPClient) SetLoggers(loggers []pkgutils.Worker) {} - -func (c *TCPClient) ReadConfig() { - c.transport = c.config.Loggers.TCPClient.Transport - - // begin backward compatibility - if c.config.Loggers.TCPClient.TLSSupport { - c.transport = netlib.SocketTLS - } - if len(c.config.Loggers.TCPClient.SockPath) > 0 { - c.transport = netlib.SocketUnix - } - // end - - if len(c.config.Loggers.TCPClient.TextFormat) > 0 { - c.textFormat = strings.Fields(c.config.Loggers.TCPClient.TextFormat) - } else { - c.textFormat = strings.Fields(c.config.Global.TextFormat) - } -} - -func (c *TCPClient) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration!") - c.configChan <- config -} - -func (c *TCPClient) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogLogger+"["+c.name+"] tcpclient - "+msg, v...) -} - -func (c *TCPClient) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogLogger+"["+c.name+"] tcpclient - "+msg, v...) 
-} - -func (c *TCPClient) GetInputChannel() chan dnsutils.DNSMessage { - return c.inputChan -} - -func (c *TCPClient) Stop() { - c.LogInfo("stopping logger...") - c.RoutingHandler.Stop() - - c.LogInfo("stopping to run...") - c.stopRun <- true - <-c.doneRun - - c.LogInfo("stopping to read...") - c.stopRead <- true - <-c.doneRead - - c.LogInfo("stopping to process...") - c.stopProcess <- true - <-c.doneProcess -} - -func (c *TCPClient) Disconnect() { - if c.transportConn != nil { - c.LogInfo("closing tcp connection") - c.transportConn.Close() - } -} - -func (c *TCPClient) ReadFromConnection() { - buffer := make([]byte, 4096) - - go func() { - for { - _, err := c.transportConn.Read(buffer) - if err != nil { - if errors.Is(err, io.EOF) || errors.Is(err, net.ErrClosed) { - c.LogInfo("read from connection terminated") - break - } - c.LogError("Error on reading: %s", err.Error()) - } - // We just discard the data - } - }() - - // block goroutine until receive true event in stopRead channel - <-c.stopRead - c.doneRead <- true - - c.LogInfo("read goroutine terminated") -} - -func (c *TCPClient) ConnectToRemote() { - for { - if c.transportConn != nil { - c.transportConn.Close() - c.transportConn = nil - } - - address := c.config.Loggers.TCPClient.RemoteAddress + ":" + strconv.Itoa(c.config.Loggers.TCPClient.RemotePort) - connTimeout := time.Duration(c.config.Loggers.TCPClient.ConnectTimeout) * time.Second - - // make the connection - var conn net.Conn - var err error - - switch c.transport { - case netlib.SocketUnix: - address = c.config.Loggers.TCPClient.RemoteAddress - if len(c.config.Loggers.TCPClient.SockPath) > 0 { - address = c.config.Loggers.TCPClient.SockPath - } - c.LogInfo("connecting to %s://%s", c.transport, address) - conn, err = net.DialTimeout(c.transport, address, connTimeout) - - case netlib.SocketTCP: - c.LogInfo("connecting to %s://%s", c.transport, address) - conn, err = net.DialTimeout(c.transport, address, connTimeout) - - case netlib.SocketTLS: - 
c.LogInfo("connecting to %s://%s", c.transport, address) - - var tlsConfig *tls.Config - - tlsOptions := pkgconfig.TLSOptions{ - InsecureSkipVerify: c.config.Loggers.TCPClient.TLSInsecure, - MinVersion: c.config.Loggers.TCPClient.TLSMinVersion, - CAFile: c.config.Loggers.TCPClient.CAFile, - CertFile: c.config.Loggers.TCPClient.CertFile, - KeyFile: c.config.Loggers.TCPClient.KeyFile, - } - - tlsConfig, err = pkgconfig.TLSClientConfig(tlsOptions) - if err == nil { - dialer := &net.Dialer{Timeout: connTimeout} - conn, err = tls.DialWithDialer(dialer, netlib.SocketTCP, address, tlsConfig) - } - default: - c.logger.Fatal("logger=tcpclient - invalid transport:", c.transport) - } - - // something is wrong during connection ? - if err != nil { - c.LogError("%s", err) - c.LogInfo("retry to connect in %d seconds", c.config.Loggers.TCPClient.RetryInterval) - time.Sleep(time.Duration(c.config.Loggers.TCPClient.RetryInterval) * time.Second) - continue - } - - c.transportConn = conn - - // block until framestream is ready - c.transportReady <- true - - // block until an error occurred, need to reconnect - c.transportReconnect <- true - } -} - -func (c *TCPClient) FlushBuffer(buf *[]dnsutils.DNSMessage) { - for _, dm := range *buf { - if c.config.Loggers.TCPClient.Mode == pkgconfig.ModeText { - c.transportWriter.Write(dm.Bytes(c.textFormat, - c.config.Global.TextFormatDelimiter, - c.config.Global.TextFormatBoundary)) - c.transportWriter.WriteString(c.config.Loggers.TCPClient.PayloadDelimiter) - } - - if c.config.Loggers.TCPClient.Mode == pkgconfig.ModeJSON { - json.NewEncoder(c.transportWriter).Encode(dm) - c.transportWriter.WriteString(c.config.Loggers.TCPClient.PayloadDelimiter) - } - - if c.config.Loggers.TCPClient.Mode == pkgconfig.ModeFlatJSON { - flat, err := dm.Flatten() - if err != nil { - c.LogError("flattening DNS message failed: %e", err) - continue - } - json.NewEncoder(c.transportWriter).Encode(flat) - 
c.transportWriter.WriteString(c.config.Loggers.TCPClient.PayloadDelimiter) - } - - // flush the transport buffer - err := c.transportWriter.Flush() - if err != nil { - c.LogError("send frame error", err.Error()) - c.writerReady = false - <-c.transportReconnect - break - } - } - - // reset buffer - *buf = nil -} - -func (c *TCPClient) Run() { - c.LogInfo("running in background...") - - // prepare next channels - defaultRoutes, defaultNames := c.RoutingHandler.GetDefaultRoutes() - droppedRoutes, droppedNames := c.RoutingHandler.GetDroppedRoutes() - - // prepare transforms - listChannel := []chan dnsutils.DNSMessage{} - listChannel = append(listChannel, c.outputChan) - subprocessors := transformers.NewTransforms(&c.config.OutgoingTransformers, c.logger, c.name, listChannel, 0) - - // goroutine to process transformed dns messages - go c.Process() - - // loop to process incoming messages -RUN_LOOP: - for { - select { - case <-c.stopRun: - // cleanup transformers - subprocessors.Reset() - - c.doneRun <- true - break RUN_LOOP - - case cfg, opened := <-c.configChan: - if !opened { - return - } - c.config = cfg - c.ReadConfig() - subprocessors.ReloadConfig(&cfg.OutgoingTransformers) - - case dm, opened := <-c.inputChan: - if !opened { - c.LogInfo("input channel closed!") - return - } - - // apply tranforms, init dns message with additionnals parts if necessary - subprocessors.InitDNSMessageFormat(&dm) - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { - c.RoutingHandler.SendTo(droppedRoutes, droppedNames, dm) - continue - } - - // send to next ? 
- c.RoutingHandler.SendTo(defaultRoutes, defaultNames, dm) - - // send to output channel - c.outputChan <- dm - } - } - c.LogInfo("run terminated") -} - -func (c *TCPClient) Process() { - // init buffer - bufferDm := []dnsutils.DNSMessage{} - - // init flust timer for buffer - flushInterval := time.Duration(c.config.Loggers.TCPClient.FlushInterval) * time.Second - flushTimer := time.NewTimer(flushInterval) - - // init remote conn - go c.ConnectToRemote() - - c.LogInfo("ready to process") -PROCESS_LOOP: - for { - select { - case <-c.stopProcess: - // closing remote connection if exist - c.Disconnect() - c.doneProcess <- true - break PROCESS_LOOP - - case <-c.transportReady: - c.LogInfo("transport connected with success") - c.transportWriter = bufio.NewWriter(c.transportConn) - c.writerReady = true - - // read from the connection until we stop - go c.ReadFromConnection() - - // incoming dns message to process - case dm, opened := <-c.outputChan: - if !opened { - c.LogInfo("output channel closed!") - return - } - - // drop dns message if the connection is not ready to avoid memory leak or - // to block the channel - if !c.writerReady { - continue - } - - // append dns message to buffer - bufferDm = append(bufferDm, dm) - - // buffer is full ? 
- if len(bufferDm) >= c.config.Loggers.TCPClient.BufferSize { - c.FlushBuffer(&bufferDm) - } - - // flush the buffer - case <-flushTimer.C: - if !c.writerReady { - bufferDm = nil - } - - if len(bufferDm) > 0 { - c.FlushBuffer(&bufferDm) - } - - // restart timer - flushTimer.Reset(flushInterval) - - } - } - c.LogInfo("processing terminated") -} diff --git a/netlib/conn.go b/netlib/conn.go deleted file mode 100644 index 3f64f04e..00000000 --- a/netlib/conn.go +++ /dev/null @@ -1,33 +0,0 @@ -package netlib - -import ( - "io" - "net" -) - -// thanks to https://stackoverflow.com/questions/28967701/golang-tcp-socket-cant-close-after-get-file, -// call conn.CloseRead() before calling conn.Close() -func Close(conn io.Closer, reset bool) error { - type ReadCloser interface { - CloseRead() error - } - - // Aggressive closing, send TCP RESET instead of FIN - if reset { - if tcpConn, ok := conn.(*net.TCPConn); ok { - tcpConn.SetLinger(0) - } - } - - var errs []error - if closer, ok := conn.(ReadCloser); ok { - errs = append(errs, closer.CloseRead()) - } - errs = append(errs, conn.Close()) - for _, err := range errs { - if err != nil { - return err - } - } - return nil -} diff --git a/netlib/constant.go b/netlib/constant.go deleted file mode 100644 index 63887994..00000000 --- a/netlib/constant.go +++ /dev/null @@ -1,28 +0,0 @@ -package netlib - -const ( - ProtoInet = "INET" - ProtoInet6 = "INET6" - ProtoIPv6 = "IPv6" - ProtoIPv4 = "IPv4" - - ProtoUDP = "UDP" - ProtoTCP = "TCP" - - SocketTCP = "tcp" - SocketUDP = "udp" - SocketUnix = "unix" - SocketTLS = "tcp+tls" -) - -var ( - IPVersion = map[string]string{ - ProtoInet: ProtoIPv4, - ProtoInet6: ProtoIPv6, - } - - IPToInet = map[string]string{ - ProtoIPv4: ProtoInet, - ProtoIPv6: ProtoInet6, - } -) diff --git a/netlib/ipdefrag.go b/netlib/ipdefrag.go deleted file mode 100644 index a7d29f90..00000000 --- a/netlib/ipdefrag.go +++ /dev/null @@ -1,386 +0,0 @@ -package netlib - -import ( - "container/list" - "fmt" - "sync" - "time" 
- - "github.com/google/gopacket" - "github.com/google/gopacket/layers" -) - -const ( - IPv6MinimumFragmentSize = 1280 - IPv6MaximumSize = 65535 - IPv6MaximumFragmentOffset = 8189 - IPv6MaximumFragmentListLen = 52 - - IPv4MinimumFragmentSize = 8 // Minimum size of a single fragment - IPv4MaximumSize = 65535 // Maximum size of a fragment (2^16) - IPv4MaximumFragmentOffset = 8183 // Maximum offset of a fragment - IPv4MaximumFragmentListLen = 8192 // Back out if we get more than this many fragments -) - -type fragments struct { - List list.List - Highest uint16 - Current uint16 - LastSeen time.Time -} - -func (f *fragments) insert(in gopacket.Packet) (gopacket.Packet, error) { - var inFragOffset uint16 - var inFragLength uint16 - var inFragMore bool - - if in.NetworkLayer().LayerType() == layers.LayerTypeIPv6 { - inIP6 := in.Layer(layers.LayerTypeIPv6).(*layers.IPv6) - inFrag6 := in.Layer(layers.LayerTypeIPv6Fragment).(*layers.IPv6Fragment) - inFragOffset = inFrag6.FragmentOffset * 8 - inFragLength = inIP6.Length - 8 - inFragMore = inFrag6.MoreFragments - } - if in.NetworkLayer().LayerType() == layers.LayerTypeIPv4 { - inIP4 := in.Layer(layers.LayerTypeIPv4).(*layers.IPv4) - inFragOffset = inIP4.FragOffset * 8 - inFragLength = inIP4.Length - 20 - inFragMore = inIP4.Flags&layers.IPv4MoreFragments > 0 - } - - if inFragOffset >= f.Highest { - f.List.PushBack(in) - } else { - for e := f.List.Front(); e != nil; e = e.Next() { - packet, _ := e.Value.(gopacket.Packet) - - var fragOffset uint16 - - frag6 := packet.Layer(layers.LayerTypeIPv6Fragment).(*layers.IPv6Fragment) - ip4, _ := e.Value.(*layers.IPv4) - if frag6 != nil { - fragOffset = frag6.FragmentOffset * 8 - } else { - fragOffset = ip4.FragOffset * 8 - } - - if inFragOffset == fragOffset { - return nil, nil - } - if inFragOffset <= fragOffset { - f.List.InsertBefore(in, e) - break - } - } - } - - f.LastSeen = in.Metadata().Timestamp - - // After inserting the Fragment, we update the counters - if 
f.Highest < inFragOffset+inFragLength { - f.Highest = inFragOffset + inFragLength - } - f.Current += inFragLength - - // Final Fragment ? - if !inFragMore && f.Highest == f.Current { - return f.build(in) - } - return nil, nil -} - -func (f *fragments) build(in gopacket.Packet) (gopacket.Packet, error) { - var final []byte - var currentOffset uint16 - - for e := f.List.Front(); e != nil; e = e.Next() { - pack, _ := e.Value.(gopacket.Packet) - - var fragOffset uint16 - var fragLength uint16 - var fragPayload []byte - var ipOffset uint16 - - if pack.NetworkLayer().LayerType() == layers.LayerTypeIPv6 { - frag6 := pack.Layer(layers.LayerTypeIPv6Fragment).(*layers.IPv6Fragment) - ip6 := pack.Layer(layers.LayerTypeIPv6).(*layers.IPv6) - - fragOffset = frag6.FragmentOffset - fragLength = ip6.Length - fragPayload = frag6.Payload - ipOffset = 8 - } - if pack.NetworkLayer().LayerType() == layers.LayerTypeIPv4 { - ip4 := pack.Layer(layers.LayerTypeIPv4).(*layers.IPv4) - - fragOffset = ip4.FragOffset - fragLength = ip4.Length - fragPayload = ip4.Payload - ipOffset = 20 - } - - offset := fragOffset * 8 - switch { - case offset == currentOffset: - final = append(final, fragPayload...) - currentOffset = currentOffset + fragLength - ipOffset - case offset < currentOffset: - startAt := currentOffset - fragOffset*8 - if startAt > fragLength-ipOffset { - return nil, fmt.Errorf("defrag: invalid fragment") - } - final = append(final, fragPayload[startAt:]...) - currentOffset += fragOffset * 8 - default: - // Houston - we have an hole ! 
- return nil, fmt.Errorf("defrag: hole found") - } - } - - if in.NetworkLayer().LayerType() == layers.LayerTypeIPv4 { - ip4 := in.Layer(layers.LayerTypeIPv4).(*layers.IPv4) - out := &layers.IPv4{ - Version: ip4.Version, - IHL: ip4.IHL, - TOS: ip4.TOS, - Length: f.Highest, - Id: ip4.Id, - Flags: 0, - FragOffset: 0, - TTL: ip4.TTL, - Protocol: ip4.Protocol, - Checksum: 0, - SrcIP: ip4.SrcIP, - DstIP: ip4.DstIP, - Options: ip4.Options, - Padding: ip4.Padding, - } - out.Payload = final - - buf := gopacket.NewSerializeBuffer() - ops := gopacket.SerializeOptions{ - FixLengths: true, - ComputeChecksums: true, - } - - ip4Payload, _ := buf.PrependBytes(len(final)) - copy(ip4Payload, final) - out.SerializeTo(buf, ops) - - outPacket := gopacket.NewPacket(buf.Bytes(), layers.LayerTypeIPv4, gopacket.Default) - outPacket.Metadata().CaptureLength = len(outPacket.Data()) - outPacket.Metadata().Length = len(outPacket.Data()) - outPacket.Metadata().Timestamp = in.Metadata().Timestamp - - // workaround to mark the packet as reassembled - outPacket.Metadata().Truncated = true - return outPacket, nil - } - - if in.NetworkLayer().LayerType() == layers.LayerTypeIPv6 { - ip6 := in.Layer(layers.LayerTypeIPv6).(*layers.IPv6) - frag6 := in.Layer(layers.LayerTypeIPv6Fragment).(*layers.IPv6Fragment) - out := &layers.IPv6{ - Version: ip6.Version, - TrafficClass: ip6.TrafficClass, - FlowLabel: ip6.FlowLabel, - Length: f.Highest, - NextHeader: frag6.NextHeader, - HopLimit: ip6.HopLimit, - SrcIP: ip6.SrcIP, - DstIP: ip6.DstIP, - HopByHop: ip6.HopByHop, - } - out.Payload = final - - buf := gopacket.NewSerializeBuffer() - ops := gopacket.SerializeOptions{ - FixLengths: true, - ComputeChecksums: true, - } - - v6Payload, _ := buf.PrependBytes(len(final)) - copy(v6Payload, final) - - out.SerializeTo(buf, ops) - outPacket := gopacket.NewPacket(buf.Bytes(), layers.LayerTypeIPv6, gopacket.Default) - outPacket.Metadata().CaptureLength = len(outPacket.Data()) - outPacket.Metadata().Length = 
len(outPacket.Data()) - outPacket.Metadata().Timestamp = in.Metadata().Timestamp - - // workaround to mark the packet as reassembled - outPacket.Metadata().Truncated = true - - return outPacket, nil - } - return nil, nil -} - -type ipFlow struct { - flow gopacket.Flow - id uint32 -} - -func newIPv4(packet gopacket.Packet) ipFlow { - ip4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4) - return ipFlow{ - flow: ip4.NetworkFlow(), - id: uint32(ip4.Id), - } -} - -func newIPv6(packet gopacket.Packet) ipFlow { - frag := packet.Layer(layers.LayerTypeIPv6Fragment).(*layers.IPv6Fragment) - ip6 := packet.Layer(layers.LayerTypeIPv6).(*layers.IPv6) - return ipFlow{ - flow: ip6.NetworkFlow(), - id: frag.Identification, - } -} - -type IPDefragmenter struct { - sync.RWMutex - ipFlows map[ipFlow]*fragments -} - -func NewIPDefragmenter() *IPDefragmenter { - return &IPDefragmenter{ - ipFlows: make(map[ipFlow]*fragments), - } -} - -func (d *IPDefragmenter) DefragIP(in gopacket.Packet) (gopacket.Packet, error) { - // check if we need to defrag - if st := d.dontDefrag(in); st { - return in, nil - } - - // perfom security checks - if err := d.securityChecks(in); err != nil { - return nil, err - } - - // ok, got a fragment - // have we already seen a flow between src/dst with that Id? 
- var ipf ipFlow - var fl *fragments - var exist bool - var maxFrag int - - if in.NetworkLayer().LayerType() == layers.LayerTypeIPv4 { - ipf = newIPv4(in) - maxFrag = IPv4MaximumFragmentListLen - } - if in.NetworkLayer().LayerType() == layers.LayerTypeIPv6 { - ipf = newIPv6(in) - maxFrag = IPv6MaximumFragmentListLen - } - d.Lock() - fl, exist = d.ipFlows[ipf] - if !exist { - fl = new(fragments) - d.ipFlows[ipf] = fl - } - d.Unlock() - - // insert, and if final build it - out, err2 := fl.insert(in) - - // at last, if we hit the maximum frag list len - // without any defrag success, we just drop everything and - // raise an error - if out == nil && fl.List.Len()+1 > maxFrag { - d.flush(ipf) - return nil, fmt.Errorf("fragment List hits its maximum") - } - - // if we got a packet, it's a new one, and he is defragmented - // when defrag is done for a flow between two ip clean the list - if out != nil { - d.flush(ipf) - return out, nil - } - return nil, err2 -} - -func (d *IPDefragmenter) dontDefrag(in gopacket.Packet) bool { - if in.NetworkLayer().LayerType() == layers.LayerTypeIPv6 { - // check if we need to defrag - frag := in.Layer(layers.LayerTypeIPv6Fragment) - if frag == nil { - return true - } - } - - if in.NetworkLayer().LayerType() == layers.LayerTypeIPv4 { - ip4 := in.Layer(layers.LayerTypeIPv4).(*layers.IPv4) - // don't defrag packet with DF flag - if ip4.Flags&layers.IPv4DontFragment != 0 { - return true - } - // don't defrag not fragmented ones - if ip4.Flags&layers.IPv4MoreFragments == 0 && ip4.FragOffset == 0 { - return true - } - } - - return false -} - -func (d *IPDefragmenter) securityChecks(in gopacket.Packet) error { - if in.NetworkLayer().LayerType() == layers.LayerTypeIPv6 { - frag6 := in.Layer(layers.LayerTypeIPv6Fragment).(*layers.IPv6Fragment) - - // don't allow too big fragment offset - if frag6.FragmentOffset > IPv6MaximumFragmentOffset { - return fmt.Errorf("fragment offset too big (handcrafted? 
%d > %d)", frag6.FragmentOffset, IPv6MaximumFragmentOffset) - } - fragOffset := uint32(frag6.FragmentOffset * 8) - - // don't allow fragment that would oversize an IP packet - if fragOffset+uint32(len(frag6.Payload)) > IPv6MaximumSize { - return fmt.Errorf("fragment will overrun (handcrafted? %d > %d)", fragOffset+uint32(len(frag6.Payload)), IPv6MaximumFragmentOffset) - } - } - if in.NetworkLayer().LayerType() == layers.LayerTypeIPv4 { - ip4 := in.Layer(layers.LayerTypeIPv4).(*layers.IPv4) - fragSize := ip4.Length - uint16(ip4.IHL)*4 - - // don't allow small fragments outside of specification - if fragSize < IPv4MinimumFragmentSize { - return fmt.Errorf("fragment too small(handcrafted? %d < %d)", fragSize, IPv4MinimumFragmentSize) - } - - // don't allow too big fragment offset - if ip4.FragOffset > IPv4MaximumFragmentOffset { - return fmt.Errorf("fragment offset too big (handcrafted? %d > %d)", ip4.FragOffset, IPv4MaximumFragmentOffset) - } - fragOffset := ip4.FragOffset * 8 - - // don't allow fragment that would oversize an IP packet - if fragOffset+ip4.Length > IPv4MaximumSize { - return fmt.Errorf("fragment will overrun (handcrafted? 
%d > %d)", fragOffset+ip4.Length, IPv4MaximumSize) - } - } - - return nil -} - -func (d *IPDefragmenter) flush(ipf ipFlow) { - d.Lock() - delete(d.ipFlows, ipf) - d.Unlock() -} - -func (d *IPDefragmenter) DiscardOlderThan(t time.Time) int { - var nb int - d.Lock() - for k, v := range d.ipFlows { - if v.LastSeen.Before(t) { - nb++ - delete(d.ipFlows, k) - } - } - d.Unlock() - return nb -} diff --git a/netlib/networkdecoder.go b/netlib/networkdecoder.go deleted file mode 100644 index 6d7237e0..00000000 --- a/netlib/networkdecoder.go +++ /dev/null @@ -1,126 +0,0 @@ -package netlib - -import ( - "fmt" - - "github.com/google/gopacket" - "github.com/google/gopacket/layers" -) - -type NetDecoder struct{} - -const ( - IPv4ProtocolTCP = layers.IPProtocolTCP - IPv4ProtocolUDP = layers.IPProtocolUDP - IPv6ProtocolTCP = layers.IPProtocolTCP - IPv6ProtocolUDP = layers.IPProtocolUDP - IPv6ProtocolFragment = layers.IPProtocolIPv6Fragment -) - -func (d *NetDecoder) Decode(data []byte, p gopacket.PacketBuilder) error { - // Decode the Ethernet layer - ethernetLayer := &layers.Ethernet{} - if err := ethernetLayer.DecodeFromBytes(data, p); err != nil { - return err - } - p.AddLayer(ethernetLayer) - p.SetLinkLayer(ethernetLayer) - - // Check the EtherType of the Ethernet layer to determine the next layer - switch ethernetLayer.EthernetType { - case layers.EthernetTypeIPv4: - return d.decodeIPv4(ethernetLayer.Payload, p) - case layers.EthernetTypeIPv6: - return d.decodeIPv6(ethernetLayer.Payload, p) - } - - return nil -} - -func (d *NetDecoder) decodeIPv4(data []byte, p gopacket.PacketBuilder) error { - // Decode the IPv4 layer - ipv4Layer := &layers.IPv4{} - if err := ipv4Layer.DecodeFromBytes(data, p); err != nil { - return err - } - p.AddLayer(ipv4Layer) - p.SetNetworkLayer(ipv4Layer) - - // Check the Protocol of the IPv4 layer to determine the next layer - switch ipv4Layer.Protocol { - case IPv4ProtocolTCP: - return d.decodeTCP(ipv4Layer.Payload, p) - case 
IPv4ProtocolUDP: - return d.decodeUDP(ipv4Layer.Payload, p) - } - - return nil -} - -func (d *NetDecoder) decodeIPv6(data []byte, p gopacket.PacketBuilder) error { - - ipv6Layer := &layers.IPv6{} - if err := ipv6Layer.DecodeFromBytes(data, p); err != nil { - return err - } - p.AddLayer(ipv6Layer) - p.SetNetworkLayer(ipv6Layer) - - // Check the NextHeader of the IPv6 layer to determine the next layer - switch ipv6Layer.NextHeader { - case IPv6ProtocolTCP: - return d.decodeTCP(ipv6Layer.Payload, p) - case IPv6ProtocolUDP: - return d.decodeUDP(ipv6Layer.Payload, p) - case IPv6ProtocolFragment: - return d.decodeIPv6Fragment(ipv6Layer.Payload, p) - } - return nil -} - -func (d *NetDecoder) decodeIPv6Fragment(data []byte, p gopacket.PacketBuilder) error { - // Create a new packet from the byte slice - packet := gopacket.NewPacket(data, layers.LayerTypeIPv6Fragment, gopacket.Default) - - ipv6FragLayer := packet.Layer(layers.LayerTypeIPv6Fragment) - if ipv6FragLayer == nil { - return fmt.Errorf("no ipv6 fragment layer") - } - - p.AddLayer(ipv6FragLayer) - - ipv6Frag := ipv6FragLayer.(*layers.IPv6Fragment) - - // This is the last fragment, so we can decode the payload - switch ipv6Frag.NextHeader { - case layers.IPProtocolTCP: - return d.decodeTCP(ipv6FragLayer.LayerPayload(), p) - case layers.IPProtocolUDP: - return d.decodeUDP(ipv6FragLayer.LayerPayload(), p) - } - return nil -} - -func (d *NetDecoder) decodeTCP(data []byte, p gopacket.PacketBuilder) error { - // Decode the TCP layer - tcpLayer := &layers.TCP{} - if err := tcpLayer.DecodeFromBytes(data, p); err != nil { - return err - } - p.AddLayer(tcpLayer) - p.SetTransportLayer(tcpLayer) - - return nil -} - -func (d *NetDecoder) decodeUDP(data []byte, p gopacket.PacketBuilder) error { - // Decode the UDP layer - udpLayer := &layers.UDP{} - if err := udpLayer.DecodeFromBytes(data, p); err != nil { - return err - } - p.AddLayer(udpLayer) - p.SetTransportLayer(udpLayer) - - return nil -} diff --git 
a/netlib/networkdecoder_test.go b/netlib/networkdecoder_test.go deleted file mode 100644 index b7f82484..00000000 --- a/netlib/networkdecoder_test.go +++ /dev/null @@ -1,326 +0,0 @@ -package netlib - -import ( - "testing" - - "github.com/google/gopacket" - "github.com/google/gopacket/layers" -) - -func TestNetDecoder_Decode_IPv4_UDP(t *testing.T) { - pkt := []byte{ - // ethernet - 0x00, 0x0c, 0x29, 0x8a, 0x5d, 0xd7, 0x00, 0x86, 0x9c, 0xe7, 0x55, 0x14, 0x08, 0x00, - // ipv4 - 0x45, 0x00, 0x00, 0x44, 0xe5, 0x6a, 0x00, 0x00, 0x6f, 0x11, - 0xec, 0x11, 0xac, 0xd9, 0x28, 0x4c, 0xc1, 0x18, 0xe3, 0xee, - // udp - 0xdd, 0x68, 0x00, 0x35, 0x00, 0x30, 0x0c, 0x33, - // udp payload (dns) - 0xd4, 0x3f, 0x00, 0x10, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x08, 0x77, - 0x65, 0x62, 0x65, 0x72, 0x6c, 0x61, 0x62, 0x02, 0x64, 0x65, 0x00, 0x00, 0x30, 0x00, - 0x01, 0x00, 0x00, 0x29, 0x10, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, - } - - decoder := &NetDecoder{} - - packet := gopacket.NewPacket(pkt, decoder, gopacket.NoCopy) - - packetLayers := packet.Layers() - if len(packetLayers) != 3 { - t.Fatalf("Unexpected number of layers: expected 3, got %d", len(packetLayers)) - } - - if _, ok := packetLayers[0].(*layers.Ethernet); !ok { - t.Errorf("Expected Ethernet layer, got %T", packetLayers[0]) - } - if _, ok := packetLayers[1].(*layers.IPv4); !ok { - t.Errorf("Expected IPv4 layer, got %T", packetLayers[1]) - } - ip4 := packetLayers[1].(*layers.IPv4) - if ip4.Flags&layers.IPv4MoreFragments > 0 { - t.Errorf("Expected more fragment") - } - if _, ok := packetLayers[2].(*layers.UDP); !ok { - t.Errorf("Expected UDP layer, got %T", packetLayers[2]) - } -} - -func TestNetDecoder_Decode_IPv4_TCP(t *testing.T) { - pkt := []byte{ - // ethernet - 0xb0, 0xbb, 0xe5, 0xb2, 0x46, 0x4c, 0xb0, 0x35, 0x9f, 0xd4, 0x03, 0x91, 0x08, 0x00, - // ipv4 - 0x45, 0x00, 0x00, 0x69, 0xb7, 0x65, 0x40, 0x00, 0x40, 0x06, 0xbf, - 0x6e, 0xc0, 0xa8, 0x01, 0x11, 0x01, 0x01, 0x01, 0x01, - // tcp - 0x8d, 
0xcd, 0x00, 0x35, 0x39, 0x4f, 0x0c, 0xbb, 0xcf, 0x72, 0x32, 0xb3, 0x80, 0x18, - 0x01, 0xf6, 0x38, 0xc2, 0x00, 0x00, 0x01, 0x01, 0x08, 0x0a, 0x09, 0x5d, 0x2c, 0x7a, 0x65, 0xe0, - 0x63, 0x90, 0x00, 0x33, 0x85, 0x9f, 0x01, 0x20, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x06, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x03, 0x63, 0x6f, 0x6d, 0x00, 0x00, 0x01, 0x00, 0x01, - 0x00, 0x00, 0x29, 0x04, 0xd0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x00, 0x0a, 0x00, 0x08, 0xdf, - 0x41, 0x92, 0x72, 0x53, 0xf5, 0x1b, 0x48, - } - - decoder := &NetDecoder{} - - packet := gopacket.NewPacket(pkt, decoder, gopacket.NoCopy) - - packetLayers := packet.Layers() - if len(packetLayers) != 3 { - t.Fatalf("Unexpected number of layers: expected 3, got %d", len(packetLayers)) - } - - if _, ok := packetLayers[0].(*layers.Ethernet); !ok { - t.Errorf("Expected Ethernet layer, got %T", packetLayers[0]) - } - if _, ok := packetLayers[2].(*layers.TCP); !ok { - t.Errorf("Expected TCP layer, got %T", packetLayers[2]) - } -} - -func TestNetDecoder_Decode_IPv4_MoreFragment(t *testing.T) { - pkt := []byte{ - // ethernet - 0x00, 0x86, 0x9c, 0xe7, 0x55, 0x14, 0x00, 0x0c, 0x29, 0x8a, 0x5d, 0xd7, 0x08, 0x00, - // ipv4 - 0x45, 0x00, 0x00, 0x44, 0xd0, 0xfe, 0x20, 0x00, 0x40, 0x11, - 0x09, 0xe6, 0xc1, 0x18, 0xe3, 0xee, 0xac, 0xd9, 0x28, 0x4c, - // udp - 0x00, 0x35, 0xdd, 0x68, 0x06, 0xae, 0xb4, 0x63, 0xd4, 0x3f, 0x84, 0x10, 0x00, 0x01, - 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x08, 0x77, 0x65, 0x62, 0x65, 0x72, 0x6c, 0x61, 0x62, 0x02, - 0x64, 0x65, 0x00, 0x00, 0x30, 0x00, 0x01, 0xc0, 0x0c, 0x00, 0x30, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x3c, 0x02, 0x08, 0x01, 0x01, 0x03, 0x0a, 0x03, 0x01, 0x00, 0x01, 0xdd, 0xef, 0xfd, 0xed, 0x22, - 0xad, 0x76, 0x0a, 0x3b, 0x0b, 0x58, 0x10, 0x1d, 0xd5, 0x3d, 0xee, 0xf3, 0xf7, 0xda, 0xaf, 0x8b, - } - - decoder := &NetDecoder{} - - packet := gopacket.NewPacket(pkt, decoder, gopacket.NoCopy) - packetLayers := packet.Layers() - if len(packetLayers) != 3 { - t.Fatalf("Unexpected number of 
layers: expected 3, got %d", len(packetLayers)) - } - - if _, ok := packetLayers[0].(*layers.Ethernet); !ok { - t.Errorf("Expected Ethernet layer, got %T", packetLayers[0]) - } - if _, ok := packetLayers[1].(*layers.IPv4); !ok { - t.Errorf("Expected IPv4 layer, got %T", packetLayers[1]) - } - - ip4 := packetLayers[1].(*layers.IPv4) - if ip4.Flags&layers.IPv4MoreFragments != 1 { - t.Errorf("Expected more fragment flag") - } - if _, ok := packetLayers[2].(*layers.UDP); !ok { - t.Errorf("Expected UDP layer, got %T", packetLayers[2]) - } -} - -func TestNetDecoder_Decode_IPv4_FragmentOffset(t *testing.T) { - pkt := []byte{ - // ethernet - 0x00, 0x86, 0x9c, 0xe7, 0x55, 0x14, 0x00, 0x0c, 0x29, 0x8a, 0x5d, 0xd7, 0x08, 0x00, - // ipv4 - 0x45, 0x00, 0x00, 0xfa, 0xd0, 0xfe, 0x00, 0xb9, 0x40, 0x11, 0x2e, 0x0f, 0xc1, 0x18, 0xe3, 0xee, 0xac, 0xd9, - 0x28, 0x4c, - // udp - 0x92, 0x56, 0x69, 0x0f, 0x05, 0x4b, 0xdb, 0x48, 0x1e, 0x8f, 0xa8, 0x56, 0x36, 0x39, - 0xd5, 0xcc, 0xba, 0xf9, 0xf8, 0x22, 0x24, 0xd0, 0x76, 0xcc, 0x24, 0x9b, 0xda, 0x1d, 0x49, 0xf0, - 0x3e, 0x34, 0x44, 0x9c, 0x94, 0x65, 0x87, 0x34, 0x96, 0x0b, 0x8d, 0x1a, 0xb3, 0x33, 0xbe, 0x88, - 0x01, 0x62, 0x76, 0xf1, 0x22, 0x7b, 0x83, 0x28, 0x3d, 0x81, 0xf1, 0x21, 0x9a, 0xba, 0x6c, 0x6c, - 0xca, 0x72, 0x6e, 0x94, 0x14, 0x99, 0x4d, 0xd7, 0xbb, 0xe2, 0x49, 0xee, 0x72, 0x69, 0x3e, 0xee, - 0x0e, 0x03, 0x6c, 0xcd, 0x33, 0xc9, 0xf4, 0x43, 0xd1, 0x6d, 0xd1, 0x84, 0x3d, 0xee, 0xd0, 0xd1, - 0x5d, 0x8e, 0x2f, 0xf4, 0xce, 0x68, 0x88, 0xf3, 0x5e, 0xd5, 0x90, 0x21, 0x36, 0x1a, 0x95, 0x6f, - 0xb8, 0xbd, 0xc5, 0xf0, 0xa0, 0xc2, 0x0b, 0xe1, 0x0c, 0x62, 0x32, 0x65, 0x38, 0x7a, 0x8c, 0xf9, - 0x24, 0xc9, 0xc4, 0xfa, 0xbd, 0x64, 0x5f, 0x31, 0x25, 0xc5, 0x48, 0x4e, 0x40, 0xba, 0x11, 0x8e, - 0x82, 0x75, 0x19, 0x98, 0x99, 0x07, 0x6a, 0xbd, 0x16, 0x16, 0xcc, 0x35, 0xcf, 0x8c, 0x6b, 0x72, - 0xbb, 0x95, 0xd3, 0xd7, 0x71, 0xf5, 0x54, 0x2f, 0x08, 0x26, 0x2b, 0x0d, 0x51, 0xe8, 0x41, 0x0e, - 0xbd, 0x8f, 0x7a, 0x9a, 0x40, 0x35, 0x47, 0x57, 0x16, 0x5c, 
0xaa, 0x55, 0x0e, 0xa6, 0x01, 0x12, - 0xfa, 0x52, 0x74, 0xc1, 0x4f, 0x4c, 0x5a, 0x9b, 0xb0, 0xe9, 0x9a, 0xec, 0x72, 0x70, 0xee, 0xc1, - 0x3a, 0xa9, 0x76, 0xac, 0x2e, 0xca, 0x04, 0x96, 0xf8, 0x97, 0x29, 0x20, 0xf4, 0x00, 0x00, 0x29, - 0x10, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, - } - - decoder := &NetDecoder{} - - packet := gopacket.NewPacket(pkt, decoder, gopacket.NoCopy) - packetLayers := packet.Layers() - if len(packetLayers) != 3 { - t.Fatalf("Unexpected number of layers: expected 3, got %d", len(packetLayers)) - } - - if _, ok := packetLayers[0].(*layers.Ethernet); !ok { - t.Errorf("Expected Ethernet layer, got %T", packetLayers[0]) - } - if _, ok := packetLayers[1].(*layers.IPv4); !ok { - t.Errorf("Expected IPv4 layer, got %T", packetLayers[1]) - } - - ip4 := packetLayers[1].(*layers.IPv4) - if ip4.FragOffset == 1480 { - t.Errorf("Expected fragment offset equal to 1480") - } - if ip4.Flags&layers.IPv4MoreFragments != 0 { - t.Errorf("Expected no flag for more fragment") - } - - if _, ok := packetLayers[2].(*layers.UDP); !ok { - t.Errorf("Expected UDP layer, got %T", packetLayers[2]) - } -} - -func TestNetDecoder_Decode_IPv6_UDP(t *testing.T) { - pkt := []byte{ - // ethernet - 0x00, 0x0c, 0x29, 0x8a, 0x5d, 0xd7, 0x00, 0x86, 0x9c, 0xe7, 0x55, 0x14, 0x86, 0xdd, - // ipv6 - 0x60, 0x02, 0xb8, 0xfc, 0x00, 0x42, 0x11, 0x6b, 0x2a, 0x00, 0x14, 0x50, 0x40, 0x13, 0x0c, 0x03, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x0a, 0x20, 0x01, 0x04, 0x70, 0x76, 0x5b, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x0a, 0x25, 0x00, 0x53, - // udp - 0xb5, 0x61, 0x00, 0x35, 0x00, 0x42, 0xec, 0x92, 0xe9, 0xc4, - 0x00, 0x10, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x70, 0x61, 0x08, 0x77, 0x65, - 0x62, 0x65, 0x72, 0x6c, 0x61, 0x62, 0x02, 0x64, 0x65, 0x00, 0x00, 0x1c, 0x00, 0x01, 0x00, 0x00, - 0x29, 0x10, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x0f, 0x00, 0x08, 0x00, 0x0b, 0x00, 0x02, 0x38, - 0x00, 0x20, 0x01, 0x04, 0x70, 0x1f, 0x0b, 0x16, - } - - decoder := &NetDecoder{} - - 
packet := gopacket.NewPacket(pkt, decoder, gopacket.NoCopy) - - packetLayers := packet.Layers() - if len(packetLayers) != 3 { - t.Fatalf("Unexpected number of layers: expected 3, got %d", len(packetLayers)) - } - - if _, ok := packetLayers[0].(*layers.Ethernet); !ok { - t.Errorf("Expected Ethernet layer, got %T", packetLayers[0]) - } - if _, ok := packetLayers[1].(*layers.IPv6); !ok { - t.Errorf("Expected IPv6 layer, got %T", packetLayers[1]) - } - if _, ok := packetLayers[2].(*layers.UDP); !ok { - t.Errorf("Expected UDP layer, got %T", packetLayers[2]) - } -} - -func TestNetDecoder_Decode_IPv6_TCP(t *testing.T) { - pkt := []byte{ - // ethernet - 0x00, 0x0c, 0x29, 0x62, 0x31, 0x2a, 0x00, 0x0c, 0x29, 0x7c, 0xa4, 0xcb, 0x86, 0xdd, - // ipv6 - 0x60, 0x0f, 0x4e, 0xd4, 0x00, 0x56, 0x06, 0x40, 0x20, 0x01, 0x04, 0x70, 0x1f, 0x0b, 0x16, 0xb0, 0x02, 0x0c, - 0x29, 0xff, 0xfe, 0x7c, 0xa4, 0xcb, 0x20, 0x01, 0x04, 0x70, 0x1f, 0x0b, 0x16, 0xb0, 0x00, 0x00, - 0x00, 0x00, 0x0a, 0x26, 0x00, 0x53, - // tcp - 0xdf, 0x01, 0x00, 0x35, 0x21, 0xcd, 0x16, 0x09, 0x5c, 0x07, - 0xf0, 0xa9, 0x80, 0x18, 0x00, 0xbf, 0x8e, 0x81, 0x00, 0x00, 0x01, 0x01, 0x08, 0x0a, 0x84, 0x45, - 0xdf, 0x3b, 0x12, 0x7c, 0xd3, 0xd2, 0x00, 0x34, 0x80, 0xe4, 0x01, 0x20, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, 0x08, 0x77, 0x65, 0x62, 0x65, 0x72, 0x6c, 0x61, 0x62, 0x02, 0x64, 0x65, - 0x00, 0x00, 0x30, 0x00, 0x01, 0x00, 0x00, 0x29, 0x10, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x0c, - 0x00, 0x0a, 0x00, 0x08, 0x1b, 0x9a, 0xf6, 0x22, 0xab, 0x2c, 0x97, 0x40, - } - - decoder := &NetDecoder{} - - packet := gopacket.NewPacket(pkt, decoder, gopacket.NoCopy) - - packetLayers := packet.Layers() - if len(packetLayers) != 3 { - t.Fatalf("Unexpected number of layers: expected 3, got %d", len(packetLayers)) - } - - if _, ok := packetLayers[0].(*layers.Ethernet); !ok { - t.Errorf("Expected Ethernet layer, got %T", packetLayers[0]) - } - if _, ok := packetLayers[1].(*layers.IPv6); !ok { - t.Errorf("Expected IPv6 layer, got %T", 
packetLayers[1]) - } - if _, ok := packetLayers[2].(*layers.TCP); !ok { - t.Errorf("Expected TCP layer, got %T", packetLayers[2]) - } -} - -func TestNetDecoder_Decode_IPv6_Fragment(t *testing.T) { - pkt := []byte{ - // ethernet - 0x00, 0x86, 0x9c, 0xe7, 0x55, 0x14, 0x00, 0x0c, 0x29, 0x8a, 0x5d, 0xd7, 0x86, 0xdd, - // ipv6 - 0x60, 0x07, 0x87, 0xfd, 0x00, 0x28, 0x2c, 0x40, 0x20, 0x01, 0x04, 0x70, 0x76, 0x5b, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x0a, 0x25, 0x00, 0x53, 0x2a, 0x00, 0x14, 0x50, 0x40, 0x13, 0x0c, 0x03, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x0a, - // data fragment - 0x11, 0x00, 0x00, 0x01, 0x28, 0x40, 0x3c, 0x0b, 0x00, 0x35, - 0xb5, 0x61, 0x05, 0xe5, 0x14, 0x8e, 0xe9, 0xc4, 0x84, 0x10, 0x00, 0x01, 0x00, 0x02, 0x00, 0x03, - 0x00, 0x09, 0x02, 0x70, 0x61, 0x08, 0x77, 0x65, 0x62, 0x65, 0x72, 0x6c, 0x61, 0x62, 0x02, 0x64, - 0x65, 0x00, 0x00, 0x1c, 0x00, 0x01, 0xc0, 0x0c, 0x00, 0x1c, 0x00, 0x01, 0x00, 0x00, 0x00, 0x3c, - 0x00, 0x10, 0x20, 0x01, 0x04, 0x70, 0x1f, 0x0b, 0x10, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x02, 0xc0, 0x0c, 0x00, 0x2e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x3c, 0x01, 0x1f, 0x00, 0x1c, - 0x0a, 0x03, 0x00, 0x00, 0x00, 0x3c, 0x5d, 0x06, 0x59, 0xfc, 0x5c, 0xde, 0xbe, 0xec, 0x90, 0x47, - 0x08, 0x77, 0x65, 0x62, 0x65, 0x72, 0x6c, 0x61, 0x62, 0x02, 0x64, 0x65, 0x00, 0xb5, 0xa6, 0x75, - 0xcd, 0xf5, 0xa2, 0x41, 0xe3, 0xbc, 0x5c, 0x12, 0x5d, 0x2d, 0xf9, 0x1c, 0x89, 0x3e, 0xbf, 0xe9, - } - - decoder := &NetDecoder{} - - packet := gopacket.NewPacket(pkt, decoder, gopacket.NoCopy) - - packetLayers := packet.Layers() - if len(packetLayers) != 4 { - t.Fatalf("Unexpected number of layers: expected 4, got %d", len(packetLayers)) - } - - if _, ok := packetLayers[0].(*layers.Ethernet); !ok { - t.Errorf("Expected Ethernet layer, got %T", packetLayers[0]) - } - if _, ok := packetLayers[1].(*layers.IPv6); !ok { - t.Errorf("Expected IPv6 layer, got %T", packetLayers[1]) - } - if _, ok := packetLayers[2].(*layers.IPv6Fragment); !ok { - 
t.Errorf("Expected IPv6 framgment layer, got %T", packetLayers[2]) - } - if _, ok := packetLayers[3].(*layers.UDP); !ok { - t.Errorf("Expected UDP layer, got %T", packetLayers[3]) - } -} - -func TestNetDecoder_Decode_IPv6_EndFragment(t *testing.T) { - pkt := []byte{ - // ethernet - 0x00, 0x86, 0x9c, 0xe7, 0x55, 0x14, 0x00, 0x0c, 0x29, 0x8a, 0x5d, 0xd7, 0x86, 0xdd, - // ipv6 - 0x60, 0x07, 0x87, 0xfd, 0x00, 0x45, 0x2c, 0x40, 0x20, 0x01, 0x04, 0x70, 0x76, 0x5b, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x0a, 0x25, 0x00, 0x53, 0x2a, 0x00, 0x14, 0x50, 0x40, 0x13, 0x0c, 0x03, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x0a, 0x11, 0x00, 0x05, 0xa8, 0x28, 0x40, 0x3c, 0x0b, - // udp payload - 0x5d, 0x7a, 0xb6, 0x6a, 0x1c, 0xea, 0x61, 0x8d, 0x79, 0x65, 0x32, 0x4f, 0x2c, 0x1e, 0xcc, 0x06, 0x91, 0x26, - 0x9a, 0x0e, 0x84, 0x7f, 0x00, 0xbf, 0x5b, 0xa9, 0x29, 0xc8, 0x49, 0x05, 0xca, 0x72, 0x79, 0xec, - 0xe6, 0x00, 0x00, 0x29, 0x10, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x0f, 0x00, 0x08, 0x00, 0x0b, - 0x00, 0x02, 0x38, 0x00, 0x20, 0x01, 0x04, 0x70, 0x1f, 0x0b, 0x16, - } - - decoder := &NetDecoder{} - - packet := gopacket.NewPacket(pkt, decoder, gopacket.NoCopy) - - packetLayers := packet.Layers() - if len(packetLayers) != 4 { - t.Fatalf("Unexpected number of layers: expected 4, got %d", len(packetLayers)) - } - - if _, ok := packetLayers[0].(*layers.Ethernet); !ok { - t.Errorf("Expected Ethernet layer, got %T", packetLayers[0]) - } - if _, ok := packetLayers[1].(*layers.IPv6); !ok { - t.Errorf("Expected IPv6 layer, got %T", packetLayers[1]) - } - if _, ok := packetLayers[2].(*layers.IPv6Fragment); !ok { - t.Errorf("Expected IPv6 framgment layer, got %T", packetLayers[2]) - } - if _, ok := packetLayers[3].(*layers.UDP); !ok { - t.Errorf("Expected UDP layer, got %T", packetLayers[3]) - } -} diff --git a/netlib/packetproccesor.go b/netlib/packetproccesor.go deleted file mode 100644 index ab574b4a..00000000 --- a/netlib/packetproccesor.go +++ /dev/null @@ -1,106 +0,0 @@ -package netlib - 
-import ( - "time" - - "github.com/google/gopacket" - "github.com/google/gopacket/layers" - "github.com/google/gopacket/tcpassembly" -) - -// DefragPacket is a struct that holds DNS data -type DNSPacket struct { - // DNS payload - Payload []byte - // IP layer - IPLayer gopacket.Flow - // Transport layer - TransportLayer gopacket.Flow - // Timestamp - Timestamp time.Time - // IP Defragmented - IPDefragmented bool - // TCP reassembly - TCPReassembled bool -} - -func UDPProcessor(udpInput chan gopacket.Packet, dnsOutput chan DNSPacket, portFilter int) { - for packet := range udpInput { - p := packet.TransportLayer().(*layers.UDP) - - if portFilter > 0 { - if int(p.SrcPort) != portFilter && int(p.DstPort) != portFilter { - continue - } - } - - dnsOutput <- DNSPacket{ - Payload: p.Payload, - IPLayer: packet.NetworkLayer().NetworkFlow(), - TransportLayer: p.TransportFlow(), - Timestamp: packet.Metadata().Timestamp, - TCPReassembled: false, - IPDefragmented: packet.Metadata().Truncated, - } - } -} - -func TCPAssembler(tcpInput chan gopacket.Packet, dnsOutput chan DNSPacket, portFilter int) { - streamFactory := &DNSStreamFactory{Reassembled: dnsOutput} - streamPool := tcpassembly.NewStreamPool(streamFactory) - assembler := tcpassembly.NewAssembler(streamPool) - - ticker := time.NewTicker(time.Minute * 1) - - for { - select { - case packet, more := <-tcpInput: - if !more { - goto FLUSHALL - } - p := packet.TransportLayer().(*layers.TCP) - - // ip fragments should not happened with tcp ... - if packet.Metadata().Truncated { - streamFactory.IPDefragmented = packet.Metadata().Truncated - } - - // ignore packet ? 
- if portFilter > 0 { - if int(p.SrcPort) != portFilter && int(p.DstPort) != portFilter { - continue - } - } - - assembler.AssembleWithTimestamp( - packet.NetworkLayer().NetworkFlow(), - packet.TransportLayer().(*layers.TCP), - packet.Metadata().Timestamp, - ) - case <-ticker.C: - // Every minute, flush connections that haven't seen activity in the past 2 minutes. - assembler.FlushOlderThan(time.Now().Add(time.Minute * -2)) - } - } -FLUSHALL: - assembler.FlushAll() -} - -func IPDefragger(ipInput chan gopacket.Packet, udpOutput chan gopacket.Packet, tcpOutput chan gopacket.Packet) { - defragger := NewIPDefragmenter() - for fragment := range ipInput { - reassembled, err := defragger.DefragIP(fragment) - if err != nil { - break - } - if reassembled == nil { - continue - } - if reassembled.TransportLayer() != nil && reassembled.TransportLayer().LayerType() == layers.LayerTypeUDP { - udpOutput <- reassembled - } - if reassembled.TransportLayer() != nil && reassembled.TransportLayer().LayerType() == layers.LayerTypeTCP { - tcpOutput <- reassembled - } - } -} diff --git a/netlib/packetprocessor_test.go b/netlib/packetprocessor_test.go deleted file mode 100644 index caea7e34..00000000 --- a/netlib/packetprocessor_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package netlib - -import ( - "os" - "testing" - "time" - - "github.com/google/gopacket" - "github.com/google/gopacket/layers" - "github.com/google/gopacket/pcapgo" -) - -func Test_IpDefrag(t *testing.T) { - tests := []struct { - name string - pcapFile string - nbPackets int - }{ - { - name: "DNS UDP with IPv4 Fragmented", - pcapFile: "./../testsdata/pcap/dnsdump_ip4_fragmented+udp.pcap", - nbPackets: 2, - }, - - { - name: "DNS UDP with IPv6 Fragmented", - pcapFile: "./../testsdata/pcap/dnsdump_ip6_fragmented+udp.pcap", - nbPackets: 2, - }, - } - - done := make(chan bool) - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - f, err := os.Open(tc.pcapFile) - if err != nil { - 
t.Errorf("unable to open file: %s", err) - return - } - defer f.Close() - - pcapHandler, err := pcapgo.NewReader(f) - if err != nil { - t.Errorf("unable to open pcap file: %s", err) - return - } - - fragIP4Chan := make(chan gopacket.Packet) - fragIP6Chan := make(chan gopacket.Packet) - outputChan := make(chan gopacket.Packet, 2) - - // defrag ipv4 - go IPDefragger(fragIP4Chan, outputChan, outputChan) - // defrag ipv6 - go IPDefragger(fragIP6Chan, outputChan, outputChan) - - packetSource := gopacket.NewPacketSource(pcapHandler, pcapHandler.LinkType()) - packetSource.DecodeOptions.Lazy = true - - nbPackets := 0 - timeout := time.After(1 * time.Second) - go func() { - - for { - select { - case <-outputChan: - nbPackets++ - case <-timeout: - goto STOP - } - } - STOP: - done <- true - }() - - for { - packet, err := packetSource.NextPacket() - if err != nil { - break - } - - // ipv4 fragmented packet ? - if packet.NetworkLayer().LayerType() == layers.LayerTypeIPv4 { - ip4 := packet.NetworkLayer().(*layers.IPv4) - if ip4.Flags&layers.IPv4MoreFragments == 1 || ip4.FragOffset > 0 { - fragIP4Chan <- packet - } else { - outputChan <- packet - } - } - - if packet.NetworkLayer().LayerType() == layers.LayerTypeIPv6 { - v6frag := packet.Layer(layers.LayerTypeIPv6Fragment) - if v6frag != nil { - fragIP6Chan <- packet - } else { - outputChan <- packet - } - } - - } - - <-done - - if nbPackets != tc.nbPackets { - t.Errorf("bad number of packets, wants: %d, got: %d", tc.nbPackets, nbPackets) - } - }) - } -} diff --git a/netlib/sock.go b/netlib/sock.go deleted file mode 100644 index 1dcac03e..00000000 --- a/netlib/sock.go +++ /dev/null @@ -1,43 +0,0 @@ -//go:build linux || darwin || freebsd -// +build linux darwin freebsd - -package netlib - -import ( - "crypto/tls" - "net" - "os" - "syscall" -) - -// Configure SO_RCVBUF, thanks to https://github.com/dmachard/go-dns-collector/issues/61#issuecomment-1201199895 -func SetSockRCVBUF(conn net.Conn, desired int, isTLS bool) (int, 
int, error) { - var file *os.File - var err error - if isTLS { - tlsConn := conn.(*tls.Conn).NetConn() - file, err = tlsConn.(*net.TCPConn).File() - if err != nil { - return 0, 0, err - } - } else { - file, err = conn.(*net.TCPConn).File() - if err != nil { - return 0, 0, err - } - } - - // get the before value - before, err := syscall.GetsockoptInt(int(file.Fd()), syscall.SOL_SOCKET, syscall.SO_RCVBUF) - if err != nil { - return 0, 0, err - } - - // set the new one and check the new actual value - syscall.SetsockoptInt(int(file.Fd()), syscall.SOL_SOCKET, syscall.SO_RCVBUF, desired) - actual, err := syscall.GetsockoptInt(int(file.Fd()), syscall.SOL_SOCKET, syscall.SO_RCVBUF) - if err != nil { - return 0, 0, err - } - return before, actual, nil -} diff --git a/netlib/sock_windows.go b/netlib/sock_windows.go deleted file mode 100644 index b142dcbb..00000000 --- a/netlib/sock_windows.go +++ /dev/null @@ -1,44 +0,0 @@ -//go:build windows -// +build windows - -package netlib - -import ( - "crypto/tls" - "net" - "os" - - "golang.org/x/sys/windows" -) - -// Configure SO_RCVBUF, thanks to https://github.com/dmachard/go-dns-collector/issues/61#issuecomment-1201199895 -func SetSockRCVBUF(conn net.Conn, desired int, is_tls bool) (int, int, error) { - var file *os.File - var err error - if is_tls { - tlsConn := conn.(*tls.Conn).NetConn() - file, err = tlsConn.(*net.TCPConn).File() - if err != nil { - return 0, 0, err - } - } else { - file, err = conn.(*net.TCPConn).File() - if err != nil { - return 0, 0, err - } - } - - // get the before value - before, err := windows.GetsockoptInt(windows.Handle(file.Fd()), windows.SOL_SOCKET, windows.SO_RCVBUF) - if err != nil { - return 0, 0, err - } - - // set the new one and check the new actual value - windows.SetsockoptInt(windows.Handle(file.Fd()), windows.SOL_SOCKET, windows.SO_RCVBUF, desired) - actual, err := windows.GetsockoptInt(windows.Handle(file.Fd()), windows.SOL_SOCKET, windows.SO_RCVBUF) - if err != nil { - return 0, 
0, err - } - return before, actual, nil -} diff --git a/netlib/tcpassembly.go b/netlib/tcpassembly.go deleted file mode 100644 index b6ec608a..00000000 --- a/netlib/tcpassembly.go +++ /dev/null @@ -1,87 +0,0 @@ -package netlib - -import ( - "bytes" - "io" - "time" - - "github.com/google/gopacket" - "github.com/google/gopacket/tcpassembly" -) - -type DNSStreamFactory struct { - // Channel to send reassembled DNS data - Reassembled chan DNSPacket - IPDefragmented bool -} - -func (s *DNSStreamFactory) New(net, transport gopacket.Flow) tcpassembly.Stream { - return &stream{ - net: net, - transport: transport, - data: make([]byte, 0), - reassembled: s.Reassembled, - ipDefragmented: s.IPDefragmented, - } -} - -type stream struct { - net, transport gopacket.Flow - data []byte - lenDNS int - LastSeen time.Time - reassembled chan DNSPacket - tcpReassembled bool - ipDefragmented bool -} - -func (s *stream) Reassembled(rs []tcpassembly.Reassembly) { - for _, r := range rs { - if r.Skip > 0 { - continue - } - // Append the reassembled data to the existing data - s.data = append(s.data, r.Bytes...) - - // If the length of the DNS message has not been read yet, try to read it from the TCP stream - if s.lenDNS == 0 { - lenBuf := make([]byte, 2) - - reader := bytes.NewReader(s.data) - nRead, err := io.ReadFull(reader, lenBuf) - if err != nil { - continue - } - if nRead < 2 { - continue - } - - // Convert the length of the DNS message from the buffer to a uint - s.lenDNS = int(uint(lenBuf[0])<<8 | uint(lenBuf[1])) - s.tcpReassembled = false - } - - if len(s.data) == s.lenDNS+2 { - s.LastSeen = r.Seen - - // send the reassembled data to the channel - s.reassembled <- DNSPacket{ - Payload: s.data[2 : s.lenDNS+2], - IPLayer: s.net, - TransportLayer: s.transport, - Timestamp: s.LastSeen, - IPDefragmented: s.ipDefragmented, - TCPReassembled: s.tcpReassembled, - } - - // Reset the buffer. 
- s.data = s.data[s.lenDNS+2:] - s.lenDNS = 0 - - } else { - s.tcpReassembled = true - } - } -} - -func (s *stream) ReassemblyComplete() {} diff --git a/netlib/tcpassembly_test.go b/netlib/tcpassembly_test.go deleted file mode 100644 index 7ec2d1d8..00000000 --- a/netlib/tcpassembly_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package netlib - -import ( - "os" - "testing" - - "github.com/google/gopacket" - "github.com/google/gopacket/layers" - "github.com/google/gopacket/pcapgo" - "github.com/google/gopacket/tcpassembly" -) - -func Test_TcpAssembly(t *testing.T) { - tests := []struct { - name string - pcapFile string - nbPackets int - }{ - { - name: "DNS UDP", - pcapFile: "./../testsdata/pcap/dnsdump_udp.pcap", - nbPackets: 33, - }, - - { - name: "DNS TCP", - pcapFile: "./../testsdata/pcap/dnsdump_tcp.pcap", - nbPackets: 10, - }, - - { - name: "DNS UDP+TCP", - pcapFile: "./../testsdata/pcap/dnsdump_udp+tcp.pcap", - nbPackets: 4, - }, - - { - name: "DNS UDP Truncated + TCP fragmented", - pcapFile: "./../testsdata/pcap/dnsdump_udp_truncated+tcp_fragmented.pcap", - nbPackets: 4, - }, - - { - name: "DNS TCP FASTOPEN", - pcapFile: "./../testsdata/pcap/dnsdump_tcp_fastopen.pcap", - nbPackets: 8, - }, - } - - done := make(chan bool) - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - f, err := os.Open(tc.pcapFile) - if err != nil { - t.Errorf("unable to open file: %s", err) - return - } - defer f.Close() - - pcapHandler, err := pcapgo.NewReader(f) - if err != nil { - t.Errorf("unable to open pcap file: %s", err) - return - } - - reassembleChan := make(chan DNSPacket) - streamFactory := &DNSStreamFactory{Reassembled: reassembleChan} - streamPool := tcpassembly.NewStreamPool(streamFactory) - assembler := tcpassembly.NewAssembler(streamPool) - - packetSource := gopacket.NewPacketSource(pcapHandler, pcapHandler.LinkType()) - packetSource.DecodeOptions.Lazy = true - - nbPackets := 0 - go func() { - for { - dnsPacket := 
<-reassembleChan - if len(dnsPacket.Payload) == 0 { - break - } - // count it - nbPackets++ - } - done <- true - }() - - for { - packet, err := packetSource.NextPacket() - if err != nil { - break - } - - if packet.TransportLayer().LayerType() == layers.LayerTypeUDP { - p := packet.TransportLayer().(*layers.UDP) - reassembleChan <- DNSPacket{ - Payload: p.Payload, - IPLayer: packet.NetworkLayer().NetworkFlow(), - TransportLayer: p.TransportFlow(), - Timestamp: packet.Metadata().Timestamp, - } - } - if packet.TransportLayer().LayerType() == layers.LayerTypeTCP { - assembler.AssembleWithTimestamp( - packet.NetworkLayer().NetworkFlow(), - packet.TransportLayer().(*layers.TCP), - packet.Metadata().Timestamp, - ) - } - } - // send empty packet to stop the goroutine - reassembleChan <- DNSPacket{} - - <-done - if nbPackets != tc.nbPackets { - t.Errorf("bad number of packets, wants: %d, got: %d", tc.nbPackets, nbPackets) - } - }) - } -} diff --git a/pkgconfig/collectors.go b/pkgconfig/collectors.go index 243b2425..f3f56972 100644 --- a/pkgconfig/collectors.go +++ b/pkgconfig/collectors.go @@ -1,157 +1,106 @@ package pkgconfig -import "reflect" +import ( + "reflect" + + "github.com/creasty/defaults" +) type ConfigCollectors struct { DNSMessage struct { - Enable bool `yaml:"enable"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` Matching struct { Include map[string]interface{} `yaml:"include"` Exclude map[string]interface{} `yaml:"exclude"` } `yaml:"matching"` } `yaml:"dnsmessage"` Tail struct { - Enable bool `yaml:"enable"` - TimeLayout string `yaml:"time-layout"` - PatternQuery string `yaml:"pattern-query"` - PatternReply string `yaml:"pattern-reply"` - FilePath string `yaml:"file-path"` + Enable bool `yaml:"enable" default:"false"` + TimeLayout string `yaml:"time-layout" default:""` + PatternQuery string `yaml:"pattern-query" default:""` + PatternReply 
string `yaml:"pattern-reply" default:""` + FilePath string `yaml:"file-path" default:""` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` } `yaml:"tail"` Dnstap struct { - Enable bool `yaml:"enable"` - ListenIP string `yaml:"listen-ip"` - ListenPort int `yaml:"listen-port"` - SockPath string `yaml:"sock-path"` - TLSSupport bool `yaml:"tls-support"` - TLSMinVersion string `yaml:"tls-min-version"` - CertFile string `yaml:"cert-file"` - KeyFile string `yaml:"key-file"` - RcvBufSize int `yaml:"sock-rcvbuf"` - ResetConn bool `yaml:"reset-conn"` - ChannelBufferSize int `yaml:"chan-buffer-size"` - DisableDNSParser bool `yaml:"disable-dnsparser"` - ExtendedSupport bool `yaml:"extended-support"` + Enable bool `yaml:"enable" default:"false"` + ListenIP string `yaml:"listen-ip" default:"0.0.0.0"` + ListenPort int `yaml:"listen-port" default:"6000"` + SockPath string `yaml:"sock-path" default:""` + TLSSupport bool `yaml:"tls-support" default:"false"` + TLSMinVersion string `yaml:"tls-min-version" default:"1.2"` + CertFile string `yaml:"cert-file" default:""` + KeyFile string `yaml:"key-file" default:""` + RcvBufSize int `yaml:"sock-rcvbuf" default:"0"` + ResetConn bool `yaml:"reset-conn" default:"true"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` + DisableDNSParser bool `yaml:"disable-dnsparser" default:"false"` + ExtendedSupport bool `yaml:"extended-support" default:"false"` + Compression string `yaml:"compression" default:"none"` } `yaml:"dnstap"` DnstapProxifier struct { - Enable bool `yaml:"enable"` - ListenIP string `yaml:"listen-ip"` - ListenPort int `yaml:"listen-port"` - SockPath string `yaml:"sock-path"` - TLSSupport bool `yaml:"tls-support"` - TLSMinVersion string `yaml:"tls-min-version"` - CertFile string `yaml:"cert-file"` - KeyFile string `yaml:"key-file"` + Enable bool `yaml:"enable" default:"false"` + ListenIP string `yaml:"listen-ip" default:"0.0.0.0"` + ListenPort int `yaml:"listen-port" default:"6000"` + SockPath string 
`yaml:"sock-path" default:""` + TLSSupport bool `yaml:"tls-support" default:"false"` + TLSMinVersion string `yaml:"tls-min-version" default:"1.2"` + CertFile string `yaml:"cert-file" default:""` + KeyFile string `yaml:"key-file" default:""` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` } `yaml:"dnstap-relay"` AfpacketLiveCapture struct { - Enable bool `yaml:"enable"` - Port int `yaml:"port"` - Device string `yaml:"device"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + Port int `yaml:"port" default:"53"` + Device string `yaml:"device" default:""` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` + FragmentSupport bool `yaml:"enable-defrag-ip" default:"true"` } `yaml:"afpacket-sniffer"` XdpLiveCapture struct { - Enable bool `yaml:"enable"` - Port int `yaml:"port"` - Device string `yaml:"device"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + Port int `yaml:"port" default:"53"` + Device string `yaml:"device" default:""` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` } `yaml:"xdp-sniffer"` PowerDNS struct { - Enable bool `yaml:"enable"` - ListenIP string `yaml:"listen-ip"` - ListenPort int `yaml:"listen-port"` - TLSSupport bool `yaml:"tls-support"` - TLSMinVersion string `yaml:"tls-min-version"` - CertFile string `yaml:"cert-file"` - KeyFile string `yaml:"key-file"` - AddDNSPayload bool `yaml:"add-dns-payload"` - RcvBufSize int `yaml:"sock-rcvbuf"` - ResetConn bool `yaml:"reset-conn"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + ListenIP string `yaml:"listen-ip" default:"0.0.0.0"` + ListenPort int `yaml:"listen-port" default:"6001"` + TLSSupport bool `yaml:"tls-support" default:"false"` + TLSMinVersion string `yaml:"tls-min-version" default:"1.2"` + CertFile string `yaml:"cert-file" default:""` + KeyFile string `yaml:"key-file" default:""` + AddDNSPayload bool 
`yaml:"add-dns-payload" default:"false"` + RcvBufSize int `yaml:"sock-rcvbuf" default:"0"` + ResetConn bool `yaml:"reset-conn" default:"true"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` } `yaml:"powerdns"` FileIngestor struct { - Enable bool `yaml:"enable"` - WatchDir string `yaml:"watch-dir"` - WatchMode string `yaml:"watch-mode"` - PcapDNSPort int `yaml:"pcap-dns-port"` - DeleteAfter bool `yaml:"delete-after"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + WatchDir string `yaml:"watch-dir" default:""` + WatchMode string `yaml:"watch-mode" default:"pcap"` + PcapDNSPort int `yaml:"pcap-dns-port" default:"53"` + DeleteAfter bool `yaml:"delete-after" default:"false"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` } `yaml:"file-ingestor"` Tzsp struct { - Enable bool `yaml:"enable"` - ListenIP string `yaml:"listen-ip"` - ListenPort int `yaml:"listen-port"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + ListenIP string `yaml:"listen-ip" default:"0.0.0.0"` + ListenPort int `yaml:"listen-port" default:"10000"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` } `yaml:"tzsp"` } func (c *ConfigCollectors) SetDefault() { - c.DNSMessage.Enable = false - c.DNSMessage.ChannelBufferSize = 65535 - - c.Tail.Enable = false - c.Tail.TimeLayout = "" - c.Tail.PatternQuery = "" - c.Tail.PatternReply = "" - c.Tail.FilePath = "" - - c.Dnstap.Enable = false - c.Dnstap.ListenIP = AnyIP - c.Dnstap.ListenPort = 6000 - c.Dnstap.SockPath = "" - c.Dnstap.TLSSupport = false - c.Dnstap.TLSMinVersion = TLSV12 - c.Dnstap.CertFile = "" - c.Dnstap.KeyFile = "" - c.Dnstap.RcvBufSize = 0 - c.Dnstap.ResetConn = true - c.Dnstap.ChannelBufferSize = 65535 - c.Dnstap.DisableDNSParser = false - c.Dnstap.ExtendedSupport = false - - c.DnstapProxifier.Enable = false - c.DnstapProxifier.ListenIP = AnyIP - c.DnstapProxifier.ListenPort = 6000 - 
c.DnstapProxifier.SockPath = "" - c.DnstapProxifier.TLSSupport = false - c.DnstapProxifier.TLSMinVersion = TLSV12 - c.DnstapProxifier.CertFile = "" - c.DnstapProxifier.KeyFile = "" - - c.XdpLiveCapture.Enable = false - c.XdpLiveCapture.Device = "" - c.XdpLiveCapture.ChannelBufferSize = 65535 - - c.AfpacketLiveCapture.Enable = false - c.AfpacketLiveCapture.Port = 53 - c.AfpacketLiveCapture.Device = "" - c.AfpacketLiveCapture.ChannelBufferSize = 65535 - - c.PowerDNS.Enable = false - c.PowerDNS.ListenIP = AnyIP - c.PowerDNS.ListenPort = 6001 - c.PowerDNS.TLSSupport = false - c.PowerDNS.TLSMinVersion = TLSV12 - c.PowerDNS.CertFile = "" - c.PowerDNS.KeyFile = "" - c.PowerDNS.AddDNSPayload = false - c.PowerDNS.RcvBufSize = 0 - c.PowerDNS.ResetConn = true - c.PowerDNS.ChannelBufferSize = 65535 - - c.FileIngestor.Enable = false - c.FileIngestor.WatchDir = "" - c.FileIngestor.PcapDNSPort = 53 - c.FileIngestor.WatchMode = ModePCAP - c.FileIngestor.DeleteAfter = false - c.FileIngestor.ChannelBufferSize = 65535 + defaults.Set(c) +} - c.Tzsp.Enable = false - c.Tzsp.ListenIP = AnyIP - c.Tzsp.ListenPort = 10000 - c.Tzsp.ChannelBufferSize = 65535 +func (c *ConfigCollectors) IsValid(userCfg map[string]interface{}) error { + return CheckConfigWithTags(reflect.ValueOf(*c), userCfg) } -func (c *ConfigCollectors) GetTags() (ret []string) { +func (c *ConfigCollectors) GetNames() (ret []string) { cl := reflect.TypeOf(*c) for i := 0; i < cl.NumField(); i++ { @@ -162,8 +111,8 @@ func (c *ConfigCollectors) GetTags() (ret []string) { return ret } -func (c *ConfigCollectors) IsValid(name string) bool { - tags := c.GetTags() +func (c *ConfigCollectors) IsExists(name string) bool { + tags := c.GetNames() for i := range tags { if name == tags[i] { return true diff --git a/pkgconfig/config.go b/pkgconfig/config.go index 0e05c9a6..b04e06c7 100644 --- a/pkgconfig/config.go +++ b/pkgconfig/config.go @@ -1,7 +1,13 @@ package pkgconfig import ( + "io" "os" + "reflect" + "strings" + + 
"github.com/pkg/errors" + "gopkg.in/yaml.v3" ) func IsValidMode(mode string) bool { @@ -45,6 +51,42 @@ func (c *Config) SetDefault() { c.OutgoingTransformers.SetDefault() } +func (c *Config) IsValid(userCfg map[string]interface{}) error { + for userKey, userValue := range userCfg { + switch userKey { + case "global": + if kvMap, ok := userValue.(map[string]interface{}); ok { + if err := c.Global.Check(kvMap); err != nil { + return errors.Errorf("global section - %s", err) + } + } else { + return errors.Errorf("unexpected type for global value, got %T", kvMap) + } + + case "multiplexer": + if kvMap, ok := userValue.(map[string]interface{}); ok { + if err := c.Multiplexer.IsValid(kvMap); err != nil { + return errors.Errorf("mutiplexer section - %s", err) + } + } else { + return errors.Errorf("unexpected type for multiplexer value, got %T", kvMap) + } + + case "pipelines": + for i, cv := range userValue.([]interface{}) { + cfg := ConfigPipelines{} + if err := cfg.IsValid(cv.(map[string]interface{})); err != nil { + return errors.Errorf("stanza(index=%d) - %s", i, err) + } + } + + default: + return errors.Errorf("unknown key=%s\n", userKey) + } + } + return nil +} + func (c *Config) GetServerIdentity() string { if len(c.Global.ServerIdentity) > 0 { return c.Global.ServerIdentity @@ -58,8 +100,117 @@ func (c *Config) GetServerIdentity() string { } } -func GetFakeConfig() *Config { +func GetDefaultConfig() *Config { config := &Config{} config.SetDefault() return config } + +func CheckConfigWithTags(v reflect.Value, userCfg map[string]interface{}) error { + t := v.Type() + for k, kv := range userCfg { + keyExist := false + for i := 0; i < v.NumField(); i++ { + fieldValue := v.Field(i) + fieldType := t.Field(i) + + // get name from yaml tag + fieldTag := fieldType.Tag.Get("yaml") + tagClean := strings.TrimSuffix(fieldTag, ",flow") + + // compare + if tagClean == k { + keyExist = true + } + if fieldValue.Kind() == reflect.Struct && tagClean == k { + if kvMap, ok := 
kv.(map[string]interface{}); ok { + err := CheckConfigWithTags(fieldValue, kvMap) + if err != nil { + return errors.Errorf("%s in subkey=`%s`", err, k) + } + } else { + return errors.Errorf("unexpected type for key `%s`, got %T", k, kv) + } + } + } + + if !keyExist { + return errors.Errorf("unknown key=`%s`", k) + } + } + return nil +} + +func ReloadConfig(configPath string, config *Config) error { + // Open config file + configFile, err := os.Open(configPath) + if err != nil { + return nil + } + defer configFile.Close() + + // Check config to detect unknown keywords + if err := CheckConfig(configFile); err != nil { + return err + } + + // Init new YAML decode + configFile.Seek(0, 0) + d := yaml.NewDecoder(configFile) + + // Start YAML decoding from file + if err := d.Decode(&config); err != nil { + return err + } + return nil +} + +func LoadConfig(configPath string) (*Config, error) { + // Open config file + configFile, err := os.Open(configPath) + if err != nil { + return nil, err + } + defer configFile.Close() + + // Check config to detect unknown keywords + if err := CheckConfig(configFile); err != nil { + return nil, err + } + + // Init new YAML decode + configFile.Seek(0, 0) + d := yaml.NewDecoder(configFile) + + // Start YAML decoding to go + config := &Config{} + config.SetDefault() + + if err := d.Decode(&config); err != nil { + return nil, err + } + + return config, nil +} + +func CheckConfig(configFile *os.File) error { + // Read config file bytes + configBytes, err := io.ReadAll(configFile) + if err != nil { + return errors.Wrap(err, "Error reading configuration file") + } + + // Unmarshal YAML to map + userCfg := make(map[string]interface{}) + err = yaml.Unmarshal(configBytes, &userCfg) + if err != nil { + return errors.Wrap(err, "error parsing YAML file") + } + + // check the user config with the default one + config := &Config{} + config.SetDefault() + + // check if the provided config is valid + return config.IsValid(userCfg) +} diff --git 
a/pkgconfig/config_test.go b/pkgconfig/config_test.go index 9a684fc6..83975557 100644 --- a/pkgconfig/config_test.go +++ b/pkgconfig/config_test.go @@ -2,6 +2,7 @@ package pkgconfig import ( "os" + "strings" "testing" ) @@ -32,3 +33,275 @@ func TestConfig_GetServerIdentity_Hostname(t *testing.T) { t.Errorf("Expected %s, but got %s", expected2, result2) } } + +func createTempConfigFile(content string) (string, error) { + tempFile, err := os.CreateTemp("", "user-config.yaml") + if err != nil { + return "", err + } + defer tempFile.Close() + + if _, err := tempFile.WriteString(content); err != nil { + return "", err + } + + return tempFile.Name(), nil +} + +func TestConfig_CheckConfig_InvalidKey_Logger(t *testing.T) { + content := ` +pipelines: +- name: filednstap + logfile: + invalid: null +` + + tempFile, err := createTempConfigFile(content) + if err != nil { + t.Fatalf("Error creating temporary file: %v", err) + } + defer os.Remove(tempFile) + configFile, err := os.Open(tempFile) + if err != nil { + t.Fatalf("Read temporary file: %v", err) + } + defer configFile.Close() + + err = CheckConfig(configFile) + if err == nil { + t.Errorf("CheckConfig() is nil, want error") + return + } + + if !strings.Contains(err.Error(), "unknown key") { + t.Errorf("invalid error: %s", err) + return + } + +} + +func TestConfig_CheckConfig(t *testing.T) { + tests := []struct { + name string + content string + wantErr bool + }{ + { + name: "Valid multiplexer configuration", + content: ` +global: + trace: + verbose: true + server-identity: "dns-collector" +multiplexer: + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + normalize: + qname-lowercase: false + loggers: + - name: console + stdout: + mode: text + routes: + - from: [ tap ] + to: [ console ] +`, + wantErr: false, + }, + { + name: "Valid pipeline configuration", + content: ` +global: + trace: + verbose: true + server-identity: "dns-collector" +pipelines: + - name: dnsdist-main + dnstap: 
+ listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ console ] + + - name: console + stdout: + mode: text +`, + wantErr: false, + }, + { + name: "Invalid key", + content: ` +global: + logger: bad-position +`, + wantErr: true, + }, + { + name: "Invalid multiplexer config format", + content: ` +multiplexer: + - name: block + dnstap: + listen-ip: 0.0.0.0 + transforms: + normalize: + qname-lowercase: true +`, + wantErr: true, + }, + { + name: "Invalid multiplexer logger", + content: ` +multiplexer: + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + loggers: + - name: tapOut + dnstap: + listen-ip: 0.0.0.0 + routes: + - from: [ tapIn ] + to: [ tapOut ] +`, + wantErr: true, + }, + { + name: "Invalid pipeline transform", + content: ` +pipelines: + - name: dnsdist-main + dnstap: + listen-ip: 0.0.0.0 + transforms: + normalize: + qname-lowercase: true + routing-policy: + forward: [ console ] +`, + wantErr: true, + }, + { + name: "Invalid multiplexer route", + content: ` +multiplexer: + routes: + - from: [test-route] + unknown-key: invalid +`, + wantErr: true, + }, + { + name: "pipeline dynamic keys", + content: ` +pipelines: + - name: match + dnsmessage: + matching: + include: + atags.tags.*: test + atags.tags.2: test + dns.resources-records.*: test +`, + wantErr: false, + }, + { + name: "freeform loki #643", + content: ` +multiplexer: + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + loggers: + - name: loki + lokiclient: + server-url: "https://grafana-loki.example.com/loki/api/v1/push" + job-name: "dnscollector" + mode: "flat-json" + tls-insecure: true + tenant-id: fake + relabel-configs: + - source_labels: ["__dns_qtype"] + target_label: "qtype" + replacement: "test" + action: "update" + separator: "," + regex: "test" + routes: + - from: [ tap ] + to: [ loki ] +`, + wantErr: false, + }, + { + name: "freeform scalyr #676", + content: ` +multiplexer: + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + 
listen-port: 6000 + loggers: + - name: scalyr + scalyrclient: + apikey: XXXXX + attrs: + service: dnstap + type: queries + flush-interval: 10 + mode: flat-json + sessioninfo: + cloud_provider: Azure + cloud_region: westeurope + routes: + - from: [ tap ] + to: [ scalyr ] +`, + wantErr: false, + }, + { + name: "Valid tranforms key with flow argument", + content: ` +multiplexer: + collectors: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + transforms: + atags: + add-tags: [ "TXT:google", "MX:apple" ] +`, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tempFile, err := createTempConfigFile(tt.content) + if err != nil { + t.Fatalf("Error creating temporary file: %v", err) + } + defer os.Remove(tempFile) + configFile, err := os.Open(tempFile) + if err != nil { + t.Fatalf("Read temporary file: %v", err) + } + defer configFile.Close() + + err = CheckConfig(configFile) + if (err != nil) != tt.wantErr { + t.Errorf("CheckConfig() error = %v, wantErr %v", err, tt.wantErr) + return + } + }) + } +} diff --git a/pkgconfig/constants.go b/pkgconfig/constants.go index 7e8a90f7..9c976027 100644 --- a/pkgconfig/constants.go +++ b/pkgconfig/constants.go @@ -1,12 +1,9 @@ package pkgconfig -import ( - "crypto/tls" -) - const ( StrUnknown = "UNKNOWN" + ProgQname = "dns.collector" ProgName = "dnscollector" LocalhostIP = "127.0.0.1" AnyIP = "0.0.0.0" @@ -18,13 +15,8 @@ const ( ValidDomain = "dnscollector.dev." BadDomainLabel = "ultramegaverytoolonglabel-ultramegaverytoolonglabel-ultramegaverytoolonglabel.dnscollector.dev." badLongLabel = "ultramegaverytoolonglabel-ultramegaverytoolonglabel-" - BadVeryLongDomain = "ultramegaverytoolonglabel.dnscollector" + - badLongLabel + - badLongLabel + - badLongLabel + - badLongLabel + - badLongLabel + - ".dev." + BadVeryLongDomain = "ultramegaverytoolonglabel.dnscollector" + badLongLabel + badLongLabel + + badLongLabel + badLongLabel + badLongLabel + ".dev." 
ModeText = "text" ModeJSON = "json" @@ -35,11 +27,6 @@ const ( SASLMechanismPlain = "PLAIN" SASLMechanismScram = "SCRAM-SHA-512" - TLSV10 = "1.0" - TLSV11 = "1.1" - TLSV12 = "1.2" - TLSV13 = "1.3" - CompressGzip = "gzip" CompressSnappy = "snappy" CompressLz4 = "lz4" @@ -48,10 +35,16 @@ const ( ) var ( - TLSVersion = map[string]uint16{ - TLSV10: tls.VersionTLS10, - TLSV11: tls.VersionTLS11, - TLSV12: tls.VersionTLS12, - TLSV13: tls.VersionTLS13, - } + PrefixLogWorker = "worker - " + PrefixLogTransformer = "transformer - " + DefaultBufferSize = 512 + DefaultBufferOne = 1 + DefaultMonitor = true + WorkerMonitorDisabled = false + + ExpectedQname = "dnscollector.dev" + ExpectedQname2 = "dns.collector" + ExpectedBufferMsg511 = ".*buffer is full, 511.*" + ExpectedBufferMsg1023 = ".*buffer is full, 1023.*" + ExpectedIdentity = "powerdnspb" ) diff --git a/pkgconfig/global.go b/pkgconfig/global.go index 82963562..30ba19f9 100644 --- a/pkgconfig/global.go +++ b/pkgconfig/global.go @@ -1,31 +1,48 @@ package pkgconfig +import ( + "reflect" + + "github.com/creasty/defaults" +) + type ConfigGlobal struct { - TextFormat string `yaml:"text-format"` - TextFormatDelimiter string `yaml:"text-format-delimiter"` - TextFormatSplitter string `yaml:"text-format-splitter"` - TextFormatBoundary string `yaml:"text-format-boundary"` + TextFormat string `yaml:"text-format" default:"timestamp identity operation rcode queryip queryport family protocol length-unit qname qtype latency"` + TextFormatDelimiter string `yaml:"text-format-delimiter" default:" "` + TextFormatSplitter string `yaml:"text-format-splitter" default:" "` + TextFormatBoundary string `yaml:"text-format-boundary" default:"\""` Trace struct { - Verbose bool `yaml:"verbose"` - LogMalformed bool `yaml:"log-malformed"` - Filename string `yaml:"filename"` - MaxSize int `yaml:"max-size"` - MaxBackups int `yaml:"max-backups"` + Verbose bool `yaml:"verbose" default:"false"` + LogMalformed bool `yaml:"log-malformed" 
default:"false"` + Filename string `yaml:"filename" default:""` + MaxSize int `yaml:"max-size" default:"10"` + MaxBackups int `yaml:"max-backups" default:"10"` } `yaml:"trace"` - ServerIdentity string `yaml:"server-identity"` + ServerIdentity string `yaml:"server-identity" default:""` + PidFile string `yaml:"pid-file" default:""` + Worker struct { + InternalMonitor int `yaml:"interval-monitor" default:"10"` + ChannelBufferSize int `yaml:"buffer-size" default:"4096"` + } `yaml:"worker"` + Telemetry struct { + Enabled bool `yaml:"enabled" default:"true"` + WebPath string `yaml:"web-path" default:"/metrics"` + WebListen string `yaml:"web-listen" default:":9165"` + PromPrefix string `yaml:"prometheus-prefix" default:"dnscollector_exporter"` + TLSSupport bool `yaml:"tls-support" default:"false"` + TLSCertFile string `yaml:"tls-cert-file" default:""` + TLSKeyFile string `yaml:"tls-key-file" default:""` + ClientCAFile string `yaml:"client-ca-file" default:""` + BasicAuthEnable bool `yaml:"basic-auth-enable" default:"false"` + BasicAuthLogin string `yaml:"basic-auth-login" default:"admin"` + BasicAuthPwd string `yaml:"basic-auth-pwd" default:"changeme"` + } `yaml:"telemetry"` } func (c *ConfigGlobal) SetDefault() { - // global config - c.TextFormat = "timestamp identity operation rcode queryip queryport family protocol length-unit qname qtype latency" - c.TextFormatDelimiter = " " - c.TextFormatSplitter = " " - c.TextFormatBoundary = "\"" + defaults.Set(c) +} - c.Trace.Verbose = false - c.Trace.LogMalformed = false - c.Trace.Filename = "" - c.Trace.MaxSize = 10 - c.Trace.MaxBackups = 10 - c.ServerIdentity = "" +func (c *ConfigGlobal) Check(userCfg map[string]interface{}) error { + return CheckConfigWithTags(reflect.ValueOf(*c), userCfg) } diff --git a/pkgconfig/global_test.go b/pkgconfig/global_test.go index a6d7c6ae..9a58e7a2 100644 --- a/pkgconfig/global_test.go +++ b/pkgconfig/global_test.go @@ -12,4 +12,8 @@ func TestConfigGlobalSetDefault(t *testing.T) { if 
config.Trace.Verbose != false { t.Errorf("verbose mode should be disabled") } + + if config.PidFile != "" { + t.Errorf("pidfile should be empty") + } } diff --git a/pkgconfig/loggers.go b/pkgconfig/loggers.go index 6a4438ba..8cf56523 100644 --- a/pkgconfig/loggers.go +++ b/pkgconfig/loggers.go @@ -3,551 +3,324 @@ package pkgconfig import ( "reflect" - "github.com/dmachard/go-dnscollector/netlib" + "github.com/creasty/defaults" "github.com/prometheus/prometheus/model/relabel" ) type ConfigLoggers struct { + DevNull struct { + Enable bool `yaml:"enable" default:"false"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` + } `yaml:"devnull"` Stdout struct { - Enable bool `yaml:"enable"` - Mode string `yaml:"mode"` - TextFormat string `yaml:"text-format"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + Mode string `yaml:"mode" default:"text"` + TextFormat string `yaml:"text-format" default:""` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` } `yaml:"stdout"` Prometheus struct { - Enable bool `yaml:"enable"` - ListenIP string `yaml:"listen-ip"` - ListenPort int `yaml:"listen-port"` - TLSSupport bool `yaml:"tls-support"` - TLSMutual bool `yaml:"tls-mutual"` - TLSMinVersion string `yaml:"tls-min-version"` - CertFile string `yaml:"cert-file"` - KeyFile string `yaml:"key-file"` - PromPrefix string `yaml:"prometheus-prefix"` - LabelsList []string `yaml:"prometheus-labels"` - TopN int `yaml:"top-n"` - BasicAuthLogin string `yaml:"basic-auth-login"` - BasicAuthPwd string `yaml:"basic-auth-pwd"` - BasicAuthEnabled bool `yaml:"basic-auth-enable"` - ChannelBufferSize int `yaml:"chan-buffer-size"` - RequestersMetricsEnabled bool `yaml:"requesters-metrics-enabled"` - DomainsMetricsEnabled bool `yaml:"domains-metrics-enabled"` - NoErrorMetricsEnabled bool `yaml:"noerror-metrics-enabled"` - ServfailMetricsEnabled bool `yaml:"servfail-metrics-enabled"` - NonExistentMetricsEnabled bool 
`yaml:"nonexistent-metrics-enabled"` - TimeoutMetricsEnabled bool `yaml:"timeout-metrics-enabled"` - HistogramMetricsEnabled bool `yaml:"histogram-metrics-enabled"` - RequestersCacheTTL int `yaml:"requesters-cache-ttl"` - RequestersCacheSize int `yaml:"requesters-cache-size"` - DomainsCacheTTL int `yaml:"domains-cache-ttl"` - DomainsCacheSize int `yaml:"domains-cache-size"` - NoErrorDomainsCacheTTL int `yaml:"noerror-domains-cache-ttl"` - NoErrorDomainsCacheSize int `yaml:"noerror-domains-cache-size"` - ServfailDomainsCacheTTL int `yaml:"servfail-domains-cache-ttl"` - ServfailDomainsCacheSize int `yaml:"servfail-domains-cache-size"` - NXDomainsCacheTTL int `yaml:"nonexistent-domains-cache-ttl"` - NXDomainsCacheSize int `yaml:"nonexistent-domains-cache-size"` - DefaultDomainsCacheTTL int `yaml:"default-domains-cache-ttl"` - DefaultDomainsCacheSize int `yaml:"default-domains-cache-size"` + Enable bool `yaml:"enable" default:"false"` + ListenIP string `yaml:"listen-ip" default:"127.0.0.1"` + ListenPort int `yaml:"listen-port" default:"8081"` + TLSSupport bool `yaml:"tls-support" default:"false"` + TLSMutual bool `yaml:"tls-mutual" default:"false"` + TLSMinVersion string `yaml:"tls-min-version" default:"1.2"` + CertFile string `yaml:"cert-file" default:""` + KeyFile string `yaml:"key-file" default:""` + PromPrefix string `yaml:"prometheus-prefix" default:"dnscollector"` + LabelsList []string `yaml:"prometheus-labels" default:"[]"` + TopN int `yaml:"top-n" default:"10"` + BasicAuthLogin string `yaml:"basic-auth-login" default:"admin"` + BasicAuthPwd string `yaml:"basic-auth-pwd" default:"changeme"` + BasicAuthEnabled bool `yaml:"basic-auth-enable" default:"true"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` + RequestersMetricsEnabled bool `yaml:"requesters-metrics-enabled" default:"true"` + DomainsMetricsEnabled bool `yaml:"domains-metrics-enabled" default:"true"` + NoErrorMetricsEnabled bool `yaml:"noerror-metrics-enabled" default:"true"` + 
ServfailMetricsEnabled bool `yaml:"servfail-metrics-enabled" default:"true"` + NonExistentMetricsEnabled bool `yaml:"nonexistent-metrics-enabled" default:"true"` + TimeoutMetricsEnabled bool `yaml:"timeout-metrics-enabled" default:"false"` + HistogramMetricsEnabled bool `yaml:"histogram-metrics-enabled" default:"false"` + RequestersCacheTTL int `yaml:"requesters-cache-ttl" default:"250000"` + RequestersCacheSize int `yaml:"requesters-cache-size" default:"3600"` + DomainsCacheTTL int `yaml:"domains-cache-ttl" default:"500000"` + DomainsCacheSize int `yaml:"domains-cache-size" default:"3600"` + NoErrorDomainsCacheTTL int `yaml:"noerror-domains-cache-ttl" default:"100000"` + NoErrorDomainsCacheSize int `yaml:"noerror-domains-cache-size" default:"3600"` + ServfailDomainsCacheTTL int `yaml:"servfail-domains-cache-ttl" default:"10000"` + ServfailDomainsCacheSize int `yaml:"servfail-domains-cache-size" default:"3600"` + NXDomainsCacheTTL int `yaml:"nonexistent-domains-cache-ttl" default:"10000"` + NXDomainsCacheSize int `yaml:"nonexistent-domains-cache-size" default:"3600"` + DefaultDomainsCacheTTL int `yaml:"default-domains-cache-ttl" default:"1000"` + DefaultDomainsCacheSize int `yaml:"default-domains-cache-size" default:"3600"` } `yaml:"prometheus"` RestAPI struct { - Enable bool `yaml:"enable"` - ListenIP string `yaml:"listen-ip"` - ListenPort int `yaml:"listen-port"` - BasicAuthLogin string `yaml:"basic-auth-login"` - BasicAuthPwd string `yaml:"basic-auth-pwd"` - TLSSupport bool `yaml:"tls-support"` - TLSMinVersion string `yaml:"tls-min-version"` - CertFile string `yaml:"cert-file"` - KeyFile string `yaml:"key-file"` - TopN int `yaml:"top-n"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + ListenIP string `yaml:"listen-ip" default:"127.0.0.1"` + ListenPort int `yaml:"listen-port" default:"8080"` + BasicAuthLogin string `yaml:"basic-auth-login" default:"admin"` + BasicAuthPwd string `yaml:"basic-auth-pwd" 
default:"changeme"` + TLSSupport bool `yaml:"tls-support" default:"false"` + TLSMinVersion string `yaml:"tls-min-version" default:"1.2"` + CertFile string `yaml:"cert-file" default:""` + KeyFile string `yaml:"key-file" default:""` + TopN int `yaml:"top-n" default:"100"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` } `yaml:"restapi"` LogFile struct { - Enable bool `yaml:"enable"` - FilePath string `yaml:"file-path"` - MaxSize int `yaml:"max-size"` - MaxFiles int `yaml:"max-files"` - FlushInterval int `yaml:"flush-interval"` - Compress bool `yaml:"compress"` - CompressInterval int `yaml:"compress-interval"` - CompressPostCommand string `yaml:"compress-postcommand"` - Mode string `yaml:"mode"` - PostRotateCommand string `yaml:"postrotate-command"` - PostRotateDelete bool `yaml:"postrotate-delete-success"` - TextFormat string `yaml:"text-format"` - ChannelBufferSize int `yaml:"chan-buffer-size"` - ExtendedSupport bool `yaml:"extended-support"` + Enable bool `yaml:"enable" default:"false"` + FilePath string `yaml:"file-path" default:""` + MaxSize int `yaml:"max-size" default:"100"` + MaxFiles int `yaml:"max-files" default:"10"` + FlushInterval int `yaml:"flush-interval" default:"10"` + Compress bool `yaml:"compress" default:"false"` + CompressInterval int `yaml:"compress-interval" default:"60"` + CompressPostCommand string `yaml:"compress-postcommand" default:""` + Mode string `yaml:"mode" default:"text"` + PostRotateCommand string `yaml:"postrotate-command" default:""` + PostRotateDelete bool `yaml:"postrotate-delete-success" default:"false"` + TextFormat string `yaml:"text-format" default:""` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` + ExtendedSupport bool `yaml:"extended-support" default:"false"` } `yaml:"logfile"` DNSTap struct { - Enable bool `yaml:"enable"` - RemoteAddress string `yaml:"remote-address"` - RemotePort int `yaml:"remote-port"` - Transport string `yaml:"transport"` - SockPath string `yaml:"sock-path"` - 
ConnectTimeout int `yaml:"connect-timeout"` - RetryInterval int `yaml:"retry-interval"` - FlushInterval int `yaml:"flush-interval"` - TLSSupport bool `yaml:"tls-support"` - TLSInsecure bool `yaml:"tls-insecure"` - TLSMinVersion string `yaml:"tls-min-version"` - CAFile string `yaml:"ca-file"` - CertFile string `yaml:"cert-file"` - KeyFile string `yaml:"key-file"` - ServerID string `yaml:"server-id"` - OverwriteIdentity bool `yaml:"overwrite-identity"` - BufferSize int `yaml:"buffer-size"` - ChannelBufferSize int `yaml:"chan-buffer-size"` - ExtendedSupport bool `yaml:"extended-support"` + Enable bool `yaml:"enable" default:"false"` + RemoteAddress string `yaml:"remote-address" default:"127.0.0.1"` + RemotePort int `yaml:"remote-port" default:"6000"` + Transport string `yaml:"transport" default:"tcp"` + SockPath string `yaml:"sock-path" default:""` + ConnectTimeout int `yaml:"connect-timeout" default:"5"` + RetryInterval int `yaml:"retry-interval" default:"10"` + FlushInterval int `yaml:"flush-interval" default:"30"` + TLSSupport bool `yaml:"tls-support" default:"false"` + TLSInsecure bool `yaml:"tls-insecure" default:"false"` + TLSMinVersion string `yaml:"tls-min-version" default:"1.2"` + CAFile string `yaml:"ca-file" default:""` + CertFile string `yaml:"cert-file" default:""` + KeyFile string `yaml:"key-file" default:""` + ServerID string `yaml:"server-id" default:""` + OverwriteIdentity bool `yaml:"overwrite-identity" default:"false"` + BufferSize int `yaml:"buffer-size" default:"100"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` + ExtendedSupport bool `yaml:"extended-support" default:"false"` + Compression string `yaml:"compression" default:"none"` } `yaml:"dnstapclient"` TCPClient struct { - Enable bool `yaml:"enable"` - RemoteAddress string `yaml:"remote-address"` - RemotePort int `yaml:"remote-port"` - SockPath string `yaml:"sock-path"` // deprecated - RetryInterval int `yaml:"retry-interval"` - Transport string `yaml:"transport"` - TLSSupport 
bool `yaml:"tls-support"` // deprecated - TLSInsecure bool `yaml:"tls-insecure"` - TLSMinVersion string `yaml:"tls-min-version"` - CAFile string `yaml:"ca-file"` - CertFile string `yaml:"cert-file"` - KeyFile string `yaml:"key-file"` - Mode string `yaml:"mode"` - TextFormat string `yaml:"text-format"` - PayloadDelimiter string `yaml:"delimiter"` - BufferSize int `yaml:"buffer-size"` - FlushInterval int `yaml:"flush-interval"` - ConnectTimeout int `yaml:"connect-timeout"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + RemoteAddress string `yaml:"remote-address" default:"127.0.0.1"` + RemotePort int `yaml:"remote-port" default:"9999"` + SockPath string `yaml:"sock-path" default:""` // deprecated + RetryInterval int `yaml:"retry-interval" default:"10"` + Transport string `yaml:"transport" default:"tcp"` + TLSSupport bool `yaml:"tls-support" default:"false"` // deprecated + TLSInsecure bool `yaml:"tls-insecure" default:"false"` + TLSMinVersion string `yaml:"tls-min-version" default:"1.2"` + CAFile string `yaml:"ca-file" default:""` + CertFile string `yaml:"cert-file" default:""` + KeyFile string `yaml:"key-file" default:""` + Mode string `yaml:"mode" default:"flat-json"` + TextFormat string `yaml:"text-format" default:""` + PayloadDelimiter string `yaml:"delimiter" default:"\n"` + BufferSize int `yaml:"buffer-size" default:"100"` + FlushInterval int `yaml:"flush-interval" default:"30"` + ConnectTimeout int `yaml:"connect-timeout" default:"5"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` } `yaml:"tcpclient"` Syslog struct { - Enable bool `yaml:"enable"` - Severity string `yaml:"severity"` - Facility string `yaml:"facility"` - Transport string `yaml:"transport"` - RemoteAddress string `yaml:"remote-address"` - RetryInterval int `yaml:"retry-interval"` - TextFormat string `yaml:"text-format"` - Mode string `yaml:"mode"` - TLSInsecure bool `yaml:"tls-insecure"` - TLSMinVersion string 
`yaml:"tls-min-version"` - CAFile string `yaml:"ca-file"` - CertFile string `yaml:"cert-file"` - KeyFile string `yaml:"key-file"` - Formatter string `yaml:"formatter"` - Framer string `yaml:"framer"` - Hostname string `yaml:"hostname"` - AppName string `yaml:"app-name"` - ChannelBufferSize int `yaml:"chan-buffer-size"` - Tag string `yaml:"tag"` - ReplaceNullChar string `yaml:"replace-null-char"` - FlushInterval int `yaml:"flush-interval"` - BufferSize int `yaml:"buffer-size"` + Enable bool `yaml:"enable" default:"false"` + Severity string `yaml:"severity" default:"INFO"` + Facility string `yaml:"facility" default:"DAEMON"` + Transport string `yaml:"transport" default:"local"` + RemoteAddress string `yaml:"remote-address" default:"127.0.0.1:514"` + RetryInterval int `yaml:"retry-interval" default:"10"` + TextFormat string `yaml:"text-format" default:""` + Mode string `yaml:"mode" default:"text"` + TLSInsecure bool `yaml:"tls-insecure" default:"false"` + TLSMinVersion string `yaml:"tls-min-version" default:"1.2"` + CAFile string `yaml:"ca-file" default:""` + CertFile string `yaml:"cert-file" default:""` + KeyFile string `yaml:"key-file" default:""` + Formatter string `yaml:"formatter" default:"rfc5424"` + Framer string `yaml:"framer" default:""` + Hostname string `yaml:"hostname" default:""` + AppName string `yaml:"app-name" default:"DNScollector"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` + Tag string `yaml:"tag" default:""` + ReplaceNullChar string `yaml:"replace-null-char" default:"�"` + FlushInterval int `yaml:"flush-interval" default:"30"` + BufferSize int `yaml:"buffer-size" default:"100"` } `yaml:"syslog"` Fluentd struct { - Enable bool `yaml:"enable"` - RemoteAddress string `yaml:"remote-address"` - RemotePort int `yaml:"remote-port"` - SockPath string `yaml:"sock-path"` // deprecated - ConnectTimeout int `yaml:"connect-timeout"` - RetryInterval int `yaml:"retry-interval"` - FlushInterval int `yaml:"flush-interval"` - Transport string 
`yaml:"transport"` - TLSSupport bool `yaml:"tls-support"` // deprecated - TLSInsecure bool `yaml:"tls-insecure"` - TLSMinVersion string `yaml:"tls-min-version"` - CAFile string `yaml:"ca-file"` - CertFile string `yaml:"cert-file"` - KeyFile string `yaml:"key-file"` - Tag string `yaml:"tag"` - BufferSize int `yaml:"buffer-size"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + RemoteAddress string `yaml:"remote-address" default:"127.0.0.1"` + RemotePort int `yaml:"remote-port" default:"24224"` + SockPath string `yaml:"sock-path" default:""` // deprecated + ConnectTimeout int `yaml:"connect-timeout" default:"5"` + RetryInterval int `yaml:"retry-interval" default:"10"` + FlushInterval int `yaml:"flush-interval" default:"30"` + Transport string `yaml:"transport" default:"tcp"` + TLSSupport bool `yaml:"tls-support" default:"false"` // deprecated + TLSInsecure bool `yaml:"tls-insecure" default:"false"` + TLSMinVersion string `yaml:"tls-min-version" default:"1.2"` + CAFile string `yaml:"ca-file" default:""` + CertFile string `yaml:"cert-file" default:""` + KeyFile string `yaml:"key-file" default:""` + Tag string `yaml:"tag" default:"dns.collector"` + BufferSize int `yaml:"buffer-size" default:"100"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"4096"` } `yaml:"fluentd"` InfluxDB struct { - Enable bool `yaml:"enable"` - ServerURL string `yaml:"server-url"` - AuthToken string `yaml:"auth-token"` - TLSSupport bool `yaml:"tls-support"` - TLSInsecure bool `yaml:"tls-insecure"` - TLSMinVersion string `yaml:"tls-min-version"` - CAFile string `yaml:"ca-file"` - CertFile string `yaml:"cert-file"` - KeyFile string `yaml:"key-file"` - Bucket string `yaml:"bucket"` - Organization string `yaml:"organization"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + ServerURL string `yaml:"server-url" default:"http://localhost:8086"` + AuthToken string `yaml:"auth-token" default:""` + 
TLSSupport bool `yaml:"tls-support" default:"false"` + TLSInsecure bool `yaml:"tls-insecure" default:"false"` + TLSMinVersion string `yaml:"tls-min-version" default:"1.2"` + CAFile string `yaml:"ca-file" default:""` + CertFile string `yaml:"cert-file" default:""` + KeyFile string `yaml:"key-file" default:""` + Bucket string `yaml:"bucket" default:""` + Organization string `yaml:"organization" default:""` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` } `yaml:"influxdb"` LokiClient struct { - Enable bool `yaml:"enable"` - ServerURL string `yaml:"server-url"` - JobName string `yaml:"job-name"` - Mode string `yaml:"mode"` - FlushInterval int `yaml:"flush-interval"` - BatchSize int `yaml:"batch-size"` - RetryInterval int `yaml:"retry-interval"` - TextFormat string `yaml:"text-format"` - ProxyURL string `yaml:"proxy-url"` - TLSInsecure bool `yaml:"tls-insecure"` - TLSMinVersion string `yaml:"tls-min-version"` - CAFile string `yaml:"ca-file"` - CertFile string `yaml:"cert-file"` - KeyFile string `yaml:"key-file"` - BasicAuthLogin string `yaml:"basic-auth-login"` - BasicAuthPwd string `yaml:"basic-auth-pwd"` - BasicAuthPwdFile string `yaml:"basic-auth-pwd-file"` - TenantID string `yaml:"tenant-id"` - RelabelConfigs []*relabel.Config `yaml:"relabel-configs"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + ServerURL string `yaml:"server-url" default:"http://localhost:3100/loki/api/v1/push"` + JobName string `yaml:"job-name" default:"dnscollector"` + Mode string `yaml:"mode" default:"text"` + FlushInterval int `yaml:"flush-interval" default:"5"` + BatchSize int `yaml:"batch-size" default:"1048576"` + RetryInterval int `yaml:"retry-interval" default:"10"` + TextFormat string `yaml:"text-format" default:""` + ProxyURL string `yaml:"proxy-url" default:""` + TLSInsecure bool `yaml:"tls-insecure" default:"false"` + TLSMinVersion string `yaml:"tls-min-version" default:"1.2"` + CAFile string `yaml:"ca-file" 
default:""` + CertFile string `yaml:"cert-file" default:""` + KeyFile string `yaml:"key-file" default:""` + BasicAuthLogin string `yaml:"basic-auth-login" default:""` + BasicAuthPwd string `yaml:"basic-auth-pwd" default:""` + BasicAuthPwdFile string `yaml:"basic-auth-pwd-file" default:""` + TenantID string `yaml:"tenant-id" default:""` + RelabelConfigs []*relabel.Config `yaml:"relabel-configs" default:"[]"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` } `yaml:"lokiclient"` Statsd struct { - Enable bool `yaml:"enable"` - Prefix string `yaml:"prefix"` - RemoteAddress string `yaml:"remote-address"` - RemotePort int `yaml:"remote-port"` - ConnectTimeout int `yaml:"connect-timeout"` - Transport string `yaml:"transport"` - FlushInterval int `yaml:"flush-interval"` - TLSSupport bool `yaml:"tls-support"` // deprecated - TLSInsecure bool `yaml:"tls-insecure"` - TLSMinVersion string `yaml:"tls-min-version"` - CAFile string `yaml:"ca-file"` - CertFile string `yaml:"cert-file"` - KeyFile string `yaml:"key-file"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + Prefix string `yaml:"prefix" default:"dnscollector"` + RemoteAddress string `yaml:"remote-address" default:"127.0.0.1"` + RemotePort int `yaml:"remote-port" default:"8125"` + ConnectTimeout int `yaml:"connect-timeout" default:"5"` + Transport string `yaml:"transport" default:"udp"` + FlushInterval int `yaml:"flush-interval" default:"10"` + CertFile string `yaml:"cert-file" default:""` + TLSSupport bool `yaml:"tls-support" default:"false"` // deprecated + TLSInsecure bool `yaml:"tls-insecure" default:"false"` + TLSMinVersion string `yaml:"tls-min-version" default:"1.2"` + CAFile string `yaml:"ca-file" default:""` + KeyFile string `yaml:"key-file" default:""` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` } `yaml:"statsd"` ElasticSearchClient struct { - Enable bool `yaml:"enable"` - Index string `yaml:"index"` - Server string `yaml:"server"` - 
ChannelBufferSize int `yaml:"chan-buffer-size"` - BulkSize int `yaml:"bulk-size"` - FlushInterval int `yaml:"flush-interval"` + Enable bool `yaml:"enable" default:"false"` + Index string `yaml:"index" default:"dnscollector"` + Server string `yaml:"server" default:"http://127.0.0.1:9200/"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` + BulkSize int `yaml:"bulk-size" default:"5242880"` + BulkChannelSize int `yaml:"bulk-channel-size" default:"10"` + FlushInterval int `yaml:"flush-interval" default:"10"` + Compression string `yaml:"compression" default:"none"` } `yaml:"elasticsearch"` ScalyrClient struct { - Enable bool `yaml:"enable"` - Mode string `yaml:"mode"` - TextFormat string `yaml:"text-format"` - SessionInfo map[string]string `yaml:"sessioninfo"` - Attrs map[string]interface{} `yaml:"attrs"` - ServerURL string `yaml:"server-url"` - APIKey string `yaml:"apikey"` - Parser string `yaml:"parser"` - FlushInterval int `yaml:"flush-interval"` - ProxyURL string `yaml:"proxy-url"` - TLSInsecure bool `yaml:"tls-insecure"` - TLSMinVersion string `yaml:"tls-min-version"` - CAFile string `yaml:"ca-file"` - CertFile string `yaml:"cert-file"` - KeyFile string `yaml:"key-file"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + Mode string `yaml:"mode" default:"text"` + TextFormat string `yaml:"text-format" default:""` + SessionInfo map[string]string `yaml:"sessioninfo" default:"{}"` + Attrs map[string]interface{} `yaml:"attrs" default:"{}"` + ServerURL string `yaml:"server-url" default:"app.scalyr.com"` + APIKey string `yaml:"apikey" default:""` + Parser string `yaml:"parser" default:""` + FlushInterval int `yaml:"flush-interval" default:"10"` + ProxyURL string `yaml:"proxy-url" default:""` + TLSInsecure bool `yaml:"tls-insecure" default:"false"` + TLSMinVersion string `yaml:"tls-min-version" default:"1.2"` + CAFile string `yaml:"ca-file" default:""` + CertFile string `yaml:"cert-file" default:""` + KeyFile 
string `yaml:"key-file" default:""` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` } `yaml:"scalyrclient"` RedisPub struct { - Enable bool `yaml:"enable"` - RemoteAddress string `yaml:"remote-address"` - RemotePort int `yaml:"remote-port"` - SockPath string `yaml:"sock-path"` // deprecated - RetryInterval int `yaml:"retry-interval"` - Transport string `yaml:"transport"` - TLSSupport bool `yaml:"tls-support"` // deprecated - TLSInsecure bool `yaml:"tls-insecure"` - TLSMinVersion string `yaml:"tls-min-version"` - CAFile string `yaml:"ca-file"` - CertFile string `yaml:"cert-file"` - KeyFile string `yaml:"key-file"` - Mode string `yaml:"mode"` - TextFormat string `yaml:"text-format"` - PayloadDelimiter string `yaml:"delimiter"` - BufferSize int `yaml:"buffer-size"` - FlushInterval int `yaml:"flush-interval"` - ConnectTimeout int `yaml:"connect-timeout"` - RedisChannel string `yaml:"redis-channel"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + RemoteAddress string `yaml:"remote-address" default:"127.0.0.1"` + RemotePort int `yaml:"remote-port" default:"6379"` + SockPath string `yaml:"sock-path" default:""` // deprecated + RetryInterval int `yaml:"retry-interval" default:"10"` + Transport string `yaml:"transport" default:"tcp"` + TLSSupport bool `yaml:"tls-support" default:"false"` // deprecated + TLSInsecure bool `yaml:"tls-insecure" default:"false"` + TLSMinVersion string `yaml:"tls-min-version" default:"1.2"` + CAFile string `yaml:"ca-file" default:""` + CertFile string `yaml:"cert-file" default:""` + KeyFile string `yaml:"key-file" default:""` + Mode string `yaml:"mode" default:"flat-json"` + TextFormat string `yaml:"text-format" default:""` + PayloadDelimiter string `yaml:"delimiter" default:"\n"` + BufferSize int `yaml:"buffer-size" default:"100"` + FlushInterval int `yaml:"flush-interval" default:"30"` + ConnectTimeout int `yaml:"connect-timeout" default:"5"` + RedisChannel string 
`yaml:"redis-channel" default:"dns_collector"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` } `yaml:"redispub"` KafkaProducer struct { - Enable bool `yaml:"enable"` - RemoteAddress string `yaml:"remote-address"` - RemotePort int `yaml:"remote-port"` - RetryInterval int `yaml:"retry-interval"` - TLSSupport bool `yaml:"tls-support"` - TLSInsecure bool `yaml:"tls-insecure"` - TLSMinVersion string `yaml:"tls-min-version"` - CAFile string `yaml:"ca-file"` - CertFile string `yaml:"cert-file"` - KeyFile string `yaml:"key-file"` - SaslSupport bool `yaml:"sasl-support"` - SaslUsername string `yaml:"sasl-username"` - SaslPassword string `yaml:"sasl-password"` - SaslMechanism string `yaml:"sasl-mechanism"` - Mode string `yaml:"mode"` - BufferSize int `yaml:"buffer-size"` - FlushInterval int `yaml:"flush-interval"` - ConnectTimeout int `yaml:"connect-timeout"` - Topic string `yaml:"topic"` - Partition int `yaml:"partition"` - ChannelBufferSize int `yaml:"chan-buffer-size"` - Compression string `yaml:"compression"` + Enable bool `yaml:"enable" default:"false"` + RemoteAddress string `yaml:"remote-address" default:"127.0.0.1"` + RemotePort int `yaml:"remote-port" default:"9092"` + RetryInterval int `yaml:"retry-interval" default:"10"` + TLSSupport bool `yaml:"tls-support" default:"false"` + TLSInsecure bool `yaml:"tls-insecure" default:"false"` + TLSMinVersion string `yaml:"tls-min-version" default:"1.2"` + CAFile string `yaml:"ca-file" default:""` + CertFile string `yaml:"cert-file" default:""` + KeyFile string `yaml:"key-file" default:""` + SaslSupport bool `yaml:"sasl-support" default:"false"` + SaslUsername string `yaml:"sasl-username" default:""` + SaslPassword string `yaml:"sasl-password" default:""` + SaslMechanism string `yaml:"sasl-mechanism" default:"PLAIN"` + Mode string `yaml:"mode" default:"flat-json"` + TextFormat string `yaml:"text-format" default:""` + BufferSize int `yaml:"buffer-size" default:"100"` + FlushInterval int `yaml:"flush-interval" 
default:"10"` + ConnectTimeout int `yaml:"connect-timeout" default:"5"` + Topic string `yaml:"topic" default:"dnscollector"` + Partition int `yaml:"partition" default:"0"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` + Compression string `yaml:"compression" default:"none"` } `yaml:"kafkaproducer"` FalcoClient struct { - Enable bool `yaml:"enable"` - URL string `yaml:"url"` - ChannelBufferSize int `yaml:"chan-buffer-size"` + Enable bool `yaml:"enable" default:"false"` + URL string `yaml:"url" default:"http://127.0.0.1:9200"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` } `yaml:"falco"` + ClickhouseClient struct { + Enable bool `yaml:"enable" default:"false"` + URL string `yaml:"url" default:"http://localhost:8123"` + User string `yaml:"user" default:"default"` + Password string `yaml:"password" default:"password"` + Database string `yaml:"database" default:"dnscollector"` + Table string `yaml:"table" default:"records"` + ChannelBufferSize int `yaml:"chan-buffer-size" default:"0"` + } `yaml:"clickhouse"` } func (c *ConfigLoggers) SetDefault() { - c.Stdout.Enable = false - c.Stdout.Mode = ModeText - c.Stdout.TextFormat = "" - c.Stdout.ChannelBufferSize = 65535 - - c.DNSTap.Enable = false - c.DNSTap.RemoteAddress = LocalhostIP - c.DNSTap.RemotePort = 6000 - c.DNSTap.Transport = netlib.SocketTCP - c.DNSTap.ConnectTimeout = 5 - c.DNSTap.RetryInterval = 10 - c.DNSTap.FlushInterval = 30 - c.DNSTap.SockPath = "" - c.DNSTap.TLSSupport = false - c.DNSTap.TLSInsecure = false - c.DNSTap.TLSMinVersion = TLSV12 - c.DNSTap.CAFile = "" - c.DNSTap.CertFile = "" - c.DNSTap.KeyFile = "" - c.DNSTap.ServerID = "" - c.DNSTap.OverwriteIdentity = false - c.DNSTap.BufferSize = 100 - c.DNSTap.ChannelBufferSize = 65535 - c.DNSTap.ExtendedSupport = false - - c.LogFile.Enable = false - c.LogFile.FilePath = "" - c.LogFile.FlushInterval = 10 - c.LogFile.MaxSize = 100 - c.LogFile.MaxFiles = 10 - c.LogFile.Compress = false - c.LogFile.CompressInterval = 60 - 
c.LogFile.CompressPostCommand = "" - c.LogFile.Mode = ModeText - c.LogFile.PostRotateCommand = "" - c.LogFile.PostRotateDelete = false - c.LogFile.TextFormat = "" - c.LogFile.ChannelBufferSize = 65535 - c.LogFile.ExtendedSupport = false - - c.Prometheus.Enable = false - c.Prometheus.ListenIP = LocalhostIP - c.Prometheus.ListenPort = 8081 - c.Prometheus.TLSSupport = false - c.Prometheus.TLSMutual = false - c.Prometheus.TLSMinVersion = TLSV12 - c.Prometheus.CertFile = "" - c.Prometheus.KeyFile = "" - c.Prometheus.PromPrefix = ProgName - c.Prometheus.TopN = 10 - c.Prometheus.BasicAuthLogin = "admin" - c.Prometheus.BasicAuthPwd = "changeme" - c.Prometheus.BasicAuthEnabled = true - c.Prometheus.ChannelBufferSize = 65535 - c.Prometheus.HistogramMetricsEnabled = false - c.Prometheus.RequestersMetricsEnabled = true - c.Prometheus.DomainsMetricsEnabled = true - c.Prometheus.NoErrorMetricsEnabled = true - c.Prometheus.ServfailMetricsEnabled = true - c.Prometheus.NonExistentMetricsEnabled = true - c.Prometheus.RequestersCacheTTL = 3600 - c.Prometheus.RequestersCacheSize = 250000 - c.Prometheus.DomainsCacheTTL = 3600 - c.Prometheus.DomainsCacheSize = 500000 - c.Prometheus.DomainsCacheTTL = 3600 - c.Prometheus.NoErrorDomainsCacheSize = 100000 - c.Prometheus.NoErrorDomainsCacheTTL = 3600 - c.Prometheus.ServfailDomainsCacheSize = 10000 - c.Prometheus.ServfailDomainsCacheTTL = 3600 - c.Prometheus.NXDomainsCacheSize = 10000 - c.Prometheus.NXDomainsCacheTTL = 3600 - c.Prometheus.DefaultDomainsCacheSize = 1000 - c.Prometheus.DefaultDomainsCacheTTL = 3600 - - c.RestAPI.Enable = false - c.RestAPI.ListenIP = LocalhostIP - c.RestAPI.ListenPort = 8080 - c.RestAPI.BasicAuthLogin = "admin" - c.RestAPI.BasicAuthPwd = "changeme" - c.RestAPI.TLSSupport = false - c.RestAPI.TLSMinVersion = TLSV12 - c.RestAPI.CertFile = "" - c.RestAPI.KeyFile = "" - c.RestAPI.TopN = 100 - c.RestAPI.ChannelBufferSize = 65535 - - c.TCPClient.Enable = false - c.TCPClient.RemoteAddress = LocalhostIP - 
c.TCPClient.RemotePort = 9999 - c.TCPClient.SockPath = "" - c.TCPClient.RetryInterval = 10 - c.TCPClient.Transport = netlib.SocketTCP - c.TCPClient.TLSSupport = false - c.TCPClient.TLSInsecure = false - c.TCPClient.TLSMinVersion = TLSV12 - c.TCPClient.CAFile = "" - c.TCPClient.CertFile = "" - c.TCPClient.KeyFile = "" - c.TCPClient.Mode = ModeFlatJSON - c.TCPClient.TextFormat = "" - c.TCPClient.PayloadDelimiter = "\n" - c.TCPClient.BufferSize = 100 - c.TCPClient.ConnectTimeout = 5 - c.TCPClient.FlushInterval = 30 - c.TCPClient.ChannelBufferSize = 65535 - - c.Syslog.Enable = false - c.Syslog.Severity = "INFO" - c.Syslog.Facility = "DAEMON" - c.Syslog.Transport = "local" - c.Syslog.RemoteAddress = "127.0.0.1:514" - c.Syslog.TextFormat = "" - c.Syslog.Mode = ModeText - c.Syslog.RetryInterval = 10 - c.Syslog.TLSInsecure = false - c.Syslog.TLSMinVersion = TLSV12 - c.Syslog.CAFile = "" - c.Syslog.CertFile = "" - c.Syslog.KeyFile = "" - c.Syslog.ChannelBufferSize = 65535 - c.Syslog.Tag = "" - c.Syslog.Framer = "" - c.Syslog.Formatter = "rfc5424" - c.Syslog.Hostname = "" - c.Syslog.AppName = "DNScollector" - c.Syslog.ReplaceNullChar = "�" - c.Syslog.FlushInterval = 30 - c.Syslog.BufferSize = 100 - - c.Fluentd.Enable = false - c.Fluentd.RemoteAddress = LocalhostIP - c.Fluentd.RemotePort = 24224 - c.Fluentd.SockPath = "" // deprecated - c.Fluentd.RetryInterval = 10 - c.Fluentd.ConnectTimeout = 5 - c.Fluentd.FlushInterval = 30 - c.Fluentd.Transport = netlib.SocketTCP - c.Fluentd.TLSSupport = false // deprecated - c.Fluentd.TLSInsecure = false - c.Fluentd.TLSMinVersion = TLSV12 - c.Fluentd.CAFile = "" - c.Fluentd.CertFile = "" - c.Fluentd.KeyFile = "" - c.Fluentd.Tag = "dns.collector" - c.Fluentd.BufferSize = 100 - c.Fluentd.ChannelBufferSize = 65535 - - c.InfluxDB.Enable = false - c.InfluxDB.ServerURL = "http://localhost:8086" - c.InfluxDB.AuthToken = "" - c.InfluxDB.TLSSupport = false - c.InfluxDB.TLSInsecure = false - c.InfluxDB.TLSMinVersion = TLSV12 - c.InfluxDB.CAFile = 
"" - c.InfluxDB.CertFile = "" - c.InfluxDB.KeyFile = "" - c.InfluxDB.Bucket = "" - c.InfluxDB.Organization = "" - c.InfluxDB.ChannelBufferSize = 65535 - - c.LokiClient.Enable = false - c.LokiClient.ServerURL = "http://localhost:3100/loki/api/v1/push" - c.LokiClient.JobName = ProgName - c.LokiClient.Mode = ModeText - c.LokiClient.FlushInterval = 5 - c.LokiClient.BatchSize = 1024 * 1024 - c.LokiClient.RetryInterval = 10 - c.LokiClient.TextFormat = "" - c.LokiClient.ProxyURL = "" - c.LokiClient.TLSInsecure = false - c.LokiClient.TLSMinVersion = TLSV12 - c.LokiClient.CAFile = "" - c.LokiClient.CertFile = "" - c.LokiClient.KeyFile = "" - c.LokiClient.BasicAuthLogin = "" - c.LokiClient.BasicAuthPwd = "" - c.LokiClient.BasicAuthPwdFile = "" - c.LokiClient.TenantID = "" - c.LokiClient.ChannelBufferSize = 65535 - - c.Statsd.Enable = false - c.Statsd.Prefix = ProgName - c.Statsd.RemoteAddress = LocalhostIP - c.Statsd.RemotePort = 8125 - c.Statsd.Transport = netlib.SocketUDP - c.Statsd.ConnectTimeout = 5 - c.Statsd.FlushInterval = 10 - c.Statsd.TLSSupport = false // deprecated - c.Statsd.TLSInsecure = false - c.Statsd.TLSMinVersion = TLSV12 - c.Statsd.CAFile = "" - c.Statsd.CertFile = "" - c.Statsd.KeyFile = "" - c.Statsd.ChannelBufferSize = 65535 - - c.ElasticSearchClient.Enable = false - c.ElasticSearchClient.Server = "http://127.0.0.1:9200/" - c.ElasticSearchClient.Index = "" - c.ElasticSearchClient.ChannelBufferSize = 65535 - c.ElasticSearchClient.BulkSize = 100 - c.ElasticSearchClient.FlushInterval = 10 - - c.RedisPub.Enable = false - c.RedisPub.RemoteAddress = LocalhostIP - c.RedisPub.RemotePort = 6379 - c.RedisPub.SockPath = "" - c.RedisPub.RetryInterval = 10 - c.RedisPub.Transport = netlib.SocketTCP - c.RedisPub.TLSSupport = false - c.RedisPub.TLSInsecure = false - c.RedisPub.TLSMinVersion = TLSV12 - c.RedisPub.CAFile = "" - c.RedisPub.CertFile = "" - c.RedisPub.KeyFile = "" - c.RedisPub.Mode = ModeFlatJSON - c.RedisPub.TextFormat = "" - c.RedisPub.PayloadDelimiter = 
"\n" - c.RedisPub.BufferSize = 100 - c.RedisPub.ConnectTimeout = 5 - c.RedisPub.FlushInterval = 30 - c.RedisPub.RedisChannel = "dns_collector" - c.RedisPub.ChannelBufferSize = 65535 - - c.KafkaProducer.Enable = false - c.KafkaProducer.RemoteAddress = LocalhostIP - c.KafkaProducer.RemotePort = 9092 - c.KafkaProducer.RetryInterval = 10 - c.KafkaProducer.TLSSupport = false - c.KafkaProducer.TLSInsecure = false - c.KafkaProducer.TLSMinVersion = TLSV12 - c.KafkaProducer.CAFile = "" - c.KafkaProducer.CertFile = "" - c.KafkaProducer.KeyFile = "" - c.KafkaProducer.SaslSupport = false - c.KafkaProducer.SaslUsername = "" - c.KafkaProducer.SaslPassword = "" - c.KafkaProducer.SaslMechanism = SASLMechanismPlain - c.KafkaProducer.Mode = ModeFlatJSON - c.KafkaProducer.BufferSize = 100 - c.KafkaProducer.ConnectTimeout = 5 - c.KafkaProducer.FlushInterval = 10 - c.KafkaProducer.Topic = "dnscollector" - c.KafkaProducer.Partition = 0 - c.KafkaProducer.ChannelBufferSize = 65535 - c.KafkaProducer.Compression = CompressNone + defaults.Set(c) +} - c.FalcoClient.Enable = false - c.FalcoClient.URL = "http://127.0.0.1:9200" - c.FalcoClient.ChannelBufferSize = 65535 +func (c *ConfigLoggers) IsValid(userCfg map[string]interface{}) error { + return CheckConfigWithTags(reflect.ValueOf(*c), userCfg) } -func (c *ConfigLoggers) GetTags() (ret []string) { +func (c *ConfigLoggers) GetNames() (ret []string) { cl := reflect.TypeOf(*c) for i := 0; i < cl.NumField(); i++ { @@ -558,8 +331,8 @@ func (c *ConfigLoggers) GetTags() (ret []string) { return ret } -func (c *ConfigLoggers) IsValid(name string) bool { - tags := c.GetTags() +func (c *ConfigLoggers) IsExists(name string) bool { + tags := c.GetNames() for i := range tags { if name == tags[i] { return true diff --git a/pkgconfig/multiplexer.go b/pkgconfig/multiplexer.go index f48cc302..8a83b606 100644 --- a/pkgconfig/multiplexer.go +++ b/pkgconfig/multiplexer.go @@ -1,5 +1,9 @@ package pkgconfig +import ( + "github.com/pkg/errors" +) + type 
ConfigMultiplexer struct { Collectors []MultiplexInOut `yaml:"collectors"` Loggers []MultiplexInOut `yaml:"loggers"` @@ -12,13 +16,84 @@ func (c *ConfigMultiplexer) SetDefault() { c.Routes = []MultiplexRoutes{} } +func (c *ConfigMultiplexer) IsValid(userCfg map[string]interface{}) error { + for k, v := range userCfg { + switch k { + case "collectors": + for i, cv := range v.([]interface{}) { + cfg := MultiplexInOut{IsCollector: true} + if err := cfg.IsValid(cv.(map[string]interface{})); err != nil { + return errors.Errorf("collector(index=%d) - %s", i, err) + } + } + + case "loggers": + for i, cv := range v.([]interface{}) { + cfg := MultiplexInOut{IsCollector: false} + if err := cfg.IsValid(cv.(map[string]interface{})); err != nil { + return errors.Errorf("logger(index=%d) - %s", i, err) + } + } + + case "routes": + for i, cv := range v.([]interface{}) { + cfg := MultiplexRoutes{} + if err := cfg.IsValid(cv.(map[string]interface{})); err != nil { + return errors.Errorf("route(index=%d) - %s", i, err) + } + } + + default: + return errors.Errorf("unknown multiplexer key=%s\n", k) + } + } + return nil +} + type MultiplexInOut struct { - Name string `yaml:"name"` - Transforms map[string]interface{} `yaml:"transforms"` - Params map[string]interface{} `yaml:",inline"` + Name string `yaml:"name"` + Transforms map[string]interface{} `yaml:"transforms"` + Params map[string]interface{} `yaml:",inline"` + IsCollector bool +} + +func (c *MultiplexInOut) IsValid(userCfg map[string]interface{}) error { + if _, ok := userCfg["name"]; !ok { + return errors.Errorf("name key is required") + } + delete(userCfg, "name") + + if _, ok := userCfg["transforms"]; ok { + cfg := ConfigTransformers{} + if err := cfg.IsValid(userCfg["transforms"].(map[string]interface{})); err != nil { + return errors.Errorf("transform - %s", err) + } + delete(userCfg, "transforms") + } + + var err error + if c.IsCollector { + cfg := ConfigCollectors{} + err = cfg.IsValid(userCfg) + } else { + cfg := 
ConfigLoggers{} + err = cfg.IsValid(userCfg) + } + + return err } type MultiplexRoutes struct { Src []string `yaml:"from,flow"` Dst []string `yaml:"to,flow"` } + +func (c *MultiplexRoutes) IsValid(userCfg map[string]interface{}) error { + if _, ok := userCfg["from"]; !ok { + return errors.Errorf("the key 'from' is required") + } + if _, ok := userCfg["to"]; !ok { + return errors.Errorf("the key 'to' is required") + } + return nil +} diff --git a/pkgconfig/pipelines.go b/pkgconfig/pipelines.go index 126f743a..d8941f57 100644 --- a/pkgconfig/pipelines.go +++ b/pkgconfig/pipelines.go @@ -1,5 +1,11 @@ package pkgconfig +import ( + "fmt" + + "github.com/pkg/errors" +) + type ConfigPipelines struct { Name string `yaml:"name"` Transforms map[string]interface{} `yaml:"transforms"` @@ -7,7 +13,63 @@ type ConfigPipelines struct { RoutingPolicy PipelinesRouting `yaml:"routing-policy"` } +func (c *ConfigPipelines) IsValid(userCfg map[string]interface{}) error { + if _, ok := userCfg["name"]; !ok { + return errors.Errorf("name key is required") + } + delete(userCfg, "name") + + if _, ok := userCfg["transforms"]; ok { + cfg := ConfigTransformers{} + if err := cfg.IsValid(userCfg["transforms"].(map[string]interface{})); err != nil { + return errors.Errorf("transform - %s", err) + } + delete(userCfg, "transforms") + } + + if _, ok := userCfg["routing-policy"]; ok { + cfg := PipelinesRouting{} + if err := cfg.IsValid(userCfg["routing-policy"].(map[string]interface{})); err != nil { + return errors.Errorf("routing-policy - %s", err) + } + delete(userCfg, "routing-policy") + } + + wc := ConfigCollectors{} + wl := ConfigLoggers{} + + for workerName := range userCfg { + collectorExist := wc.IsExists(workerName) + loggerExist := wl.IsExists(workerName) + if !collectorExist && !loggerExist { + return errors.Errorf("invalid worker type - %s", workerName) + } + + if collectorExist { + if err := wc.IsValid(userCfg); err != nil { + return errors.Errorf("%s", err) + } + } + if 
loggerExist { + if err := wl.IsValid(userCfg); err != nil { + return errors.Errorf("%s", err) + } + } + } + + return nil +} + type PipelinesRouting struct { - Default []string `yaml:"default,flow"` + Forward []string `yaml:"forward,flow"` Dropped []string `yaml:"dropped,flow"` } + +func (c *PipelinesRouting) IsValid(userCfg map[string]interface{}) error { + for k := range userCfg { + if k != "forward" && k != "dropped" { + return fmt.Errorf("invalid key '%s'", k) + } + } + return nil +} diff --git a/pkgconfig/pipelines_test.go b/pkgconfig/pipelines_test.go new file mode 100644 index 00000000..1a0aa820 --- /dev/null +++ b/pkgconfig/pipelines_test.go @@ -0,0 +1,72 @@ +package pkgconfig + +import ( + "testing" +) + +func TestConfigPipelines_IsValid(t *testing.T) { + testCases := []struct { + name string + config map[string]interface{} + expectErr bool + errorMsg string + }{ + { + name: "Valid Config", + config: map[string]interface{}{ + "name": "pipeline1", + "transforms": map[string]interface{}{"normalize": map[string]interface{}{}}, + "routing-policy": map[string]interface{}{"forward": []string{"route1"}, "dropped": []string{"route2"}}, + }, + expectErr: false, + }, + { + name: "Missing Name", + config: map[string]interface{}{ + "transforms": map[string]interface{}{"normalize": map[string]interface{}{}}, + "routing-policy": map[string]interface{}{"forward": []string{"route1"}, "dropped": []string{"route2"}}, + }, + expectErr: true, + errorMsg: "name key is required", + }, + { + name: "Invalid Routing Policy Key", + config: map[string]interface{}{ + "name": "testPipeline", + "routing-policy": map[string]interface{}{"forward": []string{"route1"}, "invalid": []string{"route2"}}, + }, + expectErr: true, + errorMsg: "routing-policy - invalid key 'invalid'", + }, + { + name: "Invalid Transforms", + config: map[string]interface{}{ + "name": "testPipeline", + "transforms": map[string]interface{}{ + "invalidTransform": "invalidValue", + }, + }, + expectErr: true, + errorMsg: 
"transform - unknown key=`invalidTransform`", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + pipeline := ConfigPipelines{} + err := pipeline.IsValid(tc.config) + + if tc.expectErr { + if err == nil { + t.Errorf("expected error but got nil") + } else if err.Error() != tc.errorMsg { + t.Errorf("expected error message '%s', but got '%s'", tc.errorMsg, err.Error()) + } + } else { + if err != nil { + t.Errorf("expected no error, but got %v", err) + } + } + }) + } +} diff --git a/pkgconfig/tls_client.go b/pkgconfig/tls_client.go deleted file mode 100644 index eeb000ef..00000000 --- a/pkgconfig/tls_client.go +++ /dev/null @@ -1,73 +0,0 @@ -package pkgconfig - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "os" -) - -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} - -func IsValidTLS(mode string) bool { - switch mode { - case - TLSV10, - TLSV11, - TLSV12, - TLSV13: - return true - } - return false -} - -type TLSOptions struct { - CAFile string - CertFile string - KeyFile string - InsecureSkipVerify bool - MinVersion string -} - -func TLSClientConfig(options TLSOptions) (*tls.Config, error) { - - tlsConfig := &tls.Config{ - MinVersion: tls.VersionTLS12, - InsecureSkipVerify: false, - CipherSuites: clientCipherSuites, - } - tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify - - if len(options.CAFile) > 0 { - CAs := x509.NewCertPool() - pemData, err := os.ReadFile(options.CAFile) - if err != nil { - return nil, fmt.Errorf("could not read CA certificate %q: %w", options.CAFile, err) - } - if !CAs.AppendCertsFromPEM(pemData) { - return nil, fmt.Errorf("failed to append certificates from PEM file: %q", options.CAFile) - } - tlsConfig.RootCAs = CAs - } - - if len(options.CertFile) > 0 && len(options.KeyFile) > 0 { - cer, err := 
tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - return nil, fmt.Errorf("loading certificate failed: %w", err) - } - tlsConfig.Certificates = []tls.Certificate{cer} - } - - if tlsVersion, ok := TLSVersion[options.MinVersion]; ok { - tlsConfig.MinVersion = tlsVersion - } else { - return nil, fmt.Errorf("invalid minimum TLS version: %x", options.MinVersion) - } - - return tlsConfig, nil -} diff --git a/pkgconfig/tls_client_test.go b/pkgconfig/tls_client_test.go deleted file mode 100644 index db3889c4..00000000 --- a/pkgconfig/tls_client_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package pkgconfig - -import ( - "crypto/tls" - "reflect" - "testing" -) - -func TestConfigClientTLSNoVerify(t *testing.T) { - tlsConfig, err := TLSClientConfig(TLSOptions{InsecureSkipVerify: true, MinVersion: TLSV12}) - - if err != nil || tlsConfig == nil { - t.Fatal("Unable to configure client TLS", err) - } - - if !reflect.DeepEqual(tlsConfig.CipherSuites, clientCipherSuites) { - t.Fatal("Unexpected client cipher suites") - } - if tlsConfig.MinVersion != tls.VersionTLS12 { - t.Fatal("Unexpected client TLS version") - } - - if tlsConfig.Certificates != nil { - t.Fatal("Somehow client certificates were set") - } -} diff --git a/pkgconfig/transformers.go b/pkgconfig/transformers.go index 33461919..1d82119b 100644 --- a/pkgconfig/transformers.go +++ b/pkgconfig/transformers.go @@ -1,137 +1,102 @@ package pkgconfig +import ( + "reflect" + + "github.com/creasty/defaults" +) + +type RelabelingConfig struct { + Regex string `yaml:"regex"` + Replacement string `yaml:"replacement"` +} + type ConfigTransformers struct { UserPrivacy struct { - Enable bool `yaml:"enable"` - AnonymizeIP bool `yaml:"anonymize-ip"` - AnonymizeIPV4Bits string `yaml:"anonymize-v4bits"` - AnonymizeIPV6Bits string `yaml:"anonymize-v6bits"` - MinimazeQname bool `yaml:"minimaze-qname"` - HashIP bool `yaml:"hash-ip"` - HashIPAlgo string `yaml:"hash-ip-algo"` + Enable bool `yaml:"enable" 
default:"false"` + AnonymizeIP bool `yaml:"anonymize-ip" default:"false"` + AnonymizeIPV4Bits string `yaml:"anonymize-v4bits" default:"0.0.0.0/16"` + AnonymizeIPV6Bits string `yaml:"anonymize-v6bits" default:"::/64"` + MinimazeQname bool `yaml:"minimaze-qname" default:"false"` + HashQueryIP bool `yaml:"hash-query-ip" default:"false"` + HashReplyIP bool `yaml:"hash-reply-ip" default:"false"` + HashIPAlgo string `yaml:"hash-ip-algo" default:"sha1"` } `yaml:"user-privacy"` Normalize struct { - Enable bool `yaml:"enable"` - QnameLowerCase bool `yaml:"qname-lowercase"` - QuietText bool `yaml:"quiet-text"` - AddTld bool `yaml:"add-tld"` - AddTldPlusOne bool `yaml:"add-tld-plus-one"` + Enable bool `yaml:"enable" default:"false"` + QnameLowerCase bool `yaml:"qname-lowercase" default:"false"` + RRLowerCase bool `yaml:"rr-lowercase" default:"false"` + QuietText bool `yaml:"quiet-text" default:"false"` + AddTld bool `yaml:"add-tld" default:"false"` + AddTldPlusOne bool `yaml:"add-tld-plus-one" default:"false"` } `yaml:"normalize"` Latency struct { - Enable bool `yaml:"enable"` - MeasureLatency bool `yaml:"measure-latency"` - UnansweredQueries bool `yaml:"unanswered-queries"` - QueriesTimeout int `yaml:"queries-timeout"` + Enable bool `yaml:"enable" default:"false"` + MeasureLatency bool `yaml:"measure-latency" default:"false"` + UnansweredQueries bool `yaml:"unanswered-queries" default:"false"` + QueriesTimeout int `yaml:"queries-timeout" default:"2"` } `yaml:"latency"` Reducer struct { - Enable bool `yaml:"enable"` - RepetitiveTrafficDetector bool `yaml:"repetitive-traffic-detector"` - QnamePlusOne bool `yaml:"qname-plus-one"` - WatchInterval int `yaml:"watch-interval"` + Enable bool `yaml:"enable" default:"false"` + RepetitiveTrafficDetector bool `yaml:"repetitive-traffic-detector" default:"false"` + QnamePlusOne bool `yaml:"qname-plus-one" default:"false"` + WatchInterval int `yaml:"watch-interval" default:"5"` } `yaml:"reducer"` Filtering struct { - Enable bool 
`yaml:"enable"` - DropFqdnFile string `yaml:"drop-fqdn-file"` - DropDomainFile string `yaml:"drop-domain-file"` - KeepFqdnFile string `yaml:"keep-fqdn-file"` - KeepDomainFile string `yaml:"keep-domain-file"` - DropQueryIPFile string `yaml:"drop-queryip-file"` - KeepQueryIPFile string `yaml:"keep-queryip-file"` - KeepRdataFile string `yaml:"keep-rdata-file"` - DropRcodes []string `yaml:"drop-rcodes,flow"` - LogQueries bool `yaml:"log-queries"` - LogReplies bool `yaml:"log-replies"` - Downsample int `yaml:"downsample"` + Enable bool `yaml:"enable" default:"false"` + DropFqdnFile string `yaml:"drop-fqdn-file" default:""` + DropDomainFile string `yaml:"drop-domain-file" default:""` + KeepFqdnFile string `yaml:"keep-fqdn-file" default:""` + KeepDomainFile string `yaml:"keep-domain-file" default:""` + DropQueryIPFile string `yaml:"drop-queryip-file" default:""` + KeepQueryIPFile string `yaml:"keep-queryip-file" default:""` + KeepRdataFile string `yaml:"keep-rdata-file" default:""` + DropRcodes []string `yaml:"drop-rcodes,flow" default:"[]"` + LogQueries bool `yaml:"log-queries" default:"true"` + LogReplies bool `yaml:"log-replies" default:"true"` + Downsample int `yaml:"downsample" default:"0"` } `yaml:"filtering"` GeoIP struct { - Enable bool `yaml:"enable"` - DBCountryFile string `yaml:"mmdb-country-file"` - DBCityFile string `yaml:"mmdb-city-file"` - DBASNFile string `yaml:"mmdb-asn-file"` + Enable bool `yaml:"enable" default:"false"` + DBCountryFile string `yaml:"mmdb-country-file" default:""` + DBCityFile string `yaml:"mmdb-city-file" default:""` + DBASNFile string `yaml:"mmdb-asn-file" default:""` } `yaml:"geoip"` Suspicious struct { - Enable bool `yaml:"enable"` - ThresholdQnameLen int `yaml:"threshold-qname-len"` - ThresholdPacketLen int `yaml:"threshold-packet-len"` - ThresholdSlow float64 `yaml:"threshold-slow"` - CommonQtypes []string `yaml:"common-qtypes,flow"` - UnallowedChars []string `yaml:"unallowed-chars,flow"` - ThresholdMaxLabels int 
`yaml:"threshold-max-labels"` - WhitelistDomains []string `yaml:"whitelist-domains,flow"` + Enable bool `yaml:"enable" default:"false"` + ThresholdQnameLen int `yaml:"threshold-qname-len" default:"100"` + ThresholdPacketLen int `yaml:"threshold-packet-len" default:"1000"` + ThresholdSlow float64 `yaml:"threshold-slow" default:"1.0"` + CommonQtypes []string `yaml:"common-qtypes,flow" default:"[\"A\", \"AAAA\", \"TXT\", \"CNAME\", \"PTR\", \"NAPTR\", \"DNSKEY\", \"SRV\", \"SOA\", \"NS\", \"MX\", \"DS\", \"HTTPS\"]"` + UnallowedChars []string `yaml:"unallowed-chars,flow" default:"[\"\\\"\", \"==\", \"/\", \":\"]"` + ThresholdMaxLabels int `yaml:"threshold-max-labels" default:"10"` + WhitelistDomains []string `yaml:"whitelist-domains,flow" default:"[\"\\\\.ip6\\\\.arpa\"]"` } `yaml:"suspicious"` Extract struct { - Enable bool `yaml:"enable"` - AddPayload bool `yaml:"add-payload"` + Enable bool `yaml:"enable" default:"false"` + AddPayload bool `yaml:"add-payload" default:"false"` } `yaml:"extract"` MachineLearning struct { - Enable bool `yaml:"enable"` - AddFeatures bool `yaml:"add-features"` + Enable bool `yaml:"enable" default:"false"` + AddFeatures bool `yaml:"add-features" default:"false"` } `yaml:"machine-learning"` ATags struct { - Enable bool `yaml:"enable"` - Tags []string `yaml:"tags,flow"` + Enable bool `yaml:"enable" default:"false"` + AddTags []string `yaml:"add-tags,flow" default:"[]"` } `yaml:"atags"` + Relabeling struct { + Enable bool `yaml:"enable" default:"false"` + Rename []RelabelingConfig `yaml:"rename,flow"` + Remove []RelabelingConfig `yaml:"remove,flow"` + } `yaml:"relabeling"` } func (c *ConfigTransformers) SetDefault() { - c.Suspicious.Enable = false - c.Suspicious.ThresholdQnameLen = 100 - c.Suspicious.ThresholdPacketLen = 1000 - c.Suspicious.ThresholdSlow = 1.0 - c.Suspicious.CommonQtypes = []string{"A", "AAAA", "TXT", "CNAME", "PTR", - "NAPTR", "DNSKEY", "SRV", "SOA", "NS", "MX", "DS", "HTTPS"} - c.Suspicious.UnallowedChars = []string{"\"", 
"==", "/", ":"} - c.Suspicious.ThresholdMaxLabels = 10 - c.Suspicious.WhitelistDomains = []string{"\\.ip6\\.arpa"} - - c.UserPrivacy.Enable = false - c.UserPrivacy.AnonymizeIP = false - c.UserPrivacy.AnonymizeIPV4Bits = "0.0.0.0/16" - c.UserPrivacy.AnonymizeIPV6Bits = "::/64" - c.UserPrivacy.MinimazeQname = false - c.UserPrivacy.HashIP = false - c.UserPrivacy.HashIPAlgo = "sha1" - - c.Normalize.Enable = false - c.Normalize.QnameLowerCase = false - c.Normalize.QuietText = false - c.Normalize.AddTld = false - c.Normalize.AddTldPlusOne = false - - c.Latency.Enable = false - c.Latency.MeasureLatency = false - c.Latency.UnansweredQueries = false - c.Latency.QueriesTimeout = 2 - - c.Reducer.Enable = false - c.Reducer.RepetitiveTrafficDetector = false - c.Reducer.QnamePlusOne = false - c.Reducer.WatchInterval = 5 - - c.Filtering.Enable = false - c.Filtering.DropFqdnFile = "" - c.Filtering.DropDomainFile = "" - c.Filtering.KeepFqdnFile = "" - c.Filtering.KeepDomainFile = "" - c.Filtering.DropQueryIPFile = "" - c.Filtering.DropRcodes = []string{} - c.Filtering.LogQueries = true - c.Filtering.LogReplies = true - c.Filtering.Downsample = 0 - - c.GeoIP.Enable = false - c.GeoIP.DBCountryFile = "" - c.GeoIP.DBCityFile = "" - c.GeoIP.DBASNFile = "" - - c.Extract.Enable = false - c.Extract.AddPayload = false - - c.MachineLearning.Enable = false - c.MachineLearning.AddFeatures = false + defaults.Set(c) +} - c.ATags.Enable = false - c.ATags.Tags = []string{} +func (c *ConfigTransformers) IsValid(userCfg map[string]interface{}) error { + return CheckConfigWithTags(reflect.ValueOf(*c), userCfg) } func GetFakeConfigTransformers() *ConfigTransformers { diff --git a/pkglinker/multiplexer.go b/pkginit/multiplexer.go similarity index 71% rename from pkglinker/multiplexer.go rename to pkginit/multiplexer.go index cc2df354..5ab0402b 100644 --- a/pkglinker/multiplexer.go +++ b/pkginit/multiplexer.go @@ -1,13 +1,11 @@ -package pkglinker +package pkginit import ( "fmt" "strings" - 
"github.com/dmachard/go-dnscollector/collectors" - "github.com/dmachard/go-dnscollector/loggers" "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" + "github.com/dmachard/go-dnscollector/workers" "github.com/dmachard/go-logger" "gopkg.in/yaml.v2" ) @@ -69,6 +67,10 @@ func GetItemConfig(section string, config *pkgconfig.Config, item pkgconfig.Mult // add transformer for k, v := range item.Transforms { + if _, ok := v.(map[string]interface{}); !ok { + panic("main - yaml transform config error - map expected") + } + v.(map[string]interface{})["enable"] = true cfg[section+Transformers].(map[string]interface{})[k] = v } @@ -83,7 +85,7 @@ func GetItemConfig(section string, config *pkgconfig.Config, item pkgconfig.Mult return subcfg } -func InitMultiplexer(mapLoggers map[string]pkgutils.Worker, mapCollectors map[string]pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger) { +func InitMultiplexer(mapLoggers map[string]workers.Worker, mapCollectors map[string]workers.Worker, config *pkgconfig.Config, logger *logger.Logger) { // checking all routes before to continue if err := AreRoutesValid(config); err != nil { @@ -96,53 +98,59 @@ func InitMultiplexer(mapLoggers map[string]pkgutils.Worker, mapCollectors map[st subcfg := GetItemConfig("loggers", config, output) // registor the logger if enabled + if subcfg.Loggers.DevNull.Enable && IsLoggerRouted(config, output.Name) { + mapLoggers[output.Name] = workers.NewDevNull(subcfg, logger, output.Name) + } if subcfg.Loggers.RestAPI.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewRestAPI(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewRestAPI(subcfg, logger, output.Name) } if subcfg.Loggers.Prometheus.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewPrometheus(subcfg, logger, output.Name) + mapLoggers[output.Name] = 
workers.NewPrometheus(subcfg, logger, output.Name) } if subcfg.Loggers.Stdout.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewStdOut(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewStdOut(subcfg, logger, output.Name) } if subcfg.Loggers.LogFile.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewLogFile(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewLogFile(subcfg, logger, output.Name) } if subcfg.Loggers.DNSTap.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewDnstapSender(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewDnstapSender(subcfg, logger, output.Name) } if subcfg.Loggers.TCPClient.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewTCPClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewTCPClient(subcfg, logger, output.Name) } if subcfg.Loggers.Syslog.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewSyslog(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewSyslog(subcfg, logger, output.Name) } if subcfg.Loggers.Fluentd.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewFluentdClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewFluentdClient(subcfg, logger, output.Name) } if subcfg.Loggers.InfluxDB.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewInfluxDBClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewInfluxDBClient(subcfg, logger, output.Name) } if subcfg.Loggers.LokiClient.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewLokiClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewLokiClient(subcfg, logger, output.Name) } if subcfg.Loggers.Statsd.Enable && IsLoggerRouted(config, output.Name) { - 
mapLoggers[output.Name] = loggers.NewStatsdClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewStatsdClient(subcfg, logger, output.Name) } if subcfg.Loggers.ElasticSearchClient.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewElasticSearchClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewElasticSearchClient(subcfg, logger, output.Name) } if subcfg.Loggers.ScalyrClient.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewScalyrClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewScalyrClient(subcfg, logger, output.Name) } if subcfg.Loggers.RedisPub.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewRedisPub(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewRedisPub(subcfg, logger, output.Name) } if subcfg.Loggers.KafkaProducer.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewKafkaProducer(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewKafkaProducer(subcfg, logger, output.Name) } if subcfg.Loggers.FalcoClient.Enable && IsLoggerRouted(config, output.Name) { - mapLoggers[output.Name] = loggers.NewFalcoClient(subcfg, logger, output.Name) + mapLoggers[output.Name] = workers.NewFalcoClient(subcfg, logger, output.Name) + } + if subcfg.Loggers.ClickhouseClient.Enable && IsLoggerRouted(config, output.Name) { + mapLoggers[output.Name] = workers.NewClickhouseClient(subcfg, logger, output.Name) } } @@ -154,35 +162,35 @@ func InitMultiplexer(mapLoggers map[string]pkgutils.Worker, mapCollectors map[st // register the collector if enabled if subcfg.Collectors.Dnstap.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewDnstap(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewDnstapServer(nil, subcfg, logger, input.Name) } if subcfg.Collectors.DnstapProxifier.Enable && 
IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewDnstapProxifier(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewDnstapProxifier(nil, subcfg, logger, input.Name) } if subcfg.Collectors.AfpacketLiveCapture.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewAfpacketSniffer(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewAfpacketSniffer(nil, subcfg, logger, input.Name) } if subcfg.Collectors.XdpLiveCapture.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewXDPSniffer(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewXDPSniffer(nil, subcfg, logger, input.Name) } if subcfg.Collectors.Tail.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewTail(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewTail(nil, subcfg, logger, input.Name) } if subcfg.Collectors.PowerDNS.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewProtobufPowerDNS(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewPdnsServer(nil, subcfg, logger, input.Name) } if subcfg.Collectors.FileIngestor.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewFileIngestor(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewFileIngestor(nil, subcfg, logger, input.Name) } if subcfg.Collectors.Tzsp.Enable && IsCollectorRouted(config, input.Name) { - mapCollectors[input.Name] = collectors.NewTZSP(nil, subcfg, logger, input.Name) + mapCollectors[input.Name] = workers.NewTZSP(nil, subcfg, logger, input.Name) } } // here the multiplexer logic // connect collectors between loggers for _, route := range config.Multiplexer.Routes { - var logwrks []pkgutils.Worker + var logwrks []workers.Worker for _, dst := range route.Dst { if _, ok := 
mapLoggers[dst]; ok { logwrks = append(logwrks, mapLoggers[dst]) @@ -203,7 +211,7 @@ func InitMultiplexer(mapLoggers map[string]pkgutils.Worker, mapCollectors map[st } } -func ReloadMultiplexer(mapLoggers map[string]pkgutils.Worker, mapCollectors map[string]pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger) { +func ReloadMultiplexer(mapLoggers map[string]workers.Worker, mapCollectors map[string]workers.Worker, config *pkgconfig.Config, logger *logger.Logger) { for _, output := range config.Multiplexer.Loggers { newcfg := GetItemConfig("loggers", config, output) if _, ok := mapLoggers[output.Name]; ok { diff --git a/pkglinker/multiplexer_test.go b/pkginit/multiplexer_test.go similarity index 91% rename from pkglinker/multiplexer_test.go rename to pkginit/multiplexer_test.go index bf62490d..0f59be5f 100644 --- a/pkglinker/multiplexer_test.go +++ b/pkginit/multiplexer_test.go @@ -1,4 +1,4 @@ -package pkglinker +package pkginit import ( "testing" @@ -22,7 +22,7 @@ func TestMuxIsDisabled(t *testing.T) { } func TestMuxIsLoggerRouted(t *testing.T) { - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() config.Multiplexer.Routes = append(config.Multiplexer.Routes, pkgconfig.MultiplexRoutes{Dst: []string{"logger1"}}) if !IsLoggerRouted(config, "logger1") { @@ -34,7 +34,7 @@ func TestMuxIsLoggerRouted(t *testing.T) { } func TestMuxIsCollectorRouted(t *testing.T) { - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() config.Multiplexer.Routes = append(config.Multiplexer.Routes, pkgconfig.MultiplexRoutes{Src: []string{"collector1"}}) if !IsCollectorRouted(config, "collector1") { @@ -46,7 +46,7 @@ func TestMuxIsCollectorRouted(t *testing.T) { } func TestMuxRouteIsInvalid(t *testing.T) { - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() config.Multiplexer.Routes = append(config.Multiplexer.Routes, pkgconfig.MultiplexRoutes{Src: []string{"collector1"}}) err := AreRoutesValid(config) 
diff --git a/pkginit/pipelines.go b/pkginit/pipelines.go new file mode 100644 index 00000000..50e97e17 --- /dev/null +++ b/pkginit/pipelines.go @@ -0,0 +1,304 @@ +package pkginit + +import ( + "fmt" + + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/telemetry" + "github.com/dmachard/go-dnscollector/workers" + "github.com/dmachard/go-logger" + "github.com/pkg/errors" + "gopkg.in/yaml.v2" +) + +func IsPipelinesEnabled(config *pkgconfig.Config) bool { + return len(config.Pipelines) > 0 +} + +func GetStanzaConfig(config *pkgconfig.Config, item pkgconfig.ConfigPipelines) *pkgconfig.Config { + + cfg := make(map[string]interface{}) + section := "collectors" + + // Enable the provided collector or loggers + for k, p := range item.Params { + // is a logger or collector ? + if !config.Loggers.IsExists(k) && !config.Collectors.IsExists(k) { + panic(fmt.Sprintln("main - get stanza config error")) + } + if config.Loggers.IsExists(k) { + section = "loggers" + } + if p == nil { + item.Params[k] = make(map[string]interface{}) + } + item.Params[k].(map[string]interface{})["enable"] = true + + // ignore other keys + break + } + + // prepare a new config + subcfg := &pkgconfig.Config{} + subcfg.SetDefault() + + cfg[section] = item.Params + cfg[section+"-transformers"] = make(map[string]interface{}) + + // add transformers + for k, v := range item.Transforms { + v.(map[string]interface{})["enable"] = true + cfg[section+"-transformers"].(map[string]interface{})[k] = v + } + + // copy global config + subcfg.Global = config.Global + + yamlcfg, _ := yaml.Marshal(cfg) + if err := yaml.Unmarshal(yamlcfg, subcfg); err != nil { + panic(fmt.Sprintf("main - yaml logger config error: %v", err)) + } + + return subcfg +} + +func StanzaNameIsUniq(name string, config *pkgconfig.Config) (ret error) { + stanzaCounter := 0 + for _, stanza := range config.Pipelines { + if name == stanza.Name { + stanzaCounter += 1 + } + } + 
+ if stanzaCounter > 1 { + return fmt.Errorf("stanza=%s allready exists", name) + } + return nil +} + +func IsRouteExist(target string, config *pkgconfig.Config) (ret error) { + for _, stanza := range config.Pipelines { + if target == stanza.Name { + return nil + } + } + return fmt.Errorf("route=%s doest not exist", target) +} + +func CreateRouting(stanza pkgconfig.ConfigPipelines, mapCollectors map[string]workers.Worker, mapLoggers map[string]workers.Worker, logger *logger.Logger) error { + var currentStanza workers.Worker + if collector, ok := mapCollectors[stanza.Name]; ok { + currentStanza = collector + } + if logger, ok := mapLoggers[stanza.Name]; ok { + currentStanza = logger + } + + // forward routing + for _, route := range stanza.RoutingPolicy.Forward { + if route == stanza.Name { + return fmt.Errorf("main - routing error loop with stanza=%s to stanza=%s", stanza.Name, route) + } + if _, ok := mapCollectors[route]; ok { + currentStanza.AddDefaultRoute(mapCollectors[route]) + logger.Info("main - routing (policy=forward) stanza=[%s] to stanza=[%s]", stanza.Name, route) + } else if _, ok := mapLoggers[route]; ok { + currentStanza.AddDefaultRoute(mapLoggers[route]) + logger.Info("main - routing (policy=forward) stanza=[%s] to stanza=[%s]", stanza.Name, route) + } else { + return fmt.Errorf("main - forward routing error from stanza=%s to stanza=%s doest not exist", stanza.Name, route) + } + } + + // dropped routing + for _, route := range stanza.RoutingPolicy.Dropped { + if _, ok := mapCollectors[route]; ok { + currentStanza.AddDroppedRoute(mapCollectors[route]) + logger.Info("main - routing (policy=dropped) stanza=[%s] to stanza=[%s]", stanza.Name, route) + } else if _, ok := mapLoggers[route]; ok { + currentStanza.AddDroppedRoute(mapLoggers[route]) + logger.Info("main - routing (policy=dropped) stanza=[%s] to stanza=[%s]", stanza.Name, route) + } else { + return fmt.Errorf("main - routing error with dropped messages from stanza=%s to stanza=%s doest not 
exist", stanza.Name, route) + } + } + return nil +} + +func CreateStanza(stanzaName string, config *pkgconfig.Config, mapCollectors map[string]workers.Worker, mapLoggers map[string]workers.Worker, logger *logger.Logger, metrics *telemetry.PrometheusCollector) { + // register the logger if enabled + if config.Loggers.RestAPI.Enable { + mapLoggers[stanzaName] = workers.NewRestAPI(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.Prometheus.Enable { + mapLoggers[stanzaName] = workers.NewPrometheus(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.Stdout.Enable { + mapLoggers[stanzaName] = workers.NewStdOut(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.LogFile.Enable { + mapLoggers[stanzaName] = workers.NewLogFile(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.DNSTap.Enable { + mapLoggers[stanzaName] = workers.NewDnstapSender(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.TCPClient.Enable { + mapLoggers[stanzaName] = workers.NewTCPClient(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.Syslog.Enable { + mapLoggers[stanzaName] = workers.NewSyslog(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.Fluentd.Enable { + mapLoggers[stanzaName] = workers.NewFluentdClient(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.InfluxDB.Enable { + mapLoggers[stanzaName] = workers.NewInfluxDBClient(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.LokiClient.Enable { + mapLoggers[stanzaName] = workers.NewLokiClient(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.Statsd.Enable { + mapLoggers[stanzaName] = 
workers.NewStatsdClient(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.ElasticSearchClient.Enable { + mapLoggers[stanzaName] = workers.NewElasticSearchClient(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.ScalyrClient.Enable { + mapLoggers[stanzaName] = workers.NewScalyrClient(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.RedisPub.Enable { + mapLoggers[stanzaName] = workers.NewRedisPub(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.KafkaProducer.Enable { + mapLoggers[stanzaName] = workers.NewKafkaProducer(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.FalcoClient.Enable { + mapLoggers[stanzaName] = workers.NewFalcoClient(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.ClickhouseClient.Enable { + mapLoggers[stanzaName] = workers.NewClickhouseClient(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + if config.Loggers.DevNull.Enable { + mapLoggers[stanzaName] = workers.NewDevNull(config, logger, stanzaName) + mapLoggers[stanzaName].SetMetrics(metrics) + } + + // register the collector if enabled + if config.Collectors.DNSMessage.Enable { + mapCollectors[stanzaName] = workers.NewDNSMessage(nil, config, logger, stanzaName) + mapCollectors[stanzaName].SetMetrics(metrics) + } + if config.Collectors.Dnstap.Enable { + mapCollectors[stanzaName] = workers.NewDnstapServer(nil, config, logger, stanzaName) + mapCollectors[stanzaName].SetMetrics(metrics) + } + if config.Collectors.DnstapProxifier.Enable { + mapCollectors[stanzaName] = workers.NewDnstapProxifier(nil, config, logger, stanzaName) + mapCollectors[stanzaName].SetMetrics(metrics) + } + if config.Collectors.AfpacketLiveCapture.Enable { + mapCollectors[stanzaName] = workers.NewAfpacketSniffer(nil, 
config, logger, stanzaName) + mapCollectors[stanzaName].SetMetrics(metrics) + } + if config.Collectors.XdpLiveCapture.Enable { + mapCollectors[stanzaName] = workers.NewXDPSniffer(nil, config, logger, stanzaName) + mapCollectors[stanzaName].SetMetrics(metrics) + } + if config.Collectors.Tail.Enable { + mapCollectors[stanzaName] = workers.NewTail(nil, config, logger, stanzaName) + mapCollectors[stanzaName].SetMetrics(metrics) + } + if config.Collectors.PowerDNS.Enable { + mapCollectors[stanzaName] = workers.NewPdnsServer(nil, config, logger, stanzaName) + mapCollectors[stanzaName].SetMetrics(metrics) + } + if config.Collectors.FileIngestor.Enable { + mapCollectors[stanzaName] = workers.NewFileIngestor(nil, config, logger, stanzaName) + mapCollectors[stanzaName].SetMetrics(metrics) + } + if config.Collectors.Tzsp.Enable { + mapCollectors[stanzaName] = workers.NewTZSP(nil, config, logger, stanzaName) + mapCollectors[stanzaName].SetMetrics(metrics) + } +} + +func InitPipelines(mapLoggers map[string]workers.Worker, mapCollectors map[string]workers.Worker, config *pkgconfig.Config, logger *logger.Logger, telemetry *telemetry.PrometheusCollector) error { + // check if the name of each stanza is uniq + routesDefined := false + for _, stanza := range config.Pipelines { + if err := StanzaNameIsUniq(stanza.Name, config); err != nil { + return errors.Errorf("stanza with name=[%s] is duplicated", stanza.Name) + } + if len(stanza.RoutingPolicy.Forward) > 0 || len(stanza.RoutingPolicy.Dropped) > 0 { + routesDefined = true + } + } + + if !routesDefined { + return errors.Errorf("no routes are defined") + } + + // check if all routes exists before continue + for _, stanza := range config.Pipelines { + for _, route := range stanza.RoutingPolicy.Forward { + if err := IsRouteExist(route, config); err != nil { + return errors.Errorf("stanza=[%s] forward route=[%s] doest not exist", stanza.Name, route) + } + } + for _, route := range stanza.RoutingPolicy.Dropped { + if err := 
IsRouteExist(route, config); err != nil { + return errors.Errorf("stanza=[%s] dropped route=[%s] doest not exist", stanza.Name, route) + } + } + } + + // read each stanza and init + for _, stanza := range config.Pipelines { + stanzaConfig := GetStanzaConfig(config, stanza) + CreateStanza(stanza.Name, stanzaConfig, mapCollectors, mapLoggers, logger, telemetry) + + } + + // create routing + for _, stanza := range config.Pipelines { + if mapCollectors[stanza.Name] != nil || mapLoggers[stanza.Name] != nil { + if err := CreateRouting(stanza, mapCollectors, mapLoggers, logger); err != nil { + return errors.Errorf(err.Error()) + } + } else { + return errors.Errorf("routing - stanza=[%v] doest not exist", stanza.Name) + } + } + + return nil +} + +func ReloadPipelines(mapLoggers map[string]workers.Worker, mapCollectors map[string]workers.Worker, config *pkgconfig.Config, logger *logger.Logger) { + for _, stanza := range config.Pipelines { + newCfg := GetStanzaConfig(config, stanza) + if _, ok := mapLoggers[stanza.Name]; ok { + mapLoggers[stanza.Name].ReloadConfig(newCfg) + } else if _, ok := mapCollectors[stanza.Name]; ok { + mapCollectors[stanza.Name].ReloadConfig(newCfg) + } else { + logger.Info("main - reload config stanza=%v doest not exist", stanza.Name) + } + } +} diff --git a/pkginit/pipelines_test.go b/pkginit/pipelines_test.go new file mode 100644 index 00000000..36c8d975 --- /dev/null +++ b/pkginit/pipelines_test.go @@ -0,0 +1,113 @@ +package pkginit + +import ( + "strings" + "testing" + + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/telemetry" + "github.com/dmachard/go-dnscollector/workers" + "github.com/dmachard/go-logger" +) + +func TestPipelines_IsEnabled(t *testing.T) { + // Create a mock configuration for testing + config := &pkgconfig.Config{} + config.Pipelines = []pkgconfig.ConfigPipelines{{Name: "validroute"}} + + if !IsPipelinesEnabled(config) { + t.Errorf("pipelines should be 
enabled!") + } +} + +func TestPipelines_IsRouteExist(t *testing.T) { + // Create a mock configuration for testing + config := &pkgconfig.Config{} + config.Pipelines = []pkgconfig.ConfigPipelines{ + {Name: "validroute"}, + } + + // Case where the route exists + existingRoute := "validroute" + err := IsRouteExist(existingRoute, config) + if err != nil { + t.Errorf("For the existing route %s, an unexpected error was returned: %v", existingRoute, err) + } + + // Case where the route does not exist + nonExistingRoute := "non-existent-route" + err = IsRouteExist(nonExistingRoute, config) + if err == nil { + t.Errorf("For the non-existing route %s, an expected error was not returned. Received error: %v", nonExistingRoute, err) + } +} + +func TestPipelines_StanzaNameIsUniq(t *testing.T) { + // Create a mock configuration for testing + config := &pkgconfig.Config{} + config.Pipelines = []pkgconfig.ConfigPipelines{ + {Name: "unique-stanza"}, + {Name: "duplicate-stanza"}, + {Name: "duplicate-stanza"}, + } + + // Case where the stanza name is unique + uniqueStanzaName := "unique-stanza" + err := StanzaNameIsUniq(uniqueStanzaName, config) + if err != nil { + t.Errorf("For the unique stanza name %s, an unexpected error was returned: %v", uniqueStanzaName, err) + } + + // Case where the stanza name is not unique + duplicateStanzaName := "duplicate-stanza" + err = StanzaNameIsUniq(duplicateStanzaName, config) + if err == nil { + t.Errorf("For the duplicate stanza name %s, an expected error was not returned. 
Received error: %v", duplicateStanzaName, err) + } +} + +func TestPipelines_NoRoutesDefined(t *testing.T) { + // Create a mock configuration for testing + config := &pkgconfig.Config{} + config.Pipelines = []pkgconfig.ConfigPipelines{ + {Name: "stanzaA", RoutingPolicy: pkgconfig.PipelinesRouting{Forward: []string{}, Dropped: []string{}}}, + {Name: "stanzaB", RoutingPolicy: pkgconfig.PipelinesRouting{Forward: []string{}, Dropped: []string{}}}, + } + + mapLoggers := make(map[string]workers.Worker) + mapCollectors := make(map[string]workers.Worker) + + metrics := telemetry.NewPrometheusCollector(config) + err := InitPipelines(mapLoggers, mapCollectors, config, logger.New(false), metrics) + if err == nil { + t.Errorf("Want err, got nil") + } else if err.Error() != "no routes are defined" { + t.Errorf("Unexpected error: %s", err.Error()) + } +} + +func TestPipelines_RoutingLoop(t *testing.T) { + // Create a mock configuration for testing + config := &pkgconfig.Config{} + config.Pipelines = []pkgconfig.ConfigPipelines{ + { + Name: "stanzaA", + Params: map[string]interface{}{ + "dnstap": map[string]interface{}{"enable": true}, + }, + RoutingPolicy: pkgconfig.PipelinesRouting{Forward: []string{"stanzaA"}, Dropped: []string{}}, + }, + } + + mapLoggers := make(map[string]workers.Worker) + mapCollectors := make(map[string]workers.Worker) + + metrics := telemetry.NewPrometheusCollector(config) + err := InitPipelines(mapLoggers, mapCollectors, config, logger.New(false), metrics) + if err == nil { + t.Errorf("Want err, got nil") + } else if !strings.Contains(err.Error(), "routing error loop") { + t.Errorf("Unexpected error: %s", err.Error()) + } + +} diff --git a/pkglinker/pipelines.go b/pkglinker/pipelines.go deleted file mode 100644 index 6f9747af..00000000 --- a/pkglinker/pipelines.go +++ /dev/null @@ -1,245 +0,0 @@ -package pkglinker - -import ( - "fmt" - - "github.com/dmachard/go-dnscollector/collectors" - "github.com/dmachard/go-dnscollector/loggers" - 
"github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-logger" - "github.com/pkg/errors" - "gopkg.in/yaml.v2" -) - -func GetStanzaConfig(config *pkgconfig.Config, item pkgconfig.ConfigPipelines) *pkgconfig.Config { - - cfg := make(map[string]interface{}) - section := "collectors" - - // Enable the provided collector or loggers - for k, p := range item.Params { - // is a logger or collector ? - if !config.Loggers.IsValid(k) && !config.Collectors.IsValid(k) { - panic(fmt.Sprintln("main - get stanza config error")) - } - if config.Loggers.IsValid(k) { - section = "loggers" - } - if p == nil { - item.Params[k] = make(map[string]interface{}) - } - item.Params[k].(map[string]interface{})["enable"] = true - - // ignore other keys - break - } - - // prepare a new config - subcfg := &pkgconfig.Config{} - subcfg.SetDefault() - - cfg[section] = item.Params - cfg[section+"-transformers"] = make(map[string]interface{}) - - // add transformers - for k, v := range item.Transforms { - v.(map[string]interface{})["enable"] = true - cfg[section+"-transformers"].(map[string]interface{})[k] = v - } - - // copy global config - subcfg.Global = config.Global - - yamlcfg, _ := yaml.Marshal(cfg) - if err := yaml.Unmarshal(yamlcfg, subcfg); err != nil { - panic(fmt.Sprintf("main - yaml logger config error: %v", err)) - } - - return subcfg -} - -func StanzaNameIsUniq(name string, config *pkgconfig.Config) (ret error) { - stanzaCounter := 0 - for _, stanza := range config.Pipelines { - if name == stanza.Name { - stanzaCounter += 1 - } - } - - if stanzaCounter > 1 { - return fmt.Errorf("stanza=%s allready exists", name) - } - return nil -} - -func IsRouteExist(target string, config *pkgconfig.Config) (ret error) { - for _, stanza := range config.Pipelines { - if target == stanza.Name { - return nil - } - } - return fmt.Errorf("route=%s doest not exist", target) -} - -func CreateRouting(stanza 
pkgconfig.ConfigPipelines, mapCollectors map[string]pkgutils.Worker, mapLoggers map[string]pkgutils.Worker, logger *logger.Logger) { - var currentStanza pkgutils.Worker - if collector, ok := mapCollectors[stanza.Name]; ok { - currentStanza = collector - } - if logger, ok := mapLoggers[stanza.Name]; ok { - currentStanza = logger - } - - // TODO raise error when no routes are defined - - // default routing - for _, route := range stanza.RoutingPolicy.Default { - if _, ok := mapCollectors[route]; ok { - currentStanza.AddDefaultRoute(mapCollectors[route]) - logger.Info("main - routing (policy=default) stanza=[%s] to stanza=[%s]", stanza.Name, route) - } else if _, ok := mapLoggers[route]; ok { - currentStanza.AddDefaultRoute(mapLoggers[route]) - logger.Info("main - routing (policy=default) stanza=[%s] to stanza=[%s]", stanza.Name, route) - } else { - logger.Error("main - default routing error from stanza=%s to stanza=%s doest not exist", stanza.Name, route) - break - } - } - - // dropped routing - for _, route := range stanza.RoutingPolicy.Dropped { - if _, ok := mapCollectors[route]; ok { - currentStanza.AddDroppedRoute(mapCollectors[route]) - logger.Info("main - routing (policy=dropped) stanza=[%s] to stanza=[%s]", stanza.Name, route) - } else if _, ok := mapLoggers[route]; ok { - currentStanza.AddDroppedRoute(mapLoggers[route]) - logger.Info("main - routing (policy=dropped) stanza=[%s] to stanza=[%s]", stanza.Name, route) - } else { - logger.Error("main - routing error with dropped messages from stanza=%s to stanza=%s doest not exist", stanza.Name, route) - break - } - } -} - -func CreateStanza(stanzaName string, config *pkgconfig.Config, mapCollectors map[string]pkgutils.Worker, mapLoggers map[string]pkgutils.Worker, logger *logger.Logger) { - // register the logger if enabled - if config.Loggers.RestAPI.Enable { - mapLoggers[stanzaName] = loggers.NewRestAPI(config, logger, stanzaName) - } - if config.Loggers.Prometheus.Enable { - mapLoggers[stanzaName] = 
loggers.NewPrometheus(config, logger, stanzaName) - } - if config.Loggers.Stdout.Enable { - mapLoggers[stanzaName] = loggers.NewStdOut(config, logger, stanzaName) - } - if config.Loggers.LogFile.Enable { - mapLoggers[stanzaName] = loggers.NewLogFile(config, logger, stanzaName) - } - if config.Loggers.DNSTap.Enable { - mapLoggers[stanzaName] = loggers.NewDnstapSender(config, logger, stanzaName) - } - if config.Loggers.TCPClient.Enable { - mapLoggers[stanzaName] = loggers.NewTCPClient(config, logger, stanzaName) - } - if config.Loggers.Syslog.Enable { - mapLoggers[stanzaName] = loggers.NewSyslog(config, logger, stanzaName) - } - if config.Loggers.Fluentd.Enable { - mapLoggers[stanzaName] = loggers.NewFluentdClient(config, logger, stanzaName) - } - if config.Loggers.InfluxDB.Enable { - mapLoggers[stanzaName] = loggers.NewInfluxDBClient(config, logger, stanzaName) - } - if config.Loggers.LokiClient.Enable { - mapLoggers[stanzaName] = loggers.NewLokiClient(config, logger, stanzaName) - } - if config.Loggers.Statsd.Enable { - mapLoggers[stanzaName] = loggers.NewStatsdClient(config, logger, stanzaName) - } - if config.Loggers.ElasticSearchClient.Enable { - mapLoggers[stanzaName] = loggers.NewElasticSearchClient(config, logger, stanzaName) - } - if config.Loggers.ScalyrClient.Enable { - mapLoggers[stanzaName] = loggers.NewScalyrClient(config, logger, stanzaName) - } - if config.Loggers.RedisPub.Enable { - mapLoggers[stanzaName] = loggers.NewRedisPub(config, logger, stanzaName) - } - if config.Loggers.KafkaProducer.Enable { - mapLoggers[stanzaName] = loggers.NewKafkaProducer(config, logger, stanzaName) - } - if config.Loggers.FalcoClient.Enable { - mapLoggers[stanzaName] = loggers.NewFalcoClient(config, logger, stanzaName) - } - - // register the collector if enabled - if config.Collectors.DNSMessage.Enable { - mapCollectors[stanzaName] = collectors.NewDNSMessage(nil, config, logger, stanzaName) - } - if config.Collectors.Dnstap.Enable { - mapCollectors[stanzaName] = 
collectors.NewDnstap(nil, config, logger, stanzaName) - } - if config.Collectors.DnstapProxifier.Enable { - mapCollectors[stanzaName] = collectors.NewDnstapProxifier(nil, config, logger, stanzaName) - } - if config.Collectors.AfpacketLiveCapture.Enable { - mapCollectors[stanzaName] = collectors.NewAfpacketSniffer(nil, config, logger, stanzaName) - } - if config.Collectors.XdpLiveCapture.Enable { - mapCollectors[stanzaName] = collectors.NewXDPSniffer(nil, config, logger, stanzaName) - } - if config.Collectors.Tail.Enable { - mapCollectors[stanzaName] = collectors.NewTail(nil, config, logger, stanzaName) - } - if config.Collectors.PowerDNS.Enable { - mapCollectors[stanzaName] = collectors.NewProtobufPowerDNS(nil, config, logger, stanzaName) - } - if config.Collectors.FileIngestor.Enable { - mapCollectors[stanzaName] = collectors.NewFileIngestor(nil, config, logger, stanzaName) - } - if config.Collectors.Tzsp.Enable { - mapCollectors[stanzaName] = collectors.NewTZSP(nil, config, logger, stanzaName) - } -} - -func InitPipelines(mapLoggers map[string]pkgutils.Worker, mapCollectors map[string]pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger) error { - // check if the name of each stanza is uniq - for _, stanza := range config.Pipelines { - if err := StanzaNameIsUniq(stanza.Name, config); err != nil { - return errors.Errorf("stanza with name=[%s] is duplicated", stanza.Name) - } - } - - // check if all routes exists before continue - for _, stanza := range config.Pipelines { - for _, route := range stanza.RoutingPolicy.Default { - if err := IsRouteExist(route, config); err != nil { - return errors.Errorf("stanza=[%s] default route=[%s] doest not exist", stanza.Name, route) - } - } - for _, route := range stanza.RoutingPolicy.Dropped { - if err := IsRouteExist(route, config); err != nil { - return errors.Errorf("stanza=[%s] dropped route=[%s] doest not exist", stanza.Name, route) - } - } - } - - // read each stanza and init - for _, stanza := range 
config.Pipelines { - stanzaConfig := GetStanzaConfig(config, stanza) - CreateStanza(stanza.Name, stanzaConfig, mapCollectors, mapLoggers, logger) - - } - - // create routing - for _, stanza := range config.Pipelines { - if mapCollectors[stanza.Name] != nil || mapLoggers[stanza.Name] != nil { - CreateRouting(stanza, mapCollectors, mapLoggers, logger) - } else { - return errors.Errorf("stanza=[%v] doest not exist", stanza.Name) - } - } - - return nil -} diff --git a/pkglinker/pipelines_test.go b/pkglinker/pipelines_test.go deleted file mode 100644 index dd62b589..00000000 --- a/pkglinker/pipelines_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package pkglinker - -import ( - "testing" - - "github.com/dmachard/go-dnscollector/pkgconfig" -) - -func TestPipeline_IsRouteExist(t *testing.T) { - // Create a mock configuration for testing - config := &pkgconfig.Config{} - config.Pipelines = []pkgconfig.ConfigPipelines{ - {Name: "validroute"}, - } - - // Case where the route exists - existingRoute := "validroute" - err := IsRouteExist(existingRoute, config) - if err != nil { - t.Errorf("For the existing route %s, an unexpected error was returned: %v", existingRoute, err) - } - - // Case where the route does not exist - nonExistingRoute := "non-existent-route" - err = IsRouteExist(nonExistingRoute, config) - if err == nil { - t.Errorf("For the non-existing route %s, an expected error was not returned. 
Received error: %v", nonExistingRoute, err) - } -} - -func TestPipeline_StanzaNameIsUniq(t *testing.T) { - // Create a mock configuration for testing - config := &pkgconfig.Config{} - config.Pipelines = []pkgconfig.ConfigPipelines{ - {Name: "unique-stanza"}, - {Name: "duplicate-stanza"}, - {Name: "duplicate-stanza"}, - } - - // Case where the stanza name is unique - uniqueStanzaName := "unique-stanza" - err := StanzaNameIsUniq(uniqueStanzaName, config) - if err != nil { - t.Errorf("For the unique stanza name %s, an unexpected error was returned: %v", uniqueStanzaName, err) - } - - // Case where the stanza name is not unique - duplicateStanzaName := "duplicate-stanza" - err = StanzaNameIsUniq(duplicateStanzaName, config) - if err == nil { - t.Errorf("For the duplicate stanza name %s, an expected error was not returned. Received error: %v", duplicateStanzaName, err) - } -} diff --git a/pkgutils/configchecker.go b/pkgutils/configchecker.go deleted file mode 100644 index afffb1aa..00000000 --- a/pkgutils/configchecker.go +++ /dev/null @@ -1,438 +0,0 @@ -package pkgutils - -import ( - "fmt" - "io" - "os" - "reflect" - "regexp" - "strings" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/pkg/errors" - "gopkg.in/yaml.v3" -) - -var TextFormatSplitter string = " " - -func ReloadConfig(configPath string, config *pkgconfig.Config, dmRef dnsutils.DNSMessage) error { - // Open config file - configFile, err := os.Open(configPath) - if err != nil { - return nil - } - defer configFile.Close() - - // Check config to detect unknown keywords - if err := CheckConfig(configPath, dmRef); err != nil { - return err - } - - // Init new YAML decode - d := yaml.NewDecoder(configFile) - - // Start YAML decoding from file - if err := d.Decode(&config); err != nil { - return err - } - return nil -} - -func LoadConfig(configPath string, dmRef dnsutils.DNSMessage) (*pkgconfig.Config, error) { - // Open 
config file - configFile, err := os.Open(configPath) - if err != nil { - return nil, err - } - defer configFile.Close() - - // Check config to detect unknown keywords - if err := CheckConfig(configPath, dmRef); err != nil { - return nil, err - } - - // Init new YAML decode - d := yaml.NewDecoder(configFile) - - // Start YAML decoding to go - config := &pkgconfig.Config{} - config.SetDefault() - - if err := d.Decode(&config); err != nil { - return nil, err - } - - return config, nil -} - -func CheckConfig(userConfigPath string, dmRef dnsutils.DNSMessage) error { - - flatDmRef, err := dmRef.Flatten() - if err != nil { - return err - } - - // create default config - // and simulate items in multiplexer and pipelines mode - defaultConfig := &pkgconfig.Config{} - defaultConfig.SetDefault() - defaultConfig.Multiplexer.Routes = append(defaultConfig.Multiplexer.Routes, pkgconfig.MultiplexRoutes{}) - defaultConfig.Multiplexer.Loggers = append(defaultConfig.Multiplexer.Loggers, pkgconfig.MultiplexInOut{}) - defaultConfig.Multiplexer.Collectors = append(defaultConfig.Multiplexer.Collectors, pkgconfig.MultiplexInOut{}) - defaultConfig.Pipelines = append(defaultConfig.Pipelines, pkgconfig.ConfigPipelines{}) - - // Convert default config to map - // And get unique YAML keys - defaultConfigMap, err := convertConfigToMap(defaultConfig) - if err != nil { - return errors.Wrap(err, "error converting default config to map") - } - - defaultKeywords := getUniqueKeywords(defaultConfigMap) - - // add DNSMessage default keys - for k := range flatDmRef { - defaultKeywords[k] = true - } - - // Read user configuration file - // And get unique YAML keys from user config - userConfigMap, err := loadUserConfigToMap(userConfigPath) - if err != nil { - return err - } - userKeywords := getUniqueKeywords(userConfigMap) - - // Check for unknown keys in user config - // ignore dynamic keys as atags.tags.*: google - - // Define regular expressions to match dynamic keys - regexPatterns := 
[]string{`\.\*(\.)?`, `\.(\d+)(\.)?`} - - for key := range userKeywords { - // Ignore dynamic keys that contain ".*" or .[digits]. - matched := false - for _, pattern := range regexPatterns { - match, _ := regexp.MatchString(pattern, key) - if match { - matched = true - break - } - } - if matched { - continue - } - - // search in default keywords - if _, ok := defaultKeywords[key]; !ok { - return errors.Errorf("unknown YAML key `%s` in configuration", key) - } - } - - // detect bad keyword position - err = checkKeywordsPosition(userConfigMap, defaultConfigMap, defaultConfigMap, "") - if err != nil { - return err - } - - // check for invalid text directives - // the text-format key is reserved - // search this key on all keys - err = checkTextFormatKey(userConfigMap, dmRef) - if err != nil { - fmt.Printf("checkTextFormatKey returned: %v\n", err) - return err - } - - return nil -} - -func checkTextFormatKey(data map[string]interface{}, dmRef dnsutils.DNSMessage) error { - key := "text-format" - for k, v := range data { - if k == "global" { - if rec, ok := v.(map[string]interface{}); ok { - for k2, v2 := range rec { - // fmt.Printf("k2: %s , v2 : %s\n" , k2 , v2 ) - if k2 == "text-format-splitter" { - TextFormatSplitter=v2.(string) - } - } - } - } - - if k == key { - if str, ok := v.(string); ok { - myarray := strings.Split(str,TextFormatSplitter) - _, err := dmRef.ToTextLine(myarray , "", "") - if err != nil { - return err - } - } - } - if nestedMap, ok := v.(map[string]interface{}); ok { - err := checkTextFormatKey(nestedMap, dmRef) - if err != nil { - return err - } - } - if nestedSlice, ok := v.([]interface{}); ok { - for _, item := range nestedSlice { - if nestedMap, ok := item.(map[string]interface{}); ok { - err := checkTextFormatKey(nestedMap, dmRef) - if err != nil { - return err - } - } - } - } - } - return nil -} - -func checkKeywordsPosition(nextUserCfg, nextDefCfg map[string]interface{}, defaultConf map[string]interface{}, sectionName string) error { - 
for k, v := range nextUserCfg { - // Check if the key is present in the default config - if len(nextDefCfg) > 0 { - if _, ok := nextDefCfg[k]; !ok { - if sectionName == "" { - return errors.Errorf("invalid key `%s` at root", k) - } - return errors.Errorf("invalid key `%s` in section `%s`", k, sectionName) - } - } - - // If the value is a map, recursively check for invalid keywords - // Recursive call ? - val := reflect.ValueOf(v) - if val.Kind() == reflect.Map { - nextSectionName := fmt.Sprintf("%s.%s", sectionName, k) - if err := checkKeywordsPosition(v.(map[string]interface{}), nextDefCfg[k].(map[string]interface{}), defaultConf, nextSectionName); err != nil { - return err - } - } - - // If the value is a slice and we are in the multiplexer part - // Multiplixer part is dynamic, we need specific function to check it - if val.Kind() == reflect.Slice && sectionName == ".multiplexer" { - if err := checkMultiplexerConfig(val, nextDefCfg[k].([]interface{}), defaultConf, k); err != nil { - return err - } - } - - // If the value is a slice and we are in the pipelines part - if val.Kind() == reflect.Slice && k == "pipelines" { - if err := checkPipelinesConfig(val, nextDefCfg[k].([]interface{}), defaultConf, k); err != nil { - return err - } - } - } - return nil -} - -func checkPipelinesConfig(currentVal reflect.Value, currentRef []interface{}, defaultConf map[string]interface{}, k string) error { - refLoggers := defaultConf[pkgconfig.KeyLoggers].(map[string]interface{}) - refCollectors := defaultConf[pkgconfig.KeyCollectors].(map[string]interface{}) - refTransforms := defaultConf["collectors-transformers"].(map[string]interface{}) - - for pos, item := range currentVal.Interface().([]interface{}) { - valReflect := reflect.ValueOf(item) - refItem := currentRef[0].(map[string]interface{}) - if valReflect.Kind() == reflect.Map { - for _, key := range valReflect.MapKeys() { - strKey := key.Interface().(string) - mapVal := valReflect.MapIndex(key) - - if _, ok := 
refItem[strKey]; !ok { - // Check if the key exists in neither loggers nor collectors - loggerExists := refLoggers[strKey] != nil - collectorExists := refCollectors[strKey] != nil - if !loggerExists && !collectorExists { - return errors.Errorf("invalid `%s` in `%s` pipelines at position %d", strKey, k, pos) - } - - // check logger or collectors - if loggerExists || collectorExists { - nextSectionName := fmt.Sprintf("%s[%d].%s", k, pos, strKey) - refMap := refLoggers - if collectorExists { - refMap = refCollectors - } - // Type assertion to check if the value is a map - if value, ok := mapVal.Interface().(map[string]interface{}); ok { - if err := checkKeywordsPosition(value, refMap[strKey].(map[string]interface{}), defaultConf, nextSectionName); err != nil { - return err - } - } else { - return errors.Errorf("invalid `%s` value in `%s` pipelines at position %d", strKey, k, pos) - } - } - } - - // Check transforms section - // Type assertion to check if the value is a map - if strKey == "transforms" { - nextSectionName := fmt.Sprintf("%s.%s", k, strKey) - if value, ok := mapVal.Interface().(map[string]interface{}); ok { - if err := checkKeywordsPosition(value, refTransforms, defaultConf, nextSectionName); err != nil { - return err - } - } else { - return errors.Errorf("invalid `%s` value in `%s` pipelines at position %d", strKey, k, pos) - } - } - } - } else { - return errors.Errorf("invalid item type in pipelines list: %s", valReflect.Kind()) - } - } - return nil -} - -func checkMultiplexerConfig(currentVal reflect.Value, currentRef []interface{}, defaultConf map[string]interface{}, k string) error { - refLoggers := defaultConf[pkgconfig.KeyLoggers].(map[string]interface{}) - refCollectors := defaultConf[pkgconfig.KeyCollectors].(map[string]interface{}) - refTransforms := defaultConf["collectors-transformers"].(map[string]interface{}) - - // iter over the slice - for pos, item := range currentVal.Interface().([]interface{}) { - valReflect := reflect.ValueOf(item) - 
refItem := currentRef[0].(map[string]interface{}) - if valReflect.Kind() == reflect.Map { - for _, key := range valReflect.MapKeys() { - strKey := key.Interface().(string) - mapVal := valReflect.MapIndex(key) - - // First, check in the initial configuration reference. - // If not found, then look in the logger and collector references. - if _, ok := refItem[strKey]; !ok { - // we are in routes section ? - if !(k == pkgconfig.KeyCollectors || k == pkgconfig.KeyLoggers) { - return errors.Errorf("invalid `%s` in `%s` list at position %d", strKey, k, pos) - } - - // Check if the key exists in neither loggers nor collectors - loggerExists := refLoggers[strKey] != nil - collectorExists := refCollectors[strKey] != nil - if !loggerExists && !collectorExists { - return errors.Errorf("invalid `%s` in `%s` list at position %d", strKey, k, pos) - } - - // check logger or collectors - if k == pkgconfig.KeyLoggers || k == pkgconfig.KeyCollectors { - nextSectionName := fmt.Sprintf("%s[%d].%s", k, pos, strKey) - refMap := refLoggers - if k == pkgconfig.KeyCollectors { - refMap = refCollectors - } - - // Type assertion to check if the value is a map - if value, ok := mapVal.Interface().(map[string]interface{}); ok { - if _, ok := refMap[strKey]; !ok { - return errors.Errorf("invalid logger `%s`", strKey) - } - if err := checkKeywordsPosition(value, refMap[strKey].(map[string]interface{}), defaultConf, nextSectionName); err != nil { - return err - } - } else { - return errors.Errorf("invalid `%s` value in `%s` list at position %d", strKey, k, pos) - } - } - } - - // Check transforms section - // Type assertion to check if the value is a map - if strKey == "transforms" { - nextSectionName := fmt.Sprintf("%s.%s", k, strKey) - if value, ok := mapVal.Interface().(map[string]interface{}); ok { - if err := checkKeywordsPosition(value, refTransforms, defaultConf, nextSectionName); err != nil { - return err - } - } else { - return errors.Errorf("invalid `%s` value in `%s` list at position 
%d", strKey, k, pos) - } - } - } - } else { - return errors.Errorf("invalid item type in multiplexer list: %s", valReflect.Kind()) - } - } - return nil -} - -func convertConfigToMap(config *pkgconfig.Config) (map[string]interface{}, error) { - // Convert config to YAML - yamlData, err := yaml.Marshal(config) - if err != nil { - return nil, err - } - - // Convert YAML to map - configMap := make(map[string]interface{}) - err = yaml.Unmarshal(yamlData, &configMap) - if err != nil { - return nil, err - } - - return configMap, nil -} - -func loadUserConfigToMap(configPath string) (map[string]interface{}, error) { - // Read user configuration file - configFile, err := os.Open(configPath) - if err != nil { - return nil, err - } - defer configFile.Close() - - // Read config file bytes - configBytes, err := io.ReadAll(configFile) - if err != nil { - return nil, errors.Wrap(err, "Error reading configuration file") - } - - // Unmarshal YAML to map - userConfigMap := make(map[string]interface{}) - err = yaml.Unmarshal(configBytes, &userConfigMap) - if err != nil { - return nil, errors.Wrap(err, "error parsing YAML file") - } - - return userConfigMap, nil -} - -func getUniqueKeywords(s map[string]interface{}) map[string]bool { - keys := extractYamlKeys(s) - uniqueKeys := make(map[string]bool) - for _, key := range keys { - if _, ok := uniqueKeys[key]; ok { - continue - } - uniqueKeys[key] = true - } - return uniqueKeys -} - -func extractYamlKeys(s map[string]interface{}) []string { - keys := []string{} - for k, v := range s { - keys = append(keys, k) - val := reflect.ValueOf(v) - if val.Kind() == reflect.Map { - nextKeys := extractYamlKeys(val.Interface().(map[string]interface{})) - keys = append(keys, nextKeys...) - } - if val.Kind() == reflect.Slice { - for _, v2 := range val.Interface().([]interface{}) { - val2 := reflect.ValueOf(v2) - if val2.Kind() == reflect.Map { - nextKeys := extractYamlKeys(val2.Interface().(map[string]interface{})) - keys = append(keys, nextKeys...) 
- } - } - } - - } - return keys -} diff --git a/pkgutils/configchecker_test.go b/pkgutils/configchecker_test.go deleted file mode 100644 index 33130427..00000000 --- a/pkgutils/configchecker_test.go +++ /dev/null @@ -1,358 +0,0 @@ -package pkgutils - -import ( - "os" - "testing" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/pkg/errors" -) - -// Valid minimal user configuration -func TestConfig_CheckConfig_Valid(t *testing.T) { - // Create a temporary file for the user configuration - userConfigFile, err := os.CreateTemp("", "user-config.yaml") - if err != nil { - t.Fatal("Error creating temporary file:", err) - } - defer os.Remove(userConfigFile.Name()) - defer userConfigFile.Close() - - validUserConfigContent := ` -global: - trace: false -multiplexer: - routes: - - from: [test-route] - loggers: - - name: test-logger - collectors: - - name: test-collector -` - err = os.WriteFile(userConfigFile.Name(), []byte(validUserConfigContent), 0644) - if err != nil { - t.Fatal("Error writing to user configuration file:", err) - } - - dm := dnsutils.GetReferenceDNSMessage() - if err := CheckConfig(userConfigFile.Name(), dm); err != nil { - t.Errorf("failed: Unexpected error: %v", err) - } -} - -// Invalid user configuration with an unknown key -func TestConfig_CheckConfig_UnknownKeywords(t *testing.T) { - userConfigFile, err := os.CreateTemp("", "user-config.yaml") - if err != nil { - t.Fatal("Error creating temporary file:", err) - } - defer os.Remove(userConfigFile.Name()) - defer userConfigFile.Close() - - userConfigContent := ` -global: - trace: false -multiplexer: - routes: - - from: [test-route] - unknown-key: invalid -` - err = os.WriteFile(userConfigFile.Name(), []byte(userConfigContent), 0644) - if err != nil { - t.Fatal("Error writing to user configuration file:", err) - } - - dm := dnsutils.GetReferenceDNSMessage() - expectedError := errors.Errorf("unknown YAML key `unknown-key` in configuration") - if err := 
CheckConfig(userConfigFile.Name(), dm); err == nil || err.Error() != expectedError.Error() { - t.Errorf("Expected error %v, but got %v", expectedError, err) - } -} - -// Ignore dynamic keys -func TestConfig_CheckConfig_IgnoreDynamicKeys(t *testing.T) { - userConfigFile, err := os.CreateTemp("", "user-config.yaml") - if err != nil { - t.Fatal("Error creating temporary file:", err) - } - defer os.Remove(userConfigFile.Name()) - defer userConfigFile.Close() - - userConfigContent := ` -global: - trace: false -pipelines: - - name: match - dnsmessage: - matching: - include: - atags.tags.*: test - atags.tags.2: test - dns.resources-records.*: test - dns.resources-records.10.rdata: test - dns.resources-records.*.ttl: test -` - err = os.WriteFile(userConfigFile.Name(), []byte(userConfigContent), 0644) - if err != nil { - t.Fatal("Error writing to user configuration file:", err) - } - - dm := dnsutils.GetReferenceDNSMessage() - if err := CheckConfig(userConfigFile.Name(), dm); err != nil { - t.Errorf("Expected no error, but got %v", err) - } -} - -// Keywork exist but not at the good position -func TestConfig_CheckConfig_BadKeywordPosition(t *testing.T) { - userConfigFile, err := os.CreateTemp("", "user-config.yaml") - if err != nil { - t.Fatal("Error creating temporary file:", err) - } - defer os.Remove(userConfigFile.Name()) - defer userConfigFile.Close() - - userConfigContent := ` -global: - trace: false - logger: bad-position -` - err = os.WriteFile(userConfigFile.Name(), []byte(userConfigContent), 0644) - if err != nil { - t.Fatal("Error writing to user configuration file:", err) - } - - dm := dnsutils.GetReferenceDNSMessage() - if err := CheckConfig(userConfigFile.Name(), dm); err == nil { - t.Errorf("Expected error, but got %v", err) - } -} - -// Valid multiplexer configuration -func TestConfig_CheckMultiplexerConfig_Valid(t *testing.T) { - userConfigFile, err := os.CreateTemp("", "user-config.yaml") - if err != nil { - t.Fatal("Error creating temporary file:", err) - 
} - defer os.Remove(userConfigFile.Name()) - defer userConfigFile.Close() - - userConfigContent := ` -multiplexer: - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - transforms: - normalize: - qname-lowercase: false - loggers: - - name: console - stdout: - mode: text - routes: - - from: [ tap ] - to: [ console ] -` - err = os.WriteFile(userConfigFile.Name(), []byte(userConfigContent), 0644) - if err != nil { - t.Fatal("Error writing to user configuration file:", err) - } - - dm := dnsutils.GetReferenceDNSMessage() - if err := CheckConfig(userConfigFile.Name(), dm); err != nil { - t.Errorf("failed: Unexpected error: %v", err) - } -} - -// Invalid multiplexer configuration -func TestConfig_CheckMultiplexerConfig_Invalid(t *testing.T) { - userConfigFile, err := os.CreateTemp("", "user-config.yaml") - if err != nil { - t.Fatal("Error creating temporary file:", err) - } - defer os.Remove(userConfigFile.Name()) - defer userConfigFile.Close() - - userConfigContent := ` -global: - trace: false -multiplexer: -- name: block - dnstap: - listen-ip: 0.0.0.0 - transforms: - normalize: - qname-lowercase: true -` - - err = os.WriteFile(userConfigFile.Name(), []byte(userConfigContent), 0644) - if err != nil { - t.Fatal("Error writing to user configuration file:", err) - } - - dm := dnsutils.GetReferenceDNSMessage() - if err := CheckConfig(userConfigFile.Name(), dm); err == nil { - t.Errorf("Expected error, but got %v", err) - } -} - -// https://github.com/dmachard/go-dnscollector/issues/565 -func TestConfig_CheckMultiplexerConfig_InvalidLogger(t *testing.T) { - userConfigFile, err := os.CreateTemp("", "user-config.yaml") - if err != nil { - t.Fatal("Error creating temporary file:", err) - } - defer os.Remove(userConfigFile.Name()) - defer userConfigFile.Close() - - // all keywords in this config are valid but the logger dnstap is not valid in this context - userConfigContent := ` -global: - trace: false -multiplexer: - collectors: - - name: tap 
- dnstap: - listen-ip: 0.0.0.0 - loggers: - - name: tapOut - dnstap: - listen-ip: 0.0.0.0 - routes: - - from: [ tapIn ] - to: [ tapOut ] -` - - err = os.WriteFile(userConfigFile.Name(), []byte(userConfigContent), 0644) - if err != nil { - t.Fatal("Error writing to user configuration file:", err) - } - - dm := dnsutils.GetReferenceDNSMessage() - if err := CheckConfig(userConfigFile.Name(), dm); err == nil { - t.Errorf("Expected error, but got %v", err) - } -} - -// Valid pipeline configuration -func TestConfig_CheckPipelinesConfig_Valid(t *testing.T) { - userConfigFile, err := os.CreateTemp("", "user-config.yaml") - if err != nil { - t.Fatal("Error creating temporary file:", err) - } - defer os.Remove(userConfigFile.Name()) - defer userConfigFile.Close() - - userConfigContent := ` -pipelines: -- name: dnsdist-main - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - routing-policy: - default: [ console ] - -- name: console - stdout: - mode: text -` - err = os.WriteFile(userConfigFile.Name(), []byte(userConfigContent), 0644) - if err != nil { - t.Fatal("Error writing to user configuration file:", err) - } - - dm := dnsutils.GetReferenceDNSMessage() - if err := CheckConfig(userConfigFile.Name(), dm); err != nil { - t.Errorf("failed: Unexpected error: %v", err) - } -} - -// Invalid pipeline configuration -func TestConfig_CheckPipelinesConfig_Invalid(t *testing.T) { - userConfigFile, err := os.CreateTemp("", "user-config.yaml") - if err != nil { - t.Fatal("Error creating temporary file:", err) - } - defer os.Remove(userConfigFile.Name()) - defer userConfigFile.Close() - - userConfigContent := ` -pipelines: -- name: dnsdist-main - dnstap: - listen-ip: 0.0.0.0 - transforms: - normalize: - qname-lowercase: true - routing-policy: - default: [ console ] -` - - err = os.WriteFile(userConfigFile.Name(), []byte(userConfigContent), 0644) - if err != nil { - t.Fatal("Error writing to user configuration file:", err) - } - - dm := dnsutils.GetReferenceDNSMessage() - if err := 
CheckConfig(userConfigFile.Name(), dm); err == nil { - t.Errorf("Expected error, but got %v", err) - } -} - -// Invalid directives -func TestConfig_CheckMultiplexer_InvalidTextDirective(t *testing.T) { - userConfigFile, err := os.CreateTemp("", "user-config.yaml") - if err != nil { - t.Fatal("Error creating temporary file:", err) - } - defer os.Remove(userConfigFile.Name()) - defer userConfigFile.Close() - - userConfigContent := ` -multiplexer: - loggers: - - name: dnsdist-main - stdout: - text-format: "qtype latency reducer-occurences" -` - - err = os.WriteFile(userConfigFile.Name(), []byte(userConfigContent), 0644) - if err != nil { - t.Fatal("Error writing to user configuration file:", err) - } - - dm := dnsutils.GetReferenceDNSMessage() - if err = CheckConfig(userConfigFile.Name(), dm); err == nil { - t.Errorf("Expected error, but got nil") - } -} - -func TestConfig_CheckPipelines_InvalidTextDirective(t *testing.T) { - userConfigFile, err := os.CreateTemp("", "user-config.yaml") - if err != nil { - t.Fatal("Error creating temporary file:", err) - } - defer os.Remove(userConfigFile.Name()) - defer userConfigFile.Close() - - userConfigContent := ` -pipelines: -- name: dnsdist-main - stdout: - text-format: "qtype latency reducer-occurences" - routing-policy: - default: [ console ] -` - - err = os.WriteFile(userConfigFile.Name(), []byte(userConfigContent), 0644) - if err != nil { - t.Fatal("Error writing to user configuration file:", err) - } - - dm := dnsutils.GetReferenceDNSMessage() - if err = CheckConfig(userConfigFile.Name(), dm); err == nil { - t.Errorf("Expected error, but got nil") - } -} diff --git a/pkgutils/constant.go b/pkgutils/constant.go deleted file mode 100644 index 70ce345d..00000000 --- a/pkgutils/constant.go +++ /dev/null @@ -1,9 +0,0 @@ -package pkgutils - -var ( - PrefixLogProcessor = "processor - " - PrefixLogCollector = "collector - " - PrefixLogLogger = "logger - " - PrefixLogRouting = "routing - " - PrefixLogTransformer = "transformer - " 
-) diff --git a/pkgutils/routing.go b/pkgutils/routing.go deleted file mode 100644 index d3debdce..00000000 --- a/pkgutils/routing.go +++ /dev/null @@ -1,134 +0,0 @@ -package pkgutils - -import ( - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-logger" -) - -func GetRoutes(routes []Worker) ([]chan dnsutils.DNSMessage, []string) { - channels := []chan dnsutils.DNSMessage{} - names := []string{} - for _, p := range routes { - if c := p.GetInputChannel(); c != nil { - channels = append(channels, c) - names = append(names, p.GetName()) - } else { - panic("default routing to stanza=[" + p.GetName() + "] not supported") - } - } - return channels, names -} - -func GetName(name string) string { - return "[" + name + "] - " -} - -type RoutingHandler struct { - name string - logger *logger.Logger - config *pkgconfig.Config - stopRun chan bool - doneRun chan bool - droppedCount map[string]int - dropped chan string - droppedRoutes []Worker - defaultRoutes []Worker -} - -func NewRoutingHandler(config *pkgconfig.Config, console *logger.Logger, name string) RoutingHandler { - console.Info("routing - [%s] - initialization...", name) - rh := RoutingHandler{ - name: name, - logger: console, - config: config, - stopRun: make(chan bool), - doneRun: make(chan bool), - dropped: make(chan string), - droppedCount: map[string]int{}, - } - go rh.Run() - return rh -} - -func (rh *RoutingHandler) LogInfo(msg string, v ...interface{}) { - rh.logger.Info(PrefixLogRouting+GetName(rh.name)+msg, v...) -} - -func (rh *RoutingHandler) LogError(msg string, v ...interface{}) { - rh.logger.Error(PrefixLogRouting+GetName(rh.name)+msg, v...) 
-} - -func (rh *RoutingHandler) LogFatal(msg string) { - rh.logger.Error(PrefixLogRouting + GetName(rh.name) + msg) -} - -func (rh *RoutingHandler) AddDroppedRoute(wrk Worker) { - rh.droppedRoutes = append(rh.droppedRoutes, wrk) -} - -func (rh *RoutingHandler) AddDefaultRoute(wrk Worker) { - rh.defaultRoutes = append(rh.defaultRoutes, wrk) -} - -func (rh *RoutingHandler) SetDefaultRoutes(workers []Worker) { - rh.defaultRoutes = workers -} - -func (rh *RoutingHandler) GetDefaultRoutes() ([]chan dnsutils.DNSMessage, []string) { - return GetRoutes(rh.defaultRoutes) -} - -func (rh *RoutingHandler) GetDroppedRoutes() ([]chan dnsutils.DNSMessage, []string) { - return GetRoutes(rh.droppedRoutes) -} - -func (rh *RoutingHandler) Stop() { - rh.LogInfo("stopping to run...") - rh.stopRun <- true - <-rh.doneRun -} - -func (rh *RoutingHandler) Run() { - rh.LogInfo("running in background...") - nextBufferInterval := 10 * time.Second - nextBufferFull := time.NewTimer(nextBufferInterval) - -RUN_LOOP: - for { - select { - case <-rh.stopRun: - nextBufferFull.Stop() - rh.doneRun <- true - break RUN_LOOP - case stanzaName := <-rh.dropped: - if _, ok := rh.droppedCount[stanzaName]; !ok { - rh.droppedCount[stanzaName] = 1 - } else { - rh.droppedCount[stanzaName]++ - } - case <-nextBufferFull.C: - for v, k := range rh.droppedCount { - if k > 0 { - rh.LogError("stanza=%s buffer is full, %d packet(s) dropped", v, k) - rh.droppedCount[v] = 0 - } - } - nextBufferFull.Reset(nextBufferInterval) - } - } - - rh.LogInfo("run terminated") -} - -func (rh *RoutingHandler) SendTo(routes []chan dnsutils.DNSMessage, routesName []string, dm dnsutils.DNSMessage) { - for i := range routes { - select { - case routes[i] <- dm: - default: - rh.dropped <- routesName[i] - } - } -} diff --git a/pkgutils/routing_test.go b/pkgutils/routing_test.go deleted file mode 100644 index af6f7aa1..00000000 --- a/pkgutils/routing_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package pkgutils - -import ( - "fmt" - "regexp" - 
"testing" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-logger" -) - -const ( - ExpectedQname = "dns.collector" -) - -func Test_RoutingHandler_AddDefaultRoute(t *testing.T) { - // redirect stdout output to bytes buffer - logsChan := make(chan logger.LogEntry, 10) - lg := logger.New(true) - lg.SetOutputChannel((logsChan)) - - // create routing handler - rh := NewRoutingHandler(pkgconfig.GetFakeConfig(), lg, "test") - - // add default routes - nxt := NewFakeLogger() - rh.AddDefaultRoute(nxt) - - // get default routes - defaultRoutes, defaultNames := rh.GetDefaultRoutes() - - // send dns message - dmIn := dnsutils.GetFakeDNSMessage() - rh.SendTo(defaultRoutes, defaultNames, dmIn) - - // read dns message from next item - dmOut := <-nxt.GetInputChannel() - if dmOut.DNS.Qname != ExpectedQname { - t.Errorf("invalid qname in dns message: %s", dmOut.DNS.Qname) - } - - // stop - rh.Stop() -} - -func Test_RoutingHandler_BufferIsFull(t *testing.T) { - // redirect stdout output to bytes buffer - logsChan := make(chan logger.LogEntry, 10) - lg := logger.New(true) - lg.SetOutputChannel((logsChan)) - - // create routing handler - rh := NewRoutingHandler(pkgconfig.GetFakeConfig(), lg, "test") - - // add default routes - nxt := NewFakeLoggerWithBufferSize(1) - rh.AddDefaultRoute(nxt) - - // add a shot of dnsmessages to collector - defaultRoutes, defaultNames := rh.GetDefaultRoutes() - dmIn := dnsutils.GetFakeDNSMessage() - for i := 0; i < 512; i++ { - rh.SendTo(defaultRoutes, defaultNames, dmIn) - } - - // waiting monitor to run in consumer - time.Sleep(12 * time.Second) - - for entry := range logsChan { - fmt.Println(entry) - pattern := regexp.MustCompile(".*buffer is full, 511.*") - if pattern.MatchString(entry.Message) { - break - } - } - - // read dns message from dnstap consumer - dmOut := <-nxt.GetInputChannel() - if dmOut.DNS.Qname != ExpectedQname { - 
t.Errorf("invalid qname in dns message: %s", dmOut.DNS.Qname) - } - - // send second shot of packets to consumer - for i := 0; i < 1024; i++ { - rh.SendTo(defaultRoutes, defaultNames, dmIn) - } - - // waiting monitor to run in consumer - time.Sleep(12 * time.Second) - for entry := range logsChan { - fmt.Println(entry) - pattern := regexp.MustCompile(".*buffer is full, 1023.*") - if pattern.MatchString(entry.Message) { - break - } - } - - // read dns message from dnstap consumer - dmOut2 := <-nxt.GetInputChannel() - if dmOut2.DNS.Qname != ExpectedQname { - t.Errorf("invalid qname in second dns message: %s", dmOut2.DNS.Qname) - } - - // stop - rh.Stop() -} diff --git a/pkgutils/utils.go b/pkgutils/utils.go deleted file mode 100644 index 415db0ee..00000000 --- a/pkgutils/utils.go +++ /dev/null @@ -1,62 +0,0 @@ -package pkgutils - -import ( - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" -) - -type Worker interface { - AddDefaultRoute(wrk Worker) - AddDroppedRoute(wrk Worker) - SetLoggers(loggers []Worker) - GetName() string - Stop() - Run() - GetInputChannel() chan dnsutils.DNSMessage - ReadConfig() - ReloadConfig(config *pkgconfig.Config) -} - -type FakeLogger struct { - inputChan chan dnsutils.DNSMessage - outputChan chan dnsutils.DNSMessage - name string -} - -func NewFakeLogger() *FakeLogger { - o := &FakeLogger{ - inputChan: make(chan dnsutils.DNSMessage, 512), - outputChan: make(chan dnsutils.DNSMessage, 512), - name: "fake", - } - return o -} - -func NewFakeLoggerWithBufferSize(bufferSize int) *FakeLogger { - o := &FakeLogger{ - inputChan: make(chan dnsutils.DNSMessage, bufferSize), - outputChan: make(chan dnsutils.DNSMessage, bufferSize), - name: "fake", - } - return o -} - -func (c *FakeLogger) GetName() string { return c.name } - -func (c *FakeLogger) AddDefaultRoute(wrk Worker) {} - -func (c *FakeLogger) AddDroppedRoute(wrk Worker) {} - -func (c *FakeLogger) SetLoggers(loggers []Worker) {} - 
-func (c *FakeLogger) ReadConfig() {} - -func (c *FakeLogger) ReloadConfig(config *pkgconfig.Config) {} - -func (c *FakeLogger) Stop() {} - -func (c *FakeLogger) GetInputChannel() chan dnsutils.DNSMessage { - return c.inputChan -} - -func (c *FakeLogger) Run() {} diff --git a/pkgutils/utils_test.go b/pkgutils/utils_test.go deleted file mode 100644 index e6d34b7e..00000000 --- a/pkgutils/utils_test.go +++ /dev/null @@ -1,7 +0,0 @@ -package pkgutils - -import "testing" - -func TestFakeLoggerImplementsWorkerInterface(t *testing.T) { - var _ Worker = &FakeLogger{} -} diff --git a/processors/constants.go b/processors/constants.go deleted file mode 100644 index f8ba88f2..00000000 --- a/processors/constants.go +++ /dev/null @@ -1,9 +0,0 @@ -package processors - -const ( - ExpectedQname = "dnscollector.dev" - ExpectedQname2 = "dns.collector" - ExpectedBufferMsg511 = ".*buffer is full, 511.*" - ExpectedBufferMsg1023 = ".*buffer is full, 1023.*" - ExpectedIdentity = "powerdnspb" -) diff --git a/processors/dns.go b/processors/dns.go deleted file mode 100644 index 9ca33681..00000000 --- a/processors/dns.go +++ /dev/null @@ -1,218 +0,0 @@ -package processors - -import ( - "fmt" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-logger" - "github.com/miekg/dns" -) - -func GetFakeDNS() ([]byte, error) { - dnsmsg := new(dns.Msg) - dnsmsg.SetQuestion("dns.collector.", dns.TypeA) - return dnsmsg.Pack() -} - -type DNSProcessor struct { - doneRun chan bool - stopRun chan bool - doneMonitor chan bool - stopMonitor chan bool - recvFrom chan dnsutils.DNSMessage - logger *logger.Logger - config *pkgconfig.Config - ConfigChan chan *pkgconfig.Config - name string - RoutingHandler pkgutils.RoutingHandler - dropped chan string - droppedCount map[string]int -} - 
-func NewDNSProcessor(config *pkgconfig.Config, logger *logger.Logger, name string, size int) DNSProcessor { - logger.Info(pkgutils.PrefixLogProcessor+"[%s] dns - initialization...", name) - d := DNSProcessor{ - doneMonitor: make(chan bool), - doneRun: make(chan bool), - stopMonitor: make(chan bool), - stopRun: make(chan bool), - recvFrom: make(chan dnsutils.DNSMessage, size), - logger: logger, - config: config, - ConfigChan: make(chan *pkgconfig.Config), - name: name, - dropped: make(chan string), - droppedCount: map[string]int{}, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - } - return d -} - -func (d *DNSProcessor) LogInfo(msg string, v ...interface{}) { - d.logger.Info(pkgutils.PrefixLogProcessor+"["+d.name+"] dns - "+msg, v...) -} - -func (d *DNSProcessor) LogError(msg string, v ...interface{}) { - d.logger.Error(pkgutils.PrefixLogProcessor+"["+d.name+"] dns - "+msg, v...) -} - -func (d *DNSProcessor) GetChannel() chan dnsutils.DNSMessage { - return d.recvFrom -} - -func (d *DNSProcessor) GetChannelList() []chan dnsutils.DNSMessage { - channel := []chan dnsutils.DNSMessage{} - channel = append(channel, d.recvFrom) - return channel -} - -func (d *DNSProcessor) Stop() { - d.LogInfo("stopping processor...") - d.RoutingHandler.Stop() - - d.LogInfo("stopping to process...") - d.stopRun <- true - <-d.doneRun - - d.LogInfo("stopping monitor...") - d.stopMonitor <- true - <-d.doneMonitor -} - -func (d *DNSProcessor) Run(defaultWorkers []pkgutils.Worker, droppedworkers []pkgutils.Worker) { - // prepare next channels - defaultRoutes, defaultNames := pkgutils.GetRoutes(defaultWorkers) - droppedRoutes, droppedNames := pkgutils.GetRoutes(droppedworkers) - - // prepare enabled transformers - transforms := transformers.NewTransforms(&d.config.IngoingTransformers, d.logger, d.name, defaultRoutes, 0) - - // start goroutine to count dropped messsages - go d.MonitorLoggers() - - // read incoming dns message - d.LogInfo("waiting dns message to process...") 
-RUN_LOOP: - for { - select { - case cfg := <-d.ConfigChan: - d.config = cfg - transforms.ReloadConfig(&cfg.IngoingTransformers) - - case <-d.stopRun: - transforms.Reset() - d.doneRun <- true - break RUN_LOOP - - case dm, opened := <-d.recvFrom: - if !opened { - d.LogInfo("channel closed, exit") - return - } - - // init dns message with additionnals parts - transforms.InitDNSMessageFormat(&dm) - - // compute timestamp - ts := time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec)) - dm.DNSTap.Timestamp = ts.UnixNano() - dm.DNSTap.TimestampRFC3339 = ts.UTC().Format(time.RFC3339Nano) - - // decode the dns payload - dnsHeader, err := dnsutils.DecodeDNS(dm.DNS.Payload) - if err != nil { - dm.DNS.MalformedPacket = true - d.LogError("dns parser malformed packet: %s - %v+", err, dm) - } - - // dns reply ? - if dnsHeader.Qr == 1 { - dm.DNSTap.Operation = "CLIENT_RESPONSE" - dm.DNS.Type = dnsutils.DNSReply - qip := dm.NetworkInfo.QueryIP - qport := dm.NetworkInfo.QueryPort - dm.NetworkInfo.QueryIP = dm.NetworkInfo.ResponseIP - dm.NetworkInfo.QueryPort = dm.NetworkInfo.ResponsePort - dm.NetworkInfo.ResponseIP = qip - dm.NetworkInfo.ResponsePort = qport - } else { - dm.DNS.Type = dnsutils.DNSQuery - dm.DNSTap.Operation = dnsutils.DNSTapClientQuery - } - - if err = dnsutils.DecodePayload(&dm, &dnsHeader, d.config); err != nil { - d.LogError("%v - %v", err, dm) - } - - if dm.DNS.MalformedPacket { - if d.config.Global.Trace.LogMalformed { - d.LogInfo("payload: %v", dm.DNS.Payload) - } - } - - // apply all enabled transformers - if transforms.ProcessMessage(&dm) == transformers.ReturnDrop { - for i := range droppedRoutes { - select { - case droppedRoutes[i] <- dm: // Successful send to logger channel - default: - d.dropped <- droppedNames[i] - } - } - continue - } - - // convert latency to human - dm.DNSTap.LatencySec = fmt.Sprintf("%.6f", dm.DNSTap.Latency) - - // dispatch dns message to all generators - for i := range defaultRoutes { - select { - case defaultRoutes[i] <- 
dm: // Successful send to logger channel - default: - d.dropped <- defaultNames[i] - } - } - - } - } - d.LogInfo("processing terminated") -} - -func (d *DNSProcessor) MonitorLoggers() { - watchInterval := 10 * time.Second - bufferFull := time.NewTimer(watchInterval) -FOLLOW_LOOP: - for { - select { - case <-d.stopMonitor: - close(d.dropped) - bufferFull.Stop() - d.doneMonitor <- true - break FOLLOW_LOOP - - case loggerName := <-d.dropped: - if _, ok := d.droppedCount[loggerName]; !ok { - d.droppedCount[loggerName] = 1 - } else { - d.droppedCount[loggerName]++ - } - - case <-bufferFull.C: - - for v, k := range d.droppedCount { - if k > 0 { - d.LogError("logger[%s] buffer is full, %d packet(s) dropped", v, k) - d.droppedCount[v] = 0 - } - } - bufferFull.Reset(watchInterval) - - } - } - d.LogInfo("monitor terminated") -} diff --git a/processors/dnstap.go b/processors/dnstap.go deleted file mode 100644 index 75712c71..00000000 --- a/processors/dnstap.go +++ /dev/null @@ -1,399 +0,0 @@ -package processors - -import ( - "fmt" - "net" - "strconv" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/transformers" - "github.com/dmachard/go-dnstap-protobuf" - "github.com/dmachard/go-logger" - "google.golang.org/protobuf/proto" -) - -func GetFakeDNSTap(dnsquery []byte) *dnstap.Dnstap { - dtQuery := &dnstap.Dnstap{} - - dt := dnstap.Dnstap_MESSAGE - dtQuery.Identity = []byte("dnstap-generator") - dtQuery.Version = []byte("-") - dtQuery.Type = &dt - - mt := dnstap.Message_CLIENT_QUERY - sf := dnstap.SocketFamily_INET - sp := dnstap.SocketProtocol_UDP - - now := time.Now() - tsec := uint64(now.Unix()) - tnsec := uint32(uint64(now.UnixNano()) - uint64(now.Unix())*1e9) - - rport := uint32(53) - qport := uint32(5300) - - msg := 
&dnstap.Message{Type: &mt} - msg.SocketFamily = &sf - msg.SocketProtocol = &sp - msg.QueryAddress = net.ParseIP("127.0.0.1") - msg.QueryPort = &qport - msg.ResponseAddress = net.ParseIP("127.0.0.2") - msg.ResponsePort = &rport - - msg.QueryMessage = dnsquery - msg.QueryTimeSec = &tsec - msg.QueryTimeNsec = &tnsec - - dtQuery.Message = msg - return dtQuery -} - -type DNSTapProcessor struct { - ConnID int - doneRun chan bool - stopRun chan bool - doneMonitor chan bool - stopMonitor chan bool - recvFrom chan []byte - logger *logger.Logger - config *pkgconfig.Config - ConfigChan chan *pkgconfig.Config - name string - chanSize int - RoutingHandler pkgutils.RoutingHandler - dropped chan string - droppedCount map[string]int -} - -// func NewDNSTapProcessor(connID int, config *pkgconfig.Config, logger *logger.Logger, name string, size int) DNSTapProcessor { -func NewDNSTapProcessor( - connID int, - config *pkgconfig.Config, - logger *logger.Logger, - name string, - size int, -) DNSTapProcessor { - - logger.Info(pkgutils.PrefixLogProcessor+"[%s] dnstap - conn #%d - initialization...", name, connID) - - d := DNSTapProcessor{ - ConnID: connID, - doneMonitor: make(chan bool), - doneRun: make(chan bool), - stopMonitor: make(chan bool), - stopRun: make(chan bool), - recvFrom: make(chan []byte, size), - chanSize: size, - logger: logger, - config: config, - ConfigChan: make(chan *pkgconfig.Config), - name: name, - dropped: make(chan string), - droppedCount: map[string]int{}, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - } - - return d -} - -func (d *DNSTapProcessor) LogInfo(msg string, v ...interface{}) { - var log string - if d.ConnID == 0 { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] dnstap - ", d.name) - } else { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] dnstap - conn #%d - ", d.name, d.ConnID) - } - d.logger.Info(log+msg, v...) 
-} - -func (d *DNSTapProcessor) LogError(msg string, v ...interface{}) { - var log string - if d.ConnID == 0 { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] dnstap - ", d.name) - } else { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] dnstap - conn #%d - ", d.name, d.ConnID) - } - d.logger.Error(log+msg, v...) -} - -func (d *DNSTapProcessor) GetChannel() chan []byte { - return d.recvFrom -} - -func (d *DNSTapProcessor) Stop() { - d.LogInfo("stopping processor...") - d.RoutingHandler.Stop() - - d.LogInfo("stopping to process...") - d.stopRun <- true - <-d.doneRun - - d.LogInfo("stopping monitor...") - d.stopMonitor <- true - <-d.doneMonitor -} - -func (d *DNSTapProcessor) Run(defaultWorkers []pkgutils.Worker, droppedworkers []pkgutils.Worker) { - dt := &dnstap.Dnstap{} - d.LogInfo(dt.String()) - edt := &dnsutils.ExtendedDnstap{} - - // prepare next channels - defaultRoutes, defaultNames := pkgutils.GetRoutes(defaultWorkers) - droppedRoutes, droppedNames := pkgutils.GetRoutes(droppedworkers) - - // prepare enabled transformers - transforms := transformers.NewTransforms(&d.config.IngoingTransformers, d.logger, d.name, defaultRoutes, d.ConnID) - - // start goroutine to count dropped messsages - go d.MonitorLoggers() - - // read incoming dns message - d.LogInfo("waiting dns message to process...") -RUN_LOOP: - for { - select { - case cfg := <-d.ConfigChan: - d.config = cfg - transforms.ReloadConfig(&cfg.IngoingTransformers) - - case <-d.stopRun: - transforms.Reset() - d.doneRun <- true - break RUN_LOOP - - case data, opened := <-d.recvFrom: - if !opened { - d.LogInfo("channel closed, exit") - return - } - - err := proto.Unmarshal(data, dt) - if err != nil { - continue - } - - // init dns message - dm := dnsutils.DNSMessage{} - dm.Init() - - // init dns message with additionnals parts - transforms.InitDNSMessageFormat(&dm) - - identity := dt.GetIdentity() - if len(identity) > 0 { - dm.DNSTap.Identity = string(identity) - } - version := dt.GetVersion() - if 
len(version) > 0 { - dm.DNSTap.Version = string(version) - } - dm.DNSTap.Operation = dt.GetMessage().GetType().String() - - // extended extra field ? - if d.config.Collectors.Dnstap.ExtendedSupport { - err := proto.Unmarshal(dt.GetExtra(), edt) - if err != nil { - continue - } - - // get original extra value - originalExtra := string(edt.GetOriginalDnstapExtra()) - if len(originalExtra) > 0 { - dm.DNSTap.Extra = originalExtra - } - - // get atags - atags := edt.GetAtags() - if atags != nil { - dm.ATags = &dnsutils.TransformATags{ - Tags: atags.GetTags(), - } - } - - // get public suffix - norm := edt.GetNormalize() - if norm != nil { - dm.PublicSuffix = &dnsutils.TransformPublicSuffix{} - if len(norm.GetTld()) > 0 { - dm.PublicSuffix.QnamePublicSuffix = norm.GetTld() - } - if len(norm.GetEtldPlusOne()) > 0 { - dm.PublicSuffix.QnameEffectiveTLDPlusOne = norm.GetEtldPlusOne() - } - } - - // filtering - sampleRate := edt.GetFiltering() - if sampleRate != nil { - dm.Filtering = &dnsutils.TransformFiltering{} - dm.Filtering.SampleRate = int(sampleRate.SampleRate) - } - } else { - extra := string(dt.GetExtra()) - if len(extra) > 0 { - dm.DNSTap.Extra = extra - } - } - - if ipVersion, valid := netlib.IPVersion[dt.GetMessage().GetSocketFamily().String()]; valid { - dm.NetworkInfo.Family = ipVersion - } else { - dm.NetworkInfo.Family = pkgconfig.StrUnknown - } - - dm.NetworkInfo.Protocol = dt.GetMessage().GetSocketProtocol().String() - - // decode query address and port - queryip := dt.GetMessage().GetQueryAddress() - if len(queryip) > 0 { - dm.NetworkInfo.QueryIP = net.IP(queryip).String() - } - queryport := dt.GetMessage().GetQueryPort() - if queryport > 0 { - dm.NetworkInfo.QueryPort = strconv.FormatUint(uint64(queryport), 10) - } - - // decode response address and port - responseip := dt.GetMessage().GetResponseAddress() - if len(responseip) > 0 { - dm.NetworkInfo.ResponseIP = net.IP(responseip).String() - } - responseport := dt.GetMessage().GetResponsePort() - if 
responseport > 0 { - dm.NetworkInfo.ResponsePort = strconv.FormatUint(uint64(responseport), 10) - } - - // get dns payload and timestamp according to the type (query or response) - op := dnstap.Message_Type_value[dm.DNSTap.Operation] - if op%2 == 1 { - dnsPayload := dt.GetMessage().GetQueryMessage() - dm.DNS.Payload = dnsPayload - dm.DNS.Length = len(dnsPayload) - dm.DNS.Type = dnsutils.DNSQuery - dm.DNSTap.TimeSec = int(dt.GetMessage().GetQueryTimeSec()) - dm.DNSTap.TimeNsec = int(dt.GetMessage().GetQueryTimeNsec()) - } else { - dnsPayload := dt.GetMessage().GetResponseMessage() - dm.DNS.Payload = dnsPayload - dm.DNS.Length = len(dnsPayload) - dm.DNS.Type = dnsutils.DNSReply - dm.DNSTap.TimeSec = int(dt.GetMessage().GetResponseTimeSec()) - dm.DNSTap.TimeNsec = int(dt.GetMessage().GetResponseTimeNsec()) - } - - // policy - policyType := dt.GetMessage().GetPolicy().GetType() - if len(policyType) > 0 { - dm.DNSTap.PolicyType = policyType - } - - policyRule := string(dt.GetMessage().GetPolicy().GetRule()) - if len(policyRule) > 0 { - dm.DNSTap.PolicyRule = policyRule - } - - policyAction := dt.GetMessage().GetPolicy().GetAction().String() - if len(policyAction) > 0 { - dm.DNSTap.PolicyAction = policyAction - } - - policyMatch := dt.GetMessage().GetPolicy().GetMatch().String() - if len(policyMatch) > 0 { - dm.DNSTap.PolicyMatch = policyMatch - } - - policyValue := string(dt.GetMessage().GetPolicy().GetValue()) - if len(policyValue) > 0 { - dm.DNSTap.PolicyValue = policyValue - } - - // compute timestamp - ts := time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec)) - dm.DNSTap.Timestamp = ts.UnixNano() - dm.DNSTap.TimestampRFC3339 = ts.UTC().Format(time.RFC3339Nano) - - if !d.config.Collectors.Dnstap.DisableDNSParser { - // decode the dns payload to get id, rcode and the number of question - // number of answer, ignore invalid packet - dnsHeader, err := dnsutils.DecodeDNS(dm.DNS.Payload) - if err != nil { - // parser error - dm.DNS.MalformedPacket = true - 
d.LogInfo("dns parser malformed packet: %s", err) - } - - if err = dnsutils.DecodePayload(&dm, &dnsHeader, d.config); err != nil { - // decoding error - if d.config.Global.Trace.LogMalformed { - d.LogError("%v - %v", err, dm) - d.LogError("dump invalid dns payload: %v", dm.DNS.Payload) - } - } - } - - // apply all enabled transformers - if transforms.ProcessMessage(&dm) == transformers.ReturnDrop { - for i := range droppedRoutes { - select { - case droppedRoutes[i] <- dm: // Successful send to logger channel - default: - d.dropped <- droppedNames[i] - } - } - continue - } - - // convert latency to human - dm.DNSTap.LatencySec = fmt.Sprintf("%.6f", dm.DNSTap.Latency) - - // dispatch dns message to connected routes - for i := range defaultRoutes { - select { - case defaultRoutes[i] <- dm: // Successful send to logger channel - default: - d.dropped <- defaultNames[i] - } - } - - } - } - - d.LogInfo("processing terminated") -} - -func (d *DNSTapProcessor) MonitorLoggers() { - watchInterval := 10 * time.Second - bufferFull := time.NewTimer(watchInterval) -MONITOR_LOOP: - for { - select { - case <-d.stopMonitor: - close(d.dropped) - bufferFull.Stop() - d.doneMonitor <- true - break MONITOR_LOOP - - case loggerName := <-d.dropped: - if _, ok := d.droppedCount[loggerName]; !ok { - d.droppedCount[loggerName] = 1 - } else { - d.droppedCount[loggerName]++ - } - - case <-bufferFull.C: - for v, k := range d.droppedCount { - if k > 0 { - d.LogError("logger[%s] buffer is full, %d packet(s) dropped", v, k) - d.droppedCount[v] = 0 - } - } - bufferFull.Reset(watchInterval) - - } - } - d.LogInfo("monitor terminated") -} diff --git a/processors/dnstap_test.go b/processors/dnstap_test.go deleted file mode 100644 index 442236df..00000000 --- a/processors/dnstap_test.go +++ /dev/null @@ -1,352 +0,0 @@ -package processors - -import ( - "bytes" - "fmt" - "regexp" - "testing" - "time" - - "github.com/dmachard/go-dnscollector/dnsutils" - 
"github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnstap-protobuf" - "github.com/dmachard/go-logger" - "github.com/miekg/dns" - "google.golang.org/protobuf/proto" -) - -func Test_DnstapProcessor(t *testing.T) { - logger := logger.New(true) - var o bytes.Buffer - logger.SetOutput(&o) - - // init the dnstap consumer - consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), logger, "test", 512) - - // prepare dns query - dnsmsg := new(dns.Msg) - dnsmsg.SetQuestion(ExpectedQname+".", dns.TypeA) - dnsquestion, _ := dnsmsg.Pack() - - // prepare dnstap - dt := &dnstap.Dnstap{} - dt.Type = dnstap.Dnstap_Type.Enum(1) - - dt.Message = &dnstap.Message{} - dt.Message.Type = dnstap.Message_Type.Enum(5) - dt.Message.QueryMessage = dnsquestion - - data, _ := proto.Marshal(dt) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - dm := <-fl.GetInputChannel() - if dm.DNS.Qname != ExpectedQname { - t.Errorf("invalid qname in dns message: %s", dm.DNS.Qname) - } -} - -func Test_DnstapProcessor_MalformedDnsHeader(t *testing.T) { - logger := logger.New(true) - var o bytes.Buffer - logger.SetOutput(&o) - - // init the dnstap consumer - consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), logger, "test", 512) - // chanTo := make(chan dnsutils.DNSMessage, 512) - - // prepare dns query - dnsmsg := new(dns.Msg) - dnsmsg.SetQuestion("www.google.fr.", dns.TypeA) - dnsquestion, _ := dnsmsg.Pack() - - // prepare dnstap - dt := &dnstap.Dnstap{} - dt.Type = dnstap.Dnstap_Type.Enum(1) - - dt.Message = &dnstap.Message{} - dt.Message.Type = dnstap.Message_Type.Enum(5) - dt.Message.QueryMessage = dnsquestion[:4] - - data, _ := proto.Marshal(dt) - - // run the 
consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // go consumer.Run([]chan dnsutils.DNSMessage{chanTo}, []string{"test"}) - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - dm := <-fl.GetInputChannel() - if dm.DNS.MalformedPacket == false { - t.Errorf("malformed packet not detected") - } -} - -func Test_DnstapProcessor_MalformedDnsQuestion(t *testing.T) { - logger := logger.New(true) - var o bytes.Buffer - logger.SetOutput(&o) - - // init the dnstap consumer - consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), logger, "test", 512) - // chanTo := make(chan dnsutils.DNSMessage, 512) - - // prepare dns query - dnsquestion := []byte{88, 27, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 15, 100, 110, 115, 116, 97, 112, - 99, 111, 108, 108, 101, 99, 116, 111, 114, 4, 116, 101, 115, 116, 0} - - // prepare dnstap - dt := &dnstap.Dnstap{} - dt.Type = dnstap.Dnstap_Type.Enum(1) - - dt.Message = &dnstap.Message{} - dt.Message.Type = dnstap.Message_Type.Enum(5) - dt.Message.QueryMessage = dnsquestion - - data, _ := proto.Marshal(dt) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // go consumer.Run([]chan dnsutils.DNSMessage{chanTo}, []string{"test"}) - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - dm := <-fl.GetInputChannel() - if dm.DNS.MalformedPacket == false { - t.Errorf("malformed packet not detected") - } -} - -func Test_DnstapProcessor_MalformedDnsAnswer(t *testing.T) { - logger := logger.New(true) - var o bytes.Buffer - logger.SetOutput(&o) - - // init the dnstap consumer - consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), logger, "test", 512) - // chanTo := make(chan dnsutils.DNSMessage, 512) - - // prepare dns query - dnsanswer := []byte{46, 172, 1, 0, 
0, 1, 0, 1, 0, 0, 0, 0, 15, 100, 110, 115, 116, 97, 112, 99, 111, 108, 108, 101, 99, 116, - 111, 114, 4, 116, 101, 115, 116, 0, 0, 1, 0, 1, 15, 100, 110, 115, 116, 97, 112, 99, 111, 108, 108, 101, 99, 116, - 111, 114, 4, 116, 101, 115, 116, 0, 0, 1, 0, 1, 0, 0, 14, 16, 0, 4, 127, 0} - - // prepare dnstap - dt := &dnstap.Dnstap{} - dt.Type = dnstap.Dnstap_Type.Enum(1) - - dt.Message = &dnstap.Message{} - dt.Message.Type = dnstap.Message_Type.Enum(6) - dt.Message.ResponseMessage = dnsanswer - - data, _ := proto.Marshal(dt) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // go consumer.Run([]chan dnsutils.DNSMessage{chanTo}, []string{"test"}) - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - dm := <-fl.GetInputChannel() - if dm.DNS.MalformedPacket == false { - t.Errorf("malformed packet not detected") - } -} - -func Test_DnstapProcessor_DisableDNSParser(t *testing.T) { - logger := logger.New(true) - var o bytes.Buffer - logger.SetOutput(&o) - - // init the dnstap consumer - cfg := pkgconfig.GetFakeConfig() - cfg.Collectors.Dnstap.DisableDNSParser = true - - consumer := NewDNSTapProcessor(0, "peertest", cfg, logger, "test", 512) - - // prepare dns query - dnsmsg := new(dns.Msg) - dnsmsg.SetQuestion("www.google.fr.", dns.TypeA) - dnsquestion, _ := dnsmsg.Pack() - - // prepare dnstap - dt := &dnstap.Dnstap{} - dt.Type = dnstap.Dnstap_Type.Enum(1) - - dt.Message = &dnstap.Message{} - dt.Message.Type = dnstap.Message_Type.Enum(5) - dt.Message.QueryMessage = dnsquestion - - data, _ := proto.Marshal(dt) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - dm := <-fl.GetInputChannel() - if dm.DNS.ID != 0 { - t.Errorf("DNS ID should 
be equal to zero: %d", dm.DNS.ID) - } -} - -// test to decode the extended part -func Test_DnstapProcessor_Extended(t *testing.T) { - logger := logger.New(true) - var o bytes.Buffer - logger.SetOutput(&o) - - // init the dnstap consumer - cfg := pkgconfig.GetFakeConfig() - cfg.Collectors.Dnstap.ExtendedSupport = true - - consumer := NewDNSTapProcessor(0, "peertest", cfg, logger, "test", 512) - - // prepare dns query - dnsmsg := new(dns.Msg) - dnsmsg.SetQuestion("www.google.fr.", dns.TypeA) - dnsquestion, _ := dnsmsg.Pack() - - // prepare dnstap - dt := &dnstap.Dnstap{} - dt.Type = dnstap.Dnstap_Type.Enum(1) - - dt.Message = &dnstap.Message{} - dt.Message.Type = dnstap.Message_Type.Enum(5) - dt.Message.QueryMessage = dnsquestion - - edt := &dnsutils.ExtendedDnstap{} - edt.Atags = &dnsutils.ExtendedATags{ - Tags: []string{"atags:value"}, - } - edt.OriginalDnstapExtra = []byte("originalextrafield") - edt.Normalize = &dnsutils.ExtendedNormalize{ - Tld: "org", - EtldPlusOne: "dnscollector.org", - } - edt.Filtering = &dnsutils.ExtendedFiltering{ - SampleRate: 30, - } - edtData, _ := proto.Marshal(edt) - dt.Extra = edtData - - data, _ := proto.Marshal(dt) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packet to consumer - consumer.GetChannel() <- data - - // read dns message from dnstap consumer - dm := <-fl.GetInputChannel() - if dm.DNSTap.Extra != "originalextrafield" { - t.Errorf("invalid extra field: %s", dm.DNSTap.Extra) - } - if dm.ATags.Tags[0] != "atags:value" { - t.Errorf("invalid atags: %s", dm.ATags.Tags[0]) - } - if dm.PublicSuffix.QnameEffectiveTLDPlusOne != "dnscollector.org" { - t.Errorf("invalid etld+1: %s", dm.PublicSuffix.QnameEffectiveTLDPlusOne) - } - if dm.PublicSuffix.QnamePublicSuffix != "org" { - t.Errorf("invalid tld: %s", dm.PublicSuffix.QnamePublicSuffix) - } - if dm.Filtering.SampleRate != 30 { - t.Errorf("invalid sample rate: %d", 
dm.Filtering.SampleRate) - } -} - -// test for issue https://github.com/dmachard/go-dnscollector/issues/568 -func Test_DnstapProcessor_BufferLoggerIsFull(t *testing.T) { - // redirect stdout output to bytes buffer - logsChan := make(chan logger.LogEntry, 10) - lg := logger.New(true) - lg.SetOutputChannel((logsChan)) - - // init the dnstap consumer - consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetFakeConfig(), lg, "test", 512) - - // prepare dns query - dnsmsg := new(dns.Msg) - dnsmsg.SetQuestion(ExpectedQname+".", dns.TypeA) - dnsquestion, _ := dnsmsg.Pack() - - // prepare dnstap - dt := &dnstap.Dnstap{} - dt.Type = dnstap.Dnstap_Type.Enum(1) - - dt.Message = &dnstap.Message{} - dt.Message.Type = dnstap.Message_Type.Enum(5) - dt.Message.QueryMessage = dnsquestion - - data, _ := proto.Marshal(dt) - - // run the consumer with a fake logger - fl := pkgutils.NewFakeLoggerWithBufferSize(1) - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) - - // add packets to consumer - for i := 0; i < 512; i++ { - consumer.GetChannel() <- data - } - - // waiting monitor to run in consumer - time.Sleep(12 * time.Second) - - for entry := range logsChan { - fmt.Println(entry) - pattern := regexp.MustCompile(ExpectedBufferMsg511) - if pattern.MatchString(entry.Message) { - break - } - } - - // read dns message from dnstap consumer - dm := <-fl.GetInputChannel() - if dm.DNS.Qname != ExpectedQname { - t.Errorf("invalid qname in dns message: %s", dm.DNS.Qname) - } - - // send second shot of packets to consumer - for i := 0; i < 1024; i++ { - consumer.GetChannel() <- data - } - - // waiting monitor to run in consumer - time.Sleep(12 * time.Second) - - for entry := range logsChan { - fmt.Println(entry) - pattern := regexp.MustCompile(ExpectedBufferMsg1023) - if pattern.MatchString(entry.Message) { - break - } - } - - // read dns message from dnstap consumer - dm2 := <-fl.GetInputChannel() - if dm2.DNS.Qname != ExpectedQname { - t.Errorf("invalid qname in 
second dns message: %s", dm2.DNS.Qname) - } -} diff --git a/telemetry/prometheus.go b/telemetry/prometheus.go new file mode 100644 index 00000000..2c9ff99a --- /dev/null +++ b/telemetry/prometheus.go @@ -0,0 +1,252 @@ +package telemetry + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "net/http" + "os" + "regexp" + "sync" + "time" + + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors/version" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +/* +OpenMetrics and the Prometheus exposition format require the metric name +to consist only of alphanumericals and "_", ":" and they must not start +with digits. +*/ +var metricNameRegex = regexp.MustCompile(`_*[^0-9A-Za-z_]+_*`) + +func SanitizeMetricName(metricName string) string { + return metricNameRegex.ReplaceAllString(metricName, "_") +} + +type WorkerStats struct { + Name string + TotalIngress int + TotalEgress int + TotalForwardedPolicy int + TotalDroppedPolicy int + TotalDiscarded int +} + +type PrometheusCollector struct { + sync.Mutex + config *pkgconfig.Config + metrics map[string]*prometheus.Desc + Record chan WorkerStats + data map[string]WorkerStats // To store the worker stats + stop chan struct{} // Channel to signal stopping + promPrefix string +} + +func NewPrometheusCollector(config *pkgconfig.Config) *PrometheusCollector { + t := &PrometheusCollector{ + config: config, + Record: make(chan WorkerStats), + data: make(map[string]WorkerStats), + stop: make(chan struct{}), + } + + t.promPrefix = SanitizeMetricName(config.Global.Telemetry.PromPrefix) + + t.metrics = map[string]*prometheus.Desc{ + "worker_ingress_total": prometheus.NewDesc( + fmt.Sprintf("%s_worker_ingress_traffic_total", t.promPrefix), + "Ingress traffic associated to each worker", []string{"worker"}, nil), + "worker_egress_total": 
prometheus.NewDesc( + fmt.Sprintf("%s_worker_egress_traffic_total", t.promPrefix), + "Egress traffic associated to each worker", []string{"worker"}, nil), + "worker_discarded_total": prometheus.NewDesc( + fmt.Sprintf("%s_worker_discarded_traffic_total", t.promPrefix), + "Discarded traffic associated to each worker", []string{"worker"}, nil), + "policy_forwarded_total": prometheus.NewDesc( + fmt.Sprintf("%s_policy_forwarded_total", t.promPrefix), + "Total number of forwarded policy", []string{"worker"}, nil), + "policy_dropped_total": prometheus.NewDesc( + fmt.Sprintf("%s_policy_dropped_total", t.promPrefix), + "Total number of dropped policy", []string{"worker"}, nil), + } + return t +} + +func (t *PrometheusCollector) UpdateStats() { + for { + select { + case ws := <-t.Record: + t.Lock() + if _, ok := t.data[ws.Name]; !ok { + t.data[ws.Name] = ws + } else { + updatedWs := t.data[ws.Name] + updatedWs.TotalForwardedPolicy += ws.TotalForwardedPolicy + updatedWs.TotalDroppedPolicy += ws.TotalDroppedPolicy + updatedWs.TotalIngress += ws.TotalIngress + updatedWs.TotalEgress += ws.TotalEgress + updatedWs.TotalDiscarded += ws.TotalDiscarded + t.data[ws.Name] = updatedWs + } + t.Unlock() + case <-t.stop: + // Received stop signal, exit the goroutine + return + } + } +} +func (t *PrometheusCollector) Collect(ch chan<- prometheus.Metric) { + t.Lock() + defer t.Unlock() + + // Collect the forwarded and dropped metrics for each worker + for _, ws := range t.data { + ch <- prometheus.MustNewConstMetric( + t.metrics["worker_discarded_total"], + prometheus.CounterValue, + float64(ws.TotalDiscarded), + ws.Name, + ) + ch <- prometheus.MustNewConstMetric( + t.metrics["worker_ingress_total"], + prometheus.CounterValue, + float64(ws.TotalIngress), + ws.Name, + ) + ch <- prometheus.MustNewConstMetric( + t.metrics["worker_egress_total"], + prometheus.CounterValue, + float64(ws.TotalEgress), + ws.Name, + ) + ch <- prometheus.MustNewConstMetric( + t.metrics["policy_forwarded_total"], + 
prometheus.CounterValue, + float64(ws.TotalForwardedPolicy), + ws.Name, + ) + ch <- prometheus.MustNewConstMetric( + t.metrics["policy_dropped_total"], + prometheus.CounterValue, + float64(ws.TotalDroppedPolicy), + ws.Name, + ) + } +} + +func (t *PrometheusCollector) Describe(ch chan<- *prometheus.Desc) { + for _, m := range t.metrics { + ch <- m + } +} + +func (t *PrometheusCollector) Stop() { + close(t.stop) // Signal the stop channel to stop the goroutine +} + +func InitTelemetryServer(config *pkgconfig.Config, logger *logger.Logger) (*http.Server, *PrometheusCollector, chan error) { + // channel for error + errChan := make(chan error) + + // Prometheus collectors + metrics := NewPrometheusCollector(config) + + // HTTP server + promServer := &http.Server{ + Addr: config.Global.Telemetry.WebListen, + ReadHeaderTimeout: 5 * time.Second, + } + + if config.Global.Telemetry.Enabled { + go func() { + // start metrics + go metrics.UpdateStats() + + // register metrics + prometheus.MustRegister(metrics) + prometheus.MustRegister(version.NewCollector(config.Global.Telemetry.PromPrefix)) + + // handle /metrics + http.Handle(config.Global.Telemetry.WebPath, promhttp.Handler()) + + // handle http error + http.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) { + _, err := w.Write([]byte(` + DNScollector Exporter + +

DNScollector Exporter

+

Metrics

+ + `)) + if err != nil { + errChan <- err + } + }) + + if config.Global.Telemetry.TLSSupport { + // Load server certificate and key + cert, err := tls.LoadX509KeyPair(config.Global.Telemetry.TLSCertFile, config.Global.Telemetry.TLSKeyFile) + if err != nil { + errChan <- fmt.Errorf("failed to load server certificate and key: %w", err) + return + } + + // Load client CA certificate + clientCACert, err := os.ReadFile(config.Global.Telemetry.ClientCAFile) + if err != nil { + errChan <- fmt.Errorf("failed to load client CA certificate: %w", err) + return + } + clientCAs := x509.NewCertPool() + clientCAs.AppendCertsFromPEM(clientCACert) + + // Configure TLS + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientCAs: clientCAs, + ClientAuth: tls.RequireAndVerifyClientCert, + } + + // Update the promServer with TLS configuration and handler + promServer.TLSConfig = tlsConfig + } + + if config.Global.Telemetry.BasicAuthEnable { + promServer.Handler = basicAuthMiddleware(http.DefaultServeMux, config.Global.Telemetry.BasicAuthLogin, config.Global.Telemetry.BasicAuthPwd) + } else { + promServer.Handler = http.DefaultServeMux + } + + // start https server + if config.Global.Telemetry.TLSSupport { + if err := promServer.ListenAndServeTLS("", ""); err != nil && err != http.ErrServerClosed { + errChan <- err + } + } else { + if err := promServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { + errChan <- err + } + } + }() + } + + return promServer, metrics, errChan +} + +// BasicAuth middleware +func basicAuthMiddleware(next http.Handler, username, password string) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + u, p, ok := r.BasicAuth() + if !ok || u != username || p != password { + w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`) + http.Error(w, "Unauthorized", http.StatusUnauthorized) + return + } + next.ServeHTTP(w, r) + }) +} diff --git a/telemetry/prometheus_test.go 
b/telemetry/prometheus_test.go new file mode 100644 index 00000000..b618ed82 --- /dev/null +++ b/telemetry/prometheus_test.go @@ -0,0 +1,50 @@ +package telemetry + +import ( + "testing" + + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/stretchr/testify/assert" +) + +func TestTelemetry_SanitizeMetricName(t *testing.T) { + testCases := []struct { + input string + expected string + }{ + {"metric:name", "metric_name"}, + {"metric-name", "metric_name"}, + {"metric.name", "metric_name"}, + } + + for _, tc := range testCases { + actual := SanitizeMetricName(tc.input) + assert.Equal(t, tc.expected, actual) + } +} + +func TestTelemetry_PrometheusCollectorUpdateStats(t *testing.T) { + config := pkgconfig.Config{} + + collector := NewPrometheusCollector(&config) + + // Create a sample WorkerStats + ws := WorkerStats{ + Name: "worker1", + TotalIngress: 10, TotalEgress: 5, + TotalForwardedPolicy: 2, TotalDroppedPolicy: 1, TotalDiscarded: 3, + } + + // Send the stats to the collector + go collector.UpdateStats() + collector.Record <- ws + + // Verify that the stats were updated + storedWS, ok := collector.data["worker1"] + assert.True(t, ok, "Worker stats should be present in the collector") + assert.Equal(t, ws.TotalIngress, storedWS.TotalIngress) + assert.Equal(t, ws.TotalEgress, storedWS.TotalEgress) + assert.Equal(t, ws.TotalForwardedPolicy, storedWS.TotalForwardedPolicy) + assert.Equal(t, ws.TotalDroppedPolicy, storedWS.TotalDroppedPolicy) + assert.Equal(t, ws.TotalDiscarded, storedWS.TotalDiscarded) +} diff --git a/tests/bench.py b/tests/bench.py index 74b198c6..c9286e23 100644 --- a/tests/bench.py +++ b/tests/bench.py @@ -54,7 +54,7 @@ def test_stdout_recv(self): async def run(): # run collector is_listening = asyncio.Future() - args = ( "./go-dnscollector", "-config", "./testsdata/config_bench.yml",) + args = ( "./go-dnscollector", "-config", "./tests/testsdata/config_bench.yml",) transport_collector, protocol_collector = await 
self.loop.subprocess_exec(lambda: CollectorProc(is_listening), *args, stdout=asyncio.subprocess.PIPE) diff --git a/tests/clientquery_dnstaptcp.py b/tests/clientquery_dnstaptcp.py index 41bb8756..0e96b064 100644 --- a/tests/clientquery_dnstaptcp.py +++ b/tests/clientquery_dnstaptcp.py @@ -60,7 +60,7 @@ def connection_made(self, transport): def pipe_data_received(self, fd, data): print(data.decode(), end="") - if b"is listening on" in data: + if b"listening on" in data: self.is_listening.set_result(True) if b"CLIENT_QUERY NOERROR" in data: @@ -83,7 +83,7 @@ async def run(): # run collector is_listening = asyncio.Future() is_clientquery = asyncio.Future() - args = ( "./go-dnscollector", "-config", "./testsdata/config_stdout_dnstaptcp.yml",) + args = ( "./go-dnscollector", "-config", "./tests/testsdata/config_stdout_dnstaptcp.yml",) transport_collector, protocol_collector = await self.loop.subprocess_exec(lambda: ProcessProtocol(is_listening, is_clientquery), *args, stdout=asyncio.subprocess.PIPE) diff --git a/tests/clientquery_dnstapunix.py b/tests/clientquery_dnstapunix.py index 763b212c..65ee836c 100644 --- a/tests/clientquery_dnstapunix.py +++ b/tests/clientquery_dnstapunix.py @@ -60,7 +60,7 @@ def connection_made(self, transport): def pipe_data_received(self, fd, data): print(data.decode(), end="") - if b"is listening on" in data: + if b"listening on" in data: self.is_listening.set_result(True) if b"CLIENT_QUERY NOERROR" in data: @@ -83,7 +83,7 @@ async def run(): # run collector is_listening = asyncio.Future() is_clientquery = asyncio.Future() - args = ( "./go-dnscollector", "-config", "./testsdata/config_stdout_dnstapunix.yml",) + args = ( "./go-dnscollector", "-config", "./tests/testsdata/config_stdout_dnstapunix.yml",) transport_collector, protocol_collector = await self.loop.subprocess_exec(lambda: ProcessProtocol(is_listening, is_clientquery), *args, stdout=asyncio.subprocess.PIPE) diff --git a/tests/config.py b/tests/config.py index 2ba338bb..8ed17b8c 
100644 --- a/tests/config.py +++ b/tests/config.py @@ -16,7 +16,7 @@ def connection_made(self, transport): def pipe_data_received(self, fd, data): print(data.decode(), end="") - if b"main - starting" in data: + if b"main - running" in data: self.is_configvalid.set_result(True) self.kill() @@ -38,7 +38,7 @@ def test1_valid(self): async def run(): # run collector is_configvalid= asyncio.Future() - args = ( "./go-dnscollector", "-config", "./testsdata/config_verbose.yml",) + args = ( "./go-dnscollector", "-config", "./tests/testsdata/config_verbose.yml",) transport_collector, protocol_collector = await self.loop.subprocess_exec(lambda: ProcessProtocol(is_configvalid, None), *args, stdout=asyncio.subprocess.PIPE) @@ -60,7 +60,7 @@ def test2_invalid(self): async def run(): # run collector is_configinvalid= asyncio.Future() - args = ( "./go-dnscollector", "-config", "./testsdata/config_invalid.yml",) + args = ( "./go-dnscollector", "-config", "./tests/testsdata/config_invalid.yml",) transport_collector, protocol_collector = await self.loop.subprocess_exec(lambda: ProcessProtocol(None, is_configinvalid), *args, stdout=asyncio.subprocess.PIPE) diff --git a/tests/dnsquery_dnstapdoq.py b/tests/dnsquery_dnstapdoq.py new file mode 100644 index 00000000..479ccfce --- /dev/null +++ b/tests/dnsquery_dnstapdoq.py @@ -0,0 +1,109 @@ +import unittest +import asyncio +import requests +import re + +class CollectorProc(asyncio.SubprocessProtocol): + def __init__(self, is_ready, is_clientresponse): + self.is_ready = is_ready + self.is_clientresponse = is_clientresponse + self.transport = None + self.proc = None + + def connection_made(self, transport): + self.transport = transport + self.proc = transport.get_extra_info('subprocess') + + def pipe_data_received(self, fd, data): + print(data.decode(), end="") + + if b"receiver framestream initialized" in data: + self.is_ready.set_result(True) + + if not self.is_clientresponse.done(): + if b" DOQ " in data: + 
self.is_clientresponse.set_result(True) + self.kill() + + def kill(self): + try: + self.proc.kill() + except ProcessLookupError: pass + +class DoQClient(asyncio.SubprocessProtocol): + def __init__(self, exit_future): + self.exit_future = exit_future + self.transport = None + self.proc = None + + def connection_made(self, transport): + self.transport = transport + self.proc = transport.get_extra_info('subprocess') + + def pipe_data_received(self, fd, data): + print(data.decode(), end="") + + def process_exited(self): + self.exit_future.set_result(True) + + def kill(self): + try: + self.proc.kill() + except ProcessLookupError: pass + +class TestDnstap(unittest.TestCase): + def setUp(self): + self.loop = asyncio.get_event_loop() + + def test_stdout_recv(self): + """test to receive dnstap DOQ response in stdou""" + async def run(): + # run collector + is_ready = asyncio.Future() + is_clientresponse = asyncio.Future() + args = ( "./go-dnscollector", "-config", "./tests/testsdata/config_stdout_dnstaptcp.yml",) + transport_collector, protocol_collector = await self.loop.subprocess_exec(lambda: CollectorProc(is_ready, is_clientresponse), + *args, stdout=asyncio.subprocess.PIPE) + + # make doq resolution + for i in range(10): + is_existed = asyncio.Future() + args = ( "./q", "www.github.com", "A", "@quic://127.0.0.1:5853", "--tls-insecure-skip-verify") + transport_client, protocol_client = await self.loop.subprocess_exec(lambda: DoQClient(is_existed), *args, stdout=asyncio.subprocess.PIPE) + await is_existed + + protocol_client.kill() + transport_client.close() + + # waiting for connection between collector and dns server is ok + try: + await asyncio.wait_for(is_ready, timeout=5.0) + except asyncio.TimeoutError: + protocol_collector.kill() + transport_collector.close() + self.fail("collector framestream timeout") + + # make again doq resolution + for i in range(10): + is_existed = asyncio.Future() + args = ( "./q", "www.github.com", "A", "@quic://127.0.0.1:5853", 
"--tls-insecure-skip-verify") + transport_client, protocol_client = await self.loop.subprocess_exec(lambda: DoQClient(is_existed), *args, stdout=asyncio.subprocess.PIPE) + await is_existed + + protocol_client.kill() + transport_client.close() + + # wait client response on collector + try: + await asyncio.wait_for(is_clientresponse, timeout=30.0) + except asyncio.TimeoutError: + protocol_collector.kill() + transport_collector.close() + self.fail("dnstap client response expected") + + # Shutdown all + protocol_collector.kill() + transport_collector.close() + + + self.loop.run_until_complete(run()) diff --git a/tests/dnsquery_dnstaptcp.py b/tests/dnsquery_dnstaptcp.py index 2f5e54b0..670eeae7 100644 --- a/tests/dnsquery_dnstaptcp.py +++ b/tests/dnsquery_dnstaptcp.py @@ -46,7 +46,7 @@ async def run(): # run collector is_ready = asyncio.Future() is_clientresponse = asyncio.Future() - args = ( "./go-dnscollector", "-config", "./testsdata/config_stdout_dnstaptcp.yml",) + args = ( "./go-dnscollector", "-config", "./tests/testsdata/config_stdout_dnstaptcp.yml",) transport_collector, protocol_collector = await self.loop.subprocess_exec(lambda: ProcessProtocol(is_ready, is_clientresponse), *args, stdout=asyncio.subprocess.PIPE) diff --git a/tests/dnsquery_dnstaptls.py b/tests/dnsquery_dnstaptls.py index 427bba3a..6baaa70d 100644 --- a/tests/dnsquery_dnstaptls.py +++ b/tests/dnsquery_dnstaptls.py @@ -46,7 +46,7 @@ async def run(): # run collector is_ready = asyncio.Future() is_clientresponse = asyncio.Future() - args = ( "./go-dnscollector", "-config", "./testsdata/config_stdout_dnstaptls.yml",) + args = ( "./go-dnscollector", "-config", "./tests/testsdata/config_stdout_dnstaptls.yml",) transport_collector, protocol_collector = await self.loop.subprocess_exec(lambda: ProcessProtocol(is_ready, is_clientresponse), *args, stdout=asyncio.subprocess.PIPE) diff --git a/tests/dnsquery_dnstapunix.py b/tests/dnsquery_dnstapunix.py index 865cc7a2..03a4e284 100644 --- 
a/tests/dnsquery_dnstapunix.py +++ b/tests/dnsquery_dnstapunix.py @@ -50,7 +50,7 @@ async def run(): # run collector is_ready = asyncio.Future() is_clientresponse = asyncio.Future() - args = ( "sudo", "-u", COLLECTOR_USER, "-s", "./go-dnscollector", "-config", "./testsdata/config_stdout_dnstapunix.yml",) + args = ( "sudo", "-u", COLLECTOR_USER, "-s", "./go-dnscollector", "-config", "./tests/testsdata/config_stdout_dnstapunix.yml",) transport_collector, protocol_collector = await self.loop.subprocess_exec(lambda: ProcessProtocol(is_ready, is_clientresponse), *args, stdout=asyncio.subprocess.PIPE) diff --git a/tests/dnsquery_powerdns.py b/tests/dnsquery_powerdns.py index 142cade3..730dad6c 100644 --- a/tests/dnsquery_powerdns.py +++ b/tests/dnsquery_powerdns.py @@ -46,7 +46,7 @@ async def run(): # run collector is_ready = asyncio.Future() is_clientresponse = asyncio.Future() - args = ( "./go-dnscollector", "-config", "./testsdata/config_stdout_powerdns.yml",) + args = ( "./go-dnscollector", "-config", "./tests/testsdata/config_stdout_powerdns.yml",) transport_collector, protocol_collector = await self.loop.subprocess_exec(lambda: ProcessProtocol(is_ready, is_clientresponse), *args, stdout=asyncio.subprocess.PIPE) diff --git a/testsdata/GeoLite2-ASN.mmdb b/tests/testsdata/GeoLite2-ASN.mmdb similarity index 100% rename from testsdata/GeoLite2-ASN.mmdb rename to tests/testsdata/GeoLite2-ASN.mmdb diff --git a/testsdata/GeoLite2-Country.mmdb b/tests/testsdata/GeoLite2-Country.mmdb similarity index 100% rename from testsdata/GeoLite2-Country.mmdb rename to tests/testsdata/GeoLite2-Country.mmdb diff --git a/testsdata/certs/ca.crt b/tests/testsdata/certs/ca.crt similarity index 100% rename from testsdata/certs/ca.crt rename to tests/testsdata/certs/ca.crt diff --git a/testsdata/certs/ca.key b/tests/testsdata/certs/ca.key similarity index 100% rename from testsdata/certs/ca.key rename to tests/testsdata/certs/ca.key diff --git a/testsdata/certs/client.crt 
b/tests/testsdata/certs/client.crt similarity index 100% rename from testsdata/certs/client.crt rename to tests/testsdata/certs/client.crt diff --git a/testsdata/certs/client.key b/tests/testsdata/certs/client.key similarity index 100% rename from testsdata/certs/client.key rename to tests/testsdata/certs/client.key diff --git a/testsdata/certs/server.crt b/tests/testsdata/certs/server.crt similarity index 100% rename from testsdata/certs/server.crt rename to tests/testsdata/certs/server.crt diff --git a/testsdata/certs/server.key b/tests/testsdata/certs/server.key similarity index 100% rename from testsdata/certs/server.key rename to tests/testsdata/certs/server.key diff --git a/tests/testsdata/config_bench.yml b/tests/testsdata/config_bench.yml new file mode 100644 index 00000000..db9f538f --- /dev/null +++ b/tests/testsdata/config_bench.yml @@ -0,0 +1,20 @@ +global: + trace: + verbose: true + +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ web ] + dropped: [] + + - name: web + webserver: + listen-ip: 0.0.0.0 + listen-port: 8080 + top-max-items: 100 + basic-auth-login: admin + basic-auth-pwd: changeme \ No newline at end of file diff --git a/testsdata/config_invalid.yml b/tests/testsdata/config_invalid.yml similarity index 100% rename from testsdata/config_invalid.yml rename to tests/testsdata/config_invalid.yml diff --git a/tests/testsdata/config_metrics_dnstaptcp.yml b/tests/testsdata/config_metrics_dnstaptcp.yml new file mode 100644 index 00000000..be46f3e8 --- /dev/null +++ b/tests/testsdata/config_metrics_dnstaptcp.yml @@ -0,0 +1,24 @@ +global: + trace: + verbose: true + +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ console, api ] + dropped: [] + + - name: console + stdout: + mode: text + + - name: api + restapi: + listen-ip: 0.0.0.0 + listen-port: 8080 + top-n: 100 + basic-auth-login: admin + basic-auth-pwd: changeme diff --git 
a/docs/_examples/use-case-2.pipeline.yml b/tests/testsdata/config_prom.yml similarity index 52% rename from docs/_examples/use-case-2.pipeline.yml rename to tests/testsdata/config_prom.yml index f6e11cb5..eb828b5f 100644 --- a/docs/_examples/use-case-2.pipeline.yml +++ b/tests/testsdata/config_prom.yml @@ -1,6 +1,3 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000, -# and computes Prometheus metrics for analysis. - global: trace: verbose: true @@ -11,9 +8,10 @@ pipelines: listen-ip: 0.0.0.0 listen-port: 6000 routing-policy: - default: [ prom ] + forward: [ prom ] + dropped: [] - name: prom prometheus: listen-ip: 0.0.0.0 - listen-port: 8080 \ No newline at end of file + listen-port: 8081 \ No newline at end of file diff --git a/docs/_examples/use-case-21.pipeline.yml b/tests/testsdata/config_stdout_dnstaptcp.yml similarity index 51% rename from docs/_examples/use-case-21.pipeline.yml rename to tests/testsdata/config_stdout_dnstaptcp.yml index e5b4ad2b..f8bb7ebb 100644 --- a/docs/_examples/use-case-21.pipeline.yml +++ b/tests/testsdata/config_stdout_dnstaptcp.yml @@ -1,6 +1,3 @@ -# This configuration sets up DNS traffic monitoring through DNStap on port 6000; -# and log the console as PCAP format - global: trace: verbose: true @@ -11,8 +8,9 @@ pipelines: listen-ip: 0.0.0.0 listen-port: 6000 routing-policy: - default: [ console ] + forward: [ console ] + dropped: [] - name: console stdout: - mode: pcap \ No newline at end of file + mode: text \ No newline at end of file diff --git a/tests/testsdata/config_stdout_dnstaptls.yml b/tests/testsdata/config_stdout_dnstaptls.yml new file mode 100644 index 00000000..d8a73b5b --- /dev/null +++ b/tests/testsdata/config_stdout_dnstaptls.yml @@ -0,0 +1,19 @@ +global: + trace: + verbose: true + +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + tls-support: true + cert-file: "./tests/testsdata/dnscollector.crt" + key-file: "./tests/testsdata/dnscollector.key" + 
routing-policy: + forward: [ console ] + dropped: [] + + - name: console + stdout: + mode: text \ No newline at end of file diff --git a/tests/testsdata/config_stdout_dnstapunix.yml b/tests/testsdata/config_stdout_dnstapunix.yml new file mode 100644 index 00000000..bb2599f3 --- /dev/null +++ b/tests/testsdata/config_stdout_dnstapunix.yml @@ -0,0 +1,15 @@ +global: + trace: + verbose: true + +pipelines: + - name: tap + dnstap: + sock-path: /tmp/dnstap.sock + routing-policy: + forward: [ console ] + dropped: [] + + - name: console + stdout: + mode: text diff --git a/tests/testsdata/config_stdout_powerdns.yml b/tests/testsdata/config_stdout_powerdns.yml new file mode 100644 index 00000000..9fb5f849 --- /dev/null +++ b/tests/testsdata/config_stdout_powerdns.yml @@ -0,0 +1,16 @@ +global: + trace: + verbose: true + +pipelines: + - name: pdns + powerdns: + listen-ip: 0.0.0.0 + listen-port: 6001 + routing-policy: + forward: [ console ] + dropped: [] + + - name: console + stdout: + mode: text \ No newline at end of file diff --git a/tests/testsdata/config_verbose.yml b/tests/testsdata/config_verbose.yml new file mode 100644 index 00000000..397e39fe --- /dev/null +++ b/tests/testsdata/config_verbose.yml @@ -0,0 +1,19 @@ +global: + trace: + verbose: true + +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + transforms: + normalize: + qname-lowercase: true + routing-policy: + forward: [ console ] + dropped: [] + + - name: console + stdout: + mode: text \ No newline at end of file diff --git a/tests/testsdata/config_webapi.yml b/tests/testsdata/config_webapi.yml new file mode 100644 index 00000000..7872f355 --- /dev/null +++ b/tests/testsdata/config_webapi.yml @@ -0,0 +1,17 @@ +global: + trace: + verbose: true + +pipelines: + - name: tap + dnstap: + listen-ip: 0.0.0.0 + listen-port: 6000 + routing-policy: + forward: [ console ] + dropped: [] + + - name: api + restapi: + listen-ip: 0.0.0.0 + listen-port: 8080 \ No newline at end of file diff --git 
a/testsdata/coredns/coredns_tcp.conf b/tests/testsdata/coredns/coredns_tcp.conf similarity index 100% rename from testsdata/coredns/coredns_tcp.conf rename to tests/testsdata/coredns/coredns_tcp.conf diff --git a/testsdata/coredns/coredns_tls.conf b/tests/testsdata/coredns/coredns_tls.conf similarity index 100% rename from testsdata/coredns/coredns_tls.conf rename to tests/testsdata/coredns/coredns_tls.conf diff --git a/testsdata/coredns/coredns_unix.conf b/tests/testsdata/coredns/coredns_unix.conf similarity index 100% rename from testsdata/coredns/coredns_unix.conf rename to tests/testsdata/coredns/coredns_unix.conf diff --git a/tests/testsdata/dnstap/dnstap.fstrm b/tests/testsdata/dnstap/dnstap.fstrm new file mode 100644 index 00000000..4e848421 Binary files /dev/null and b/tests/testsdata/dnstap/dnstap.fstrm differ diff --git a/testsdata/filtering_fqdn.txt b/tests/testsdata/filtering_fqdn.txt similarity index 100% rename from testsdata/filtering_fqdn.txt rename to tests/testsdata/filtering_fqdn.txt diff --git a/testsdata/filtering_fqdn_regex.txt b/tests/testsdata/filtering_fqdn_regex.txt similarity index 100% rename from testsdata/filtering_fqdn_regex.txt rename to tests/testsdata/filtering_fqdn_regex.txt diff --git a/testsdata/filtering_keep_domains.txt b/tests/testsdata/filtering_keep_domains.txt similarity index 100% rename from testsdata/filtering_keep_domains.txt rename to tests/testsdata/filtering_keep_domains.txt diff --git a/testsdata/filtering_keep_domains_regex.txt b/tests/testsdata/filtering_keep_domains_regex.txt similarity index 100% rename from testsdata/filtering_keep_domains_regex.txt rename to tests/testsdata/filtering_keep_domains_regex.txt diff --git a/testsdata/filtering_queryip.txt b/tests/testsdata/filtering_queryip.txt similarity index 100% rename from testsdata/filtering_queryip.txt rename to tests/testsdata/filtering_queryip.txt diff --git a/testsdata/filtering_queryip_keep.txt b/tests/testsdata/filtering_queryip_keep.txt similarity 
index 100% rename from testsdata/filtering_queryip_keep.txt rename to tests/testsdata/filtering_queryip_keep.txt diff --git a/testsdata/filtering_rdataip_keep.txt b/tests/testsdata/filtering_rdataip_keep.txt similarity index 100% rename from testsdata/filtering_rdataip_keep.txt rename to tests/testsdata/filtering_rdataip_keep.txt diff --git a/testsdata/knotresolver/knotresolver_unix.conf b/tests/testsdata/knotresolver/knotresolver_unix.conf similarity index 100% rename from testsdata/knotresolver/knotresolver_unix.conf rename to tests/testsdata/knotresolver/knotresolver_unix.conf diff --git a/testsdata/pcap/dnsdump_ip4_fragmented+udp.pcap b/tests/testsdata/pcap/dnsdump_ip4_fragmented+udp.pcap similarity index 100% rename from testsdata/pcap/dnsdump_ip4_fragmented+udp.pcap rename to tests/testsdata/pcap/dnsdump_ip4_fragmented+udp.pcap diff --git a/tests/testsdata/pcap/dnsdump_ip4_fragmented_query.pcap b/tests/testsdata/pcap/dnsdump_ip4_fragmented_query.pcap new file mode 100644 index 00000000..65aa7eda Binary files /dev/null and b/tests/testsdata/pcap/dnsdump_ip4_fragmented_query.pcap differ diff --git a/testsdata/pcap/dnsdump_ip6_fragmented+udp.pcap b/tests/testsdata/pcap/dnsdump_ip6_fragmented+udp.pcap similarity index 100% rename from testsdata/pcap/dnsdump_ip6_fragmented+udp.pcap rename to tests/testsdata/pcap/dnsdump_ip6_fragmented+udp.pcap diff --git a/tests/testsdata/pcap/dnsdump_ip6_fragmented_query.pcap b/tests/testsdata/pcap/dnsdump_ip6_fragmented_query.pcap new file mode 100644 index 00000000..c922a275 Binary files /dev/null and b/tests/testsdata/pcap/dnsdump_ip6_fragmented_query.pcap differ diff --git a/testsdata/pcap/dnsdump_tcp.pcap b/tests/testsdata/pcap/dnsdump_tcp.pcap similarity index 100% rename from testsdata/pcap/dnsdump_tcp.pcap rename to tests/testsdata/pcap/dnsdump_tcp.pcap diff --git a/testsdata/pcap/dnsdump_tcp_fastopen.pcap b/tests/testsdata/pcap/dnsdump_tcp_fastopen.pcap similarity index 100% rename from 
testsdata/pcap/dnsdump_tcp_fastopen.pcap rename to tests/testsdata/pcap/dnsdump_tcp_fastopen.pcap diff --git a/testsdata/pcap/dnsdump_udp+tcp.pcap b/tests/testsdata/pcap/dnsdump_udp+tcp.pcap similarity index 100% rename from testsdata/pcap/dnsdump_udp+tcp.pcap rename to tests/testsdata/pcap/dnsdump_udp+tcp.pcap diff --git a/testsdata/pcap/dnsdump_udp.pcap b/tests/testsdata/pcap/dnsdump_udp.pcap similarity index 100% rename from testsdata/pcap/dnsdump_udp.pcap rename to tests/testsdata/pcap/dnsdump_udp.pcap diff --git a/testsdata/pcap/dnsdump_udp_truncated+tcp_fragmented.pcap b/tests/testsdata/pcap/dnsdump_udp_truncated+tcp_fragmented.pcap similarity index 100% rename from testsdata/pcap/dnsdump_udp_truncated+tcp_fragmented.pcap rename to tests/testsdata/pcap/dnsdump_udp_truncated+tcp_fragmented.pcap diff --git a/testsdata/powerdns/dnsdist_dnstaptcp.conf b/tests/testsdata/powerdns/dnsdist_dnstaptcp.conf similarity index 100% rename from testsdata/powerdns/dnsdist_dnstaptcp.conf rename to tests/testsdata/powerdns/dnsdist_dnstaptcp.conf diff --git a/testsdata/powerdns/dnsdist_dnstapunix.conf b/tests/testsdata/powerdns/dnsdist_dnstapunix.conf similarity index 100% rename from testsdata/powerdns/dnsdist_dnstapunix.conf rename to tests/testsdata/powerdns/dnsdist_dnstapunix.conf diff --git a/tests/testsdata/powerdns/dnsdist_dox.conf b/tests/testsdata/powerdns/dnsdist_dox.conf new file mode 100644 index 00000000..7b431702 --- /dev/null +++ b/tests/testsdata/powerdns/dnsdist_dox.conf @@ -0,0 +1,13 @@ +setLocal('0.0.0.0:5553') + +addDOQLocal('0.0.0.0:5853', '/etc/dnsdist/conf.d/server.crt', '/etc/dnsdist/conf.d/server.key') +addDOHLocal('0.0.0.0:5443', '/etc/dnsdist/conf.d/server.crt', '/etc/dnsdist/conf.d/server.key') +addDOH3Local('0.0.0.0:5443', '/etc/dnsdist/conf.d/server.crt', '/etc/dnsdist/conf.d/server.key') + +fstl = newFrameStreamTcpLogger("127.0.0.1:6000") + +addAction(AllRule(), DnstapLogAction("dnsdist", fstl)) +addResponseAction(AllRule(), 
DnstapLogResponseAction("dnsdist", fstl)) +addCacheHitResponseAction(AllRule(), DnstapLogResponseAction("dnsdist", fstl)) + +newServer('8.8.8.8') \ No newline at end of file diff --git a/testsdata/powerdns/dnsdist_protobuf.conf b/tests/testsdata/powerdns/dnsdist_protobuf.conf similarity index 100% rename from testsdata/powerdns/dnsdist_protobuf.conf rename to tests/testsdata/powerdns/dnsdist_protobuf.conf diff --git a/testsdata/powerdns/dnsdist_protobuf_metadata.conf b/tests/testsdata/powerdns/dnsdist_protobuf_metadata.conf similarity index 100% rename from testsdata/powerdns/dnsdist_protobuf_metadata.conf rename to tests/testsdata/powerdns/dnsdist_protobuf_metadata.conf diff --git a/testsdata/powerdns/pdns_recursor.conf b/tests/testsdata/powerdns/pdns_recursor.conf similarity index 100% rename from testsdata/powerdns/pdns_recursor.conf rename to tests/testsdata/powerdns/pdns_recursor.conf diff --git a/testsdata/powerdns/pdns_recursor.lua b/tests/testsdata/powerdns/pdns_recursor.lua similarity index 100% rename from testsdata/powerdns/pdns_recursor.lua rename to tests/testsdata/powerdns/pdns_recursor.lua diff --git a/testsdata/unbound/unbound_tcp.conf b/tests/testsdata/unbound/unbound_tcp.conf similarity index 100% rename from testsdata/unbound/unbound_tcp.conf rename to tests/testsdata/unbound/unbound_tcp.conf diff --git a/testsdata/config_bench.yml b/testsdata/config_bench.yml deleted file mode 100644 index 68b414c8..00000000 --- a/testsdata/config_bench.yml +++ /dev/null @@ -1,23 +0,0 @@ -global: - trace: - verbose: true - -multiplexer: - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - - loggers: - - name: web - webserver: - listen-ip: 0.0.0.0 - listen-port: 8080 - top-max-items: 100 - basic-auth-login: admin - basic-auth-pwd: changeme - - routes: - - from: [tap] - to: [web] diff --git a/testsdata/config_metrics_dnstaptcp.yml b/testsdata/config_metrics_dnstaptcp.yml deleted file mode 100644 index d2315d61..00000000 --- 
a/testsdata/config_metrics_dnstaptcp.yml +++ /dev/null @@ -1,26 +0,0 @@ -global: - trace: - verbose: true - -multiplexer: - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - - loggers: - - name: console - stdout: - mode: text - - name: api - restapi: - listen-ip: 0.0.0.0 - listen-port: 8080 - top-n: 100 - basic-auth-login: admin - basic-auth-pwd: changeme - - routes: - - from: [tap] - to: [console, api] diff --git a/testsdata/config_prom.yml b/testsdata/config_prom.yml deleted file mode 100644 index e4a8ff4d..00000000 --- a/testsdata/config_prom.yml +++ /dev/null @@ -1,20 +0,0 @@ -global: - trace: - verbose: true - -multiplexer: - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - - loggers: - - name: prom - prometheus: - listen-ip: 0.0.0.0 - listen-port: 8081 - - routes: - - from: [tap] - to: [prom] diff --git a/testsdata/config_stdout_dnstaptcp.yml b/testsdata/config_stdout_dnstaptcp.yml deleted file mode 100644 index b34dc59b..00000000 --- a/testsdata/config_stdout_dnstaptcp.yml +++ /dev/null @@ -1,19 +0,0 @@ -global: - trace: - verbose: true - -multiplexer: - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - - loggers: - - name: console - stdout: - mode: text - - routes: - - from: [tap] - to: [console] diff --git a/testsdata/config_stdout_dnstapunix.yml b/testsdata/config_stdout_dnstapunix.yml deleted file mode 100644 index 97499b82..00000000 --- a/testsdata/config_stdout_dnstapunix.yml +++ /dev/null @@ -1,18 +0,0 @@ -global: - trace: - verbose: true - -multiplexer: - collectors: - - name: tap - dnstap: - sock-path: /tmp/dnstap.sock - - loggers: - - name: console - stdout: - mode: text - - routes: - - from: [tap] - to: [console] diff --git a/testsdata/config_stdout_powerdns.yml b/testsdata/config_stdout_powerdns.yml deleted file mode 100644 index 73cbda6e..00000000 --- a/testsdata/config_stdout_powerdns.yml +++ /dev/null @@ -1,19 +0,0 @@ -global: - trace: - verbose: true - 
-multiplexer: - collectors: - - name: pdns - powerdns: - listen-ip: 0.0.0.0 - listen-port: 6001 - - loggers: - - name: console - stdout: - mode: text - - routes: - - from: [pdns] - to: [console] diff --git a/testsdata/config_webapi.yml b/testsdata/config_webapi.yml deleted file mode 100644 index 8a84d65d..00000000 --- a/testsdata/config_webapi.yml +++ /dev/null @@ -1,20 +0,0 @@ -global: - trace: - verbose: true - -multiplexer: - collectors: - - name: tap - dnstap: - listen-ip: 0.0.0.0 - listen-port: 6000 - - loggers: - - name: api - restapi: - listen-ip: 0.0.0.0 - listen-port: 8080 - - routes: - - from: [tap] - to: [api] diff --git a/transformers/atags.go b/transformers/atags.go index aff6e0b2..28925c52 100644 --- a/transformers/atags.go +++ b/transformers/atags.go @@ -6,52 +6,28 @@ import ( "github.com/dmachard/go-logger" ) -type ATagsProcessor struct { - config *pkgconfig.ConfigTransformers - logger *logger.Logger - name string - instance int - outChannels []chan dnsutils.DNSMessage - logInfo func(msg string, v ...interface{}) - logError func(msg string, v ...interface{}) +type ATagsTransform struct { + GenericTransformer } -func NewATagsSubprocessor(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, - instance int, outChannels []chan dnsutils.DNSMessage, - logInfo func(msg string, v ...interface{}), logError func(msg string, v ...interface{})) ATagsProcessor { - s := ATagsProcessor{ - config: config, - logger: logger, - name: name, - instance: instance, - outChannels: outChannels, - logInfo: logInfo, - logError: logError, - } - - return s +func NewATagsTransform(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, instance int, nextWorkers []chan dnsutils.DNSMessage) *ATagsTransform { + t := &ATagsTransform{GenericTransformer: NewTransformer(config, logger, "atags", name, instance, nextWorkers)} + return t } -func (p *ATagsProcessor) ReloadConfig(config *pkgconfig.ConfigTransformers) { - p.config = config +func 
(t *ATagsTransform) GetTransforms() ([]Subtransform, error) { + subtransforms := []Subtransform{} + if len(t.config.ATags.AddTags) > 0 { + subtransforms = append(subtransforms, Subtransform{name: "atags:add", processFunc: t.addTags}) + } + return subtransforms, nil } -func (p *ATagsProcessor) InitDNSMessage(dm *dnsutils.DNSMessage) { +func (t *ATagsTransform) addTags(dm *dnsutils.DNSMessage) (int, error) { if dm.ATags == nil { - dm.ATags = &dnsutils.TransformATags{ - Tags: []string{}, - } - + dm.ATags = &dnsutils.TransformATags{Tags: []string{}} } -} - -func (p *ATagsProcessor) IsEnabled() bool { - return p.config.ATags.Enable -} -func (p *ATagsProcessor) AddTags(dm *dnsutils.DNSMessage) int { - if p.config.ATags.Enable { - dm.ATags.Tags = append(dm.ATags.Tags, p.config.ATags.Tags...) - } - return ReturnSuccess + dm.ATags.Tags = append(dm.ATags.Tags, t.config.ATags.AddTags...) + return ReturnKeep, nil } diff --git a/transformers/atags_test.go b/transformers/atags_test.go new file mode 100644 index 00000000..56bcfc22 --- /dev/null +++ b/transformers/atags_test.go @@ -0,0 +1,33 @@ +package transformers + +import ( + "testing" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" +) + +func TestATags_AddTag(t *testing.T) { + // enable feature + config := pkgconfig.GetFakeConfigTransformers() + config.ATags.Enable = true + config.ATags.AddTags = append(config.ATags.AddTags, "tag1") + config.ATags.AddTags = append(config.ATags.AddTags, "tag2") + + // init the processor + outChans := []chan dnsutils.DNSMessage{} + atags := NewATagsTransform(config, logger.New(false), "test", 0, outChans) + + // add tags + dm := dnsutils.GetFakeDNSMessage() + atags.addTags(&dm) + + // check results + if dm.ATags == nil { + t.Errorf("DNSMessage.Atags should be not nil") + } + if len(dm.ATags.Tags) != 2 { + t.Errorf("incorrect number of tag in DNSMessage") + } +} diff --git 
a/transformers/extract.go b/transformers/extract.go index 1f7afb90..baf0a569 100644 --- a/transformers/extract.go +++ b/transformers/extract.go @@ -6,49 +6,28 @@ import ( "github.com/dmachard/go-logger" ) -type ExtractProcessor struct { - config *pkgconfig.ConfigTransformers - logger *logger.Logger - name string - instance int - outChannels []chan dnsutils.DNSMessage - logInfo func(msg string, v ...interface{}) - logError func(msg string, v ...interface{}) +type ExtractTransform struct { + GenericTransformer } -func NewExtractSubprocessor(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, - instance int, outChannels []chan dnsutils.DNSMessage, - logInfo func(msg string, v ...interface{}), logError func(msg string, v ...interface{})) ExtractProcessor { - s := ExtractProcessor{ - config: config, - logger: logger, - name: name, - instance: instance, - outChannels: outChannels, - logInfo: logInfo, - logError: logError, - } - - return s +func NewExtractTransform(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, instance int, nextWorkers []chan dnsutils.DNSMessage) *ExtractTransform { + t := &ExtractTransform{GenericTransformer: NewTransformer(config, logger, "extract", name, instance, nextWorkers)} + return t } -func (p *ExtractProcessor) ReloadConfig(config *pkgconfig.ConfigTransformers) { - p.config = config +func (t *ExtractTransform) GetTransforms() ([]Subtransform, error) { + subtransforms := []Subtransform{} + if t.config.Extract.AddPayload { + subtransforms = append(subtransforms, Subtransform{name: "extract:add-base64payload", processFunc: t.addBase64Payload}) + } + return subtransforms, nil } -func (p *ExtractProcessor) InitDNSMessage(dm *dnsutils.DNSMessage) { +func (t *ExtractTransform) addBase64Payload(dm *dnsutils.DNSMessage) (int, error) { if dm.Extracted == nil { - dm.Extracted = &dnsutils.TransformExtracted{ - Base64Payload: []byte("-"), - } + dm.Extracted = 
&dnsutils.TransformExtracted{Base64Payload: []byte("-")} } -} - -func (p *ExtractProcessor) IsEnabled() bool { - return p.config.Extract.Enable -} -func (p *ExtractProcessor) AddBase64Payload(dm *dnsutils.DNSMessage) []byte { - // Encode to base64 is done automatically by the json encoder ([]byte) - return dm.DNS.Payload + dm.Extracted.Base64Payload = dm.DNS.Payload + return ReturnKeep, nil } diff --git a/transformers/extract_test.go b/transformers/extract_test.go index a476c201..2fac8770 100644 --- a/transformers/extract_test.go +++ b/transformers/extract_test.go @@ -3,6 +3,7 @@ package transformers import ( "encoding/base64" "encoding/json" + "fmt" "reflect" "testing" @@ -14,26 +15,27 @@ import ( func TestExtract_Json(t *testing.T) { // enable feature config := pkgconfig.GetFakeConfigTransformers() - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} outChans = append(outChans, make(chan dnsutils.DNSMessage, 1)) - // get fake - dm := dnsutils.GetFakeDNSMessage() - dm.Init() + // get dns message + dm := dnsutils.GetFakeDNSMessageWithPayload() // init subproccesor - extract := NewExtractSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - extract.InitDNSMessage(&dm) + extract := NewExtractTransform(config, logger.New(false), "test", 0, outChans) + extract.GetTransforms() + extract.addBase64Payload(&dm) + + encodedPayload := base64.StdEncoding.EncodeToString(dm.DNS.Payload) // expected json - refJSON := ` + refJSON := fmt.Sprintf(` { "extracted":{ - "dns_payload": "LQ==" + "dns_payload": "%s" } } - ` + `, encodedPayload) var dmMap map[string]interface{} err := json.Unmarshal([]byte(dm.ToJSON()), &dmMap) @@ -55,30 +57,3 @@ func TestExtract_Json(t *testing.T) { t.Errorf("json format different from reference") } } - -func TestExtract_AddPayload(t *testing.T) { - // enable geoip - config := pkgconfig.GetFakeConfigTransformers() - config.Extract.Enable = true - config.Extract.AddPayload = true - - log := logger.New(false) - 
outChans := []chan dnsutils.DNSMessage{} - - // init the processor - extract := NewExtractSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - - // feature is enabled ? - if !extract.IsEnabled() { - t.Fatalf("extract should be enabled") - } - - dm := dnsutils.GetFakeDNSMessage() - src := []byte("P6CBgAABAAEAAAABD29yYW5nZS1zYW5ndWluZQJmcgAAAQABwAwAAQABAABUYAAEwcvvUQAAKQTQAAAAAAAA") - dst := make([]byte, base64.StdEncoding.DecodedLen(len(src))) - base64.StdEncoding.Decode(dst, src) - dm.DNS.Payload = dst - if reflect.DeepEqual(extract.AddBase64Payload(&dm), src) { - t.Errorf("dns payload base64 encoding should match.") - } -} diff --git a/transformers/filtering.go b/transformers/filtering.go index ae6970e0..36fba299 100644 --- a/transformers/filtering.go +++ b/transformers/filtering.go @@ -10,267 +10,208 @@ import ( "github.com/dmachard/go-dnscollector/dnsutils" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-logger" - "gopkg.in/fsnotify.v1" "inet.af/netaddr" ) -type FilteringProcessor struct { - config *pkgconfig.ConfigTransformers - logger *logger.Logger - dropDomains bool - keepDomains bool - mapRcodes map[string]bool - ipsetDrop *netaddr.IPSet - ipsetKeep *netaddr.IPSet - rDataIpsetKeep *netaddr.IPSet - listFqdns map[string]bool - listDomainsRegex map[string]*regexp.Regexp - listKeepFqdns map[string]bool - listKeepDomainsRegex map[string]*regexp.Regexp - fileWatcher *fsnotify.Watcher - name string - downsample int - downsampleCount int - activeFilters []func(dm *dnsutils.DNSMessage) bool - instance int - outChannels []chan dnsutils.DNSMessage - logInfo func(msg string, v ...interface{}) - logError func(msg string, v ...interface{}) +type FilteringTransform struct { + GenericTransformer + mapRcodes map[string]bool + ipsetDrop, ipsetKeep, rDataIpsetKeep *netaddr.IPSet + listFqdns, listKeepFqdns map[string]bool + listDomainsRegex, listKeepDomainsRegex map[string]*regexp.Regexp + 
downsample, downsampleCount int } -func NewFilteringProcessor(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, - instance int, outChannels []chan dnsutils.DNSMessage, - logInfo func(msg string, v ...interface{}), logError func(msg string, v ...interface{}), -) FilteringProcessor { - // creates a new file watcher - watcher, err := fsnotify.NewWatcher() - if err != nil { - fmt.Println("ERROR", err) - } - defer watcher.Close() - - d := FilteringProcessor{ - config: config, - logger: logger, - mapRcodes: make(map[string]bool), - ipsetDrop: &netaddr.IPSet{}, - ipsetKeep: &netaddr.IPSet{}, - rDataIpsetKeep: &netaddr.IPSet{}, - listFqdns: make(map[string]bool), - listDomainsRegex: make(map[string]*regexp.Regexp), - listKeepFqdns: make(map[string]bool), - listKeepDomainsRegex: make(map[string]*regexp.Regexp), - fileWatcher: watcher, - name: name, - instance: instance, - outChannels: outChannels, - logInfo: logInfo, - logError: logError, - } - return d -} - -func (p *FilteringProcessor) ReloadConfig(config *pkgconfig.ConfigTransformers) { - p.config = config +func NewFilteringTransform(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, instance int, nextWorkers []chan dnsutils.DNSMessage) *FilteringTransform { + t := &FilteringTransform{GenericTransformer: NewTransformer(config, logger, "filtering", name, instance, nextWorkers)} + t.mapRcodes = make(map[string]bool) + t.ipsetDrop = &netaddr.IPSet{} + t.ipsetKeep = &netaddr.IPSet{} + t.rDataIpsetKeep = &netaddr.IPSet{} + t.listFqdns = make(map[string]bool) + t.listDomainsRegex = make(map[string]*regexp.Regexp) + t.listKeepFqdns = make(map[string]bool) + t.listKeepDomainsRegex = make(map[string]*regexp.Regexp) + return t } -func (p *FilteringProcessor) LogInfo(msg string, v ...interface{}) { - log := fmt.Sprintf("filtering#%d - ", p.instance) - p.logInfo(log+msg, v...) 
-} - -func (p *FilteringProcessor) LogError(msg string, v ...interface{}) { - log := fmt.Sprintf("filtering#%d - ", p.instance) - p.logError(log+msg, v...) -} +func (t *FilteringTransform) GetTransforms() ([]Subtransform, error) { + subtransforms := []Subtransform{} -func (p *FilteringProcessor) LoadActiveFilters() { - // TODO: Change to iteration through Filtering to add filters in custom order. - - // clean the slice - p.activeFilters = p.activeFilters[:0] - - if !p.config.Filtering.LogQueries { - p.activeFilters = append(p.activeFilters, p.ignoreQueryFilter) - p.LogInfo("drop queries subprocessor is enabled") + if err := t.LoadRcodes(); err != nil { + return nil, err } - - if !p.config.Filtering.LogReplies { - p.activeFilters = append(p.activeFilters, p.ignoreReplyFilter) - p.LogInfo("drop replies subprocessor is enabled") + if err := t.LoadDomainsList(); err != nil { + return nil, err } - - if len(p.mapRcodes) > 0 { - p.activeFilters = append(p.activeFilters, p.rCodeFilter) + if err := t.LoadQueryIPList(); err != nil { + return nil, err } - - if len(p.config.Filtering.KeepQueryIPFile) > 0 { - p.activeFilters = append(p.activeFilters, p.keepQueryIPFilter) + if err := t.LoadrDataIPList(); err != nil { + return nil, err } - if len(p.config.Filtering.DropQueryIPFile) > 0 { - p.activeFilters = append(p.activeFilters, p.DropQueryIPFilter) + if !t.config.Filtering.LogQueries { + subtransforms = append(subtransforms, Subtransform{name: "filtering:drop-queries", processFunc: t.dropQueryFilter}) } - - if len(p.config.Filtering.KeepRdataFile) > 0 { - p.activeFilters = append(p.activeFilters, p.keepRdataFilter) + if !t.config.Filtering.LogReplies { + subtransforms = append(subtransforms, Subtransform{name: "filtering:drop-replies", processFunc: t.dropReplyFilter}) } - - if len(p.listFqdns) > 0 { - p.activeFilters = append(p.activeFilters, p.dropFqdnFilter) + if len(t.mapRcodes) > 0 { + subtransforms = append(subtransforms, Subtransform{name: "filtering:drop-rcode", 
processFunc: t.dropRCodeFilter}) } - - if len(p.listDomainsRegex) > 0 { - p.activeFilters = append(p.activeFilters, p.dropDomainRegexFilter) + if len(t.config.Filtering.KeepQueryIPFile) > 0 { + subtransforms = append(subtransforms, Subtransform{name: "filtering:keep-queryip", processFunc: t.keepQueryIPFilter}) } - - if len(p.listKeepFqdns) > 0 { - p.activeFilters = append(p.activeFilters, p.keepFqdnFilter) + if len(t.config.Filtering.DropQueryIPFile) > 0 { + subtransforms = append(subtransforms, Subtransform{name: "filtering:drop-queryip", processFunc: t.dropQueryIPFilter}) } - - if len(p.listKeepDomainsRegex) > 0 { - p.activeFilters = append(p.activeFilters, p.keepDomainRegexFilter) + if len(t.config.Filtering.KeepRdataFile) > 0 { + subtransforms = append(subtransforms, Subtransform{name: "filtering:keep-rdata", processFunc: t.keepRdataFilter}) } - - // set downsample if desired - if p.config.Filtering.Downsample > 0 { - p.downsample = p.config.Filtering.Downsample - p.downsampleCount = 0 - p.activeFilters = append(p.activeFilters, p.downsampleFilter) - p.LogInfo("down sampling subprocessor is enabled") + if len(t.listFqdns) > 0 { + subtransforms = append(subtransforms, Subtransform{name: "filtering:drop-fqdn", processFunc: t.dropFqdnFilter}) } + if len(t.listDomainsRegex) > 0 { + subtransforms = append(subtransforms, Subtransform{name: "filtering:drop-domain", processFunc: t.dropDomainRegexFilter}) + } + if len(t.listKeepFqdns) > 0 { + subtransforms = append(subtransforms, Subtransform{name: "filtering:keep-fqdn", processFunc: t.keepFqdnFilter}) + } + if len(t.listKeepDomainsRegex) > 0 { + subtransforms = append(subtransforms, Subtransform{name: "filtering:keep-domain", processFunc: t.keepDomainRegexFilter}) + } + if t.config.Filtering.Downsample > 0 { + t.downsample = t.config.Filtering.Downsample + t.downsampleCount = 0 + subtransforms = append(subtransforms, Subtransform{name: "filtering:downsampling", processFunc: t.downsampleFilter}) + } + return 
subtransforms, nil } -func (p *FilteringProcessor) LoadRcodes() { +func (t *FilteringTransform) LoadRcodes() error { // empty - for key := range p.mapRcodes { - delete(p.mapRcodes, key) + for key := range t.mapRcodes { + delete(t.mapRcodes, key) } // add - for _, v := range p.config.Filtering.DropRcodes { - p.mapRcodes[v] = true + for _, v := range t.config.Filtering.DropRcodes { + t.mapRcodes[v] = true } + return nil } -func (p *FilteringProcessor) LoadQueryIPList() { - if len(p.config.Filtering.DropQueryIPFile) > 0 { - read, err := p.loadQueryIPList(p.config.Filtering.DropQueryIPFile, true) +func (t *FilteringTransform) LoadQueryIPList() error { + if len(t.config.Filtering.DropQueryIPFile) > 0 { + read, err := t.loadQueryIPList(t.config.Filtering.DropQueryIPFile, true) if err != nil { - p.LogError("unable to open query ip file: ", err) + return fmt.Errorf("unable to open query ip file: %w", err) } - p.LogInfo("loaded with %d query ip to the drop list", read) + t.LogInfo("loaded with %d query ip to the drop list", read) } - if len(p.config.Filtering.KeepQueryIPFile) > 0 { - read, err := p.loadQueryIPList(p.config.Filtering.KeepQueryIPFile, false) + if len(t.config.Filtering.KeepQueryIPFile) > 0 { + read, err := t.loadQueryIPList(t.config.Filtering.KeepQueryIPFile, false) if err != nil { - p.LogError("unable to open query ip file: ", err) + return fmt.Errorf("unable to open query ip file: %w", err) } - p.LogInfo("loaded with %d query ip to the keep list", read) + t.LogInfo("loaded with %d query ip to the keep list", read) } + return nil } -func (p *FilteringProcessor) LoadrDataIPList() { - if len(p.config.Filtering.KeepRdataFile) > 0 { - read, err := p.loadKeepRdataIPList(p.config.Filtering.KeepRdataFile) +func (t *FilteringTransform) LoadrDataIPList() error { + if len(t.config.Filtering.KeepRdataFile) > 0 { + read, err := t.loadKeepRdataIPList(t.config.Filtering.KeepRdataFile) if err != nil { - p.LogError("unable to open rdata ip file: ", err) + return 
fmt.Errorf("unable to open rdata ip file: %w", err) } - p.LogInfo("loaded with %d rdata ip to the keep list", read) + t.LogInfo("loaded with %d rdata ip to the keep list", read) } + return nil } -func (p *FilteringProcessor) LoadDomainsList() { +func (t *FilteringTransform) LoadDomainsList() error { // before to start, reset all maps - p.dropDomains = false - p.keepDomains = false - - for key := range p.listFqdns { - delete(p.listFqdns, key) + for key := range t.listFqdns { + delete(t.listFqdns, key) } - for key := range p.listDomainsRegex { - delete(p.listDomainsRegex, key) + for key := range t.listDomainsRegex { + delete(t.listDomainsRegex, key) } - for key := range p.listKeepFqdns { - delete(p.listKeepFqdns, key) + for key := range t.listKeepFqdns { + delete(t.listKeepFqdns, key) } - for key := range p.listKeepDomainsRegex { - delete(p.listKeepDomainsRegex, key) + for key := range t.listKeepDomainsRegex { + delete(t.listKeepDomainsRegex, key) } - if len(p.config.Filtering.DropFqdnFile) > 0 { - file, err := os.Open(p.config.Filtering.DropFqdnFile) + if len(t.config.Filtering.DropFqdnFile) > 0 { + file, err := os.Open(t.config.Filtering.DropFqdnFile) if err != nil { - p.LogError("unable to open fqdn file: ", err) - p.dropDomains = true + return fmt.Errorf("unable to open fqdn file: %w", err) } else { scanner := bufio.NewScanner(file) for scanner.Scan() { fqdn := strings.ToLower(scanner.Text()) - p.listFqdns[fqdn] = true + t.listFqdns[fqdn] = true } - p.LogInfo("loaded with %d fqdn to the drop list", len(p.listFqdns)) - p.dropDomains = true + t.LogInfo("loaded with %d fqdn to the drop list", len(t.listFqdns)) } } - if len(p.config.Filtering.DropDomainFile) > 0 { - file, err := os.Open(p.config.Filtering.DropDomainFile) + if len(t.config.Filtering.DropDomainFile) > 0 { + file, err := os.Open(t.config.Filtering.DropDomainFile) if err != nil { - p.LogError("unable to open regex list file: ", err) - p.dropDomains = true + return fmt.Errorf("unable to open regex list 
file: %w", err) } else { scanner := bufio.NewScanner(file) for scanner.Scan() { domain := strings.ToLower(scanner.Text()) - p.listDomainsRegex[domain] = regexp.MustCompile(domain) + t.listDomainsRegex[domain] = regexp.MustCompile(domain) } - p.LogInfo("loaded with %d domains to the drop list", len(p.listDomainsRegex)) - p.dropDomains = true + t.LogInfo("loaded with %d domains to the drop list", len(t.listDomainsRegex)) } } - if len(p.config.Filtering.KeepFqdnFile) > 0 { - file, err := os.Open(p.config.Filtering.KeepFqdnFile) + if len(t.config.Filtering.KeepFqdnFile) > 0 { + file, err := os.Open(t.config.Filtering.KeepFqdnFile) if err != nil { - p.LogError("unable to open KeepFqdnFile file: ", err) - p.keepDomains = false + return fmt.Errorf("unable to open KeepFqdnFile file: %w", err) } else { scanner := bufio.NewScanner(file) for scanner.Scan() { keepDomain := strings.ToLower(scanner.Text()) - p.listKeepFqdns[keepDomain] = true + t.listKeepFqdns[keepDomain] = true } - p.LogInfo("loaded with %d fqdns to the keep list", len(p.listKeepFqdns)) - p.keepDomains = true + t.LogInfo("loaded with %d fqdn(s) to the keep list", len(t.listKeepFqdns)) } } - if len(p.config.Filtering.KeepDomainFile) > 0 { - file, err := os.Open(p.config.Filtering.KeepDomainFile) + if len(t.config.Filtering.KeepDomainFile) > 0 { + file, err := os.Open(t.config.Filtering.KeepDomainFile) if err != nil { - p.LogError("unable to open KeepDomainFile file: ", err) - p.keepDomains = false + return fmt.Errorf("unable to open KeepDomainFile file: %w", err) } else { scanner := bufio.NewScanner(file) for scanner.Scan() { keepDomain := strings.ToLower(scanner.Text()) - p.listKeepDomainsRegex[keepDomain] = regexp.MustCompile(keepDomain) + t.listKeepDomainsRegex[keepDomain] = regexp.MustCompile(keepDomain) } - p.LogInfo("loaded with %d domains to the keep list", len(p.listKeepDomainsRegex)) - p.keepDomains = true + t.LogInfo("loaded with %d domains to the keep list", len(t.listKeepDomainsRegex)) } } + return 
nil } -func (p *FilteringProcessor) loadQueryIPList(fname string, drop bool) (uint64, error) { +func (t *FilteringTransform) loadQueryIPList(fname string, drop bool) (uint64, error) { var emptyIPSet *netaddr.IPSet - p.ipsetDrop = emptyIPSet - p.ipsetKeep = emptyIPSet + t.ipsetDrop = emptyIPSet + t.ipsetKeep = emptyIPSet file, err := os.Open(fname) if err != nil { @@ -287,7 +228,7 @@ func (p *FilteringProcessor) loadQueryIPList(fname string, drop bool) (uint64, e if err != nil { ip, err := netaddr.ParseIP(ipOrPrefix) if err != nil { - p.LogError("%s in in %s is neither an IP address nor a prefix", ipOrPrefix, fname) + t.LogError("%s in in %s is neither an IP address nor a prefix", ipOrPrefix, fname) continue } ipsetbuilder.Add(ip) @@ -299,17 +240,17 @@ func (p *FilteringProcessor) loadQueryIPList(fname string, drop bool) (uint64, e file.Close() if drop { - p.ipsetDrop, err = ipsetbuilder.IPSet() + t.ipsetDrop, err = ipsetbuilder.IPSet() } else { - p.ipsetKeep, err = ipsetbuilder.IPSet() + t.ipsetKeep, err = ipsetbuilder.IPSet() } return read, err } -func (p *FilteringProcessor) loadKeepRdataIPList(fname string) (uint64, error) { +func (t *FilteringTransform) loadKeepRdataIPList(fname string) (uint64, error) { var emptyIPSet *netaddr.IPSet - p.rDataIpsetKeep = emptyIPSet + t.rDataIpsetKeep = emptyIPSet file, err := os.Open(fname) if err != nil { @@ -326,7 +267,7 @@ func (p *FilteringProcessor) loadKeepRdataIPList(fname string) (uint64, error) { if err != nil { ip, err := netaddr.ParseIP(ipOrPrefix) if err != nil { - p.LogError("%s in in %s is neither an IP address nor a prefix", ipOrPrefix, fname) + t.LogError("%s in in %s is neither an IP address nor a prefix", ipOrPrefix, fname) continue } ipsetbuilder.Add(ip) @@ -337,143 +278,121 @@ func (p *FilteringProcessor) loadKeepRdataIPList(fname string) (uint64, error) { file.Close() - p.rDataIpsetKeep, err = ipsetbuilder.IPSet() + t.rDataIpsetKeep, err = ipsetbuilder.IPSet() return read, err } -func (p 
*FilteringProcessor) Run() { - for { - select { - // watch for events - case event := <-p.fileWatcher.Events: - fmt.Printf("EVENT! %#v\n", event) - - // watch for errors - case err := <-p.fileWatcher.Errors: - fmt.Println("ERROR", err) - } +func (t *FilteringTransform) dropQueryFilter(dm *dnsutils.DNSMessage) (int, error) { + if dm.DNS.Type == dnsutils.DNSQuery { + return ReturnDrop, nil } + return ReturnKeep, nil } -func (p *FilteringProcessor) ignoreQueryFilter(dm *dnsutils.DNSMessage) bool { - return dm.DNS.Type == dnsutils.DNSQuery -} - -func (p *FilteringProcessor) ignoreReplyFilter(dm *dnsutils.DNSMessage) bool { - return dm.DNS.Type == dnsutils.DNSReply +func (t *FilteringTransform) dropReplyFilter(dm *dnsutils.DNSMessage) (int, error) { + if dm.DNS.Type == dnsutils.DNSReply { + return ReturnDrop, nil + } + return ReturnKeep, nil } -func (p *FilteringProcessor) rCodeFilter(dm *dnsutils.DNSMessage) bool { +func (t *FilteringTransform) dropRCodeFilter(dm *dnsutils.DNSMessage) (int, error) { // drop according to the rcode ? 
- if _, ok := p.mapRcodes[dm.DNS.Rcode]; ok { - return true + if _, ok := t.mapRcodes[dm.DNS.Rcode]; ok { + return ReturnDrop, nil } - return false + return ReturnKeep, nil } -func (p *FilteringProcessor) keepQueryIPFilter(dm *dnsutils.DNSMessage) bool { +func (t *FilteringTransform) keepQueryIPFilter(dm *dnsutils.DNSMessage) (int, error) { ip, _ := netaddr.ParseIP(dm.NetworkInfo.QueryIP) - return !p.ipsetKeep.Contains(ip) + if t.ipsetKeep.Contains(ip) { + return ReturnKeep, nil + } + return ReturnDrop, nil } -func (p *FilteringProcessor) DropQueryIPFilter(dm *dnsutils.DNSMessage) bool { +func (t *FilteringTransform) dropQueryIPFilter(dm *dnsutils.DNSMessage) (int, error) { ip, _ := netaddr.ParseIP(dm.NetworkInfo.QueryIP) - return p.ipsetDrop.Contains(ip) + if t.ipsetDrop.Contains(ip) { + return ReturnDrop, nil + } + return ReturnKeep, nil } -func (p *FilteringProcessor) keepRdataFilter(dm *dnsutils.DNSMessage) bool { +func (t *FilteringTransform) keepRdataFilter(dm *dnsutils.DNSMessage) (int, error) { if len(dm.DNS.DNSRRs.Answers) > 0 { // If even one exists in filter list then pass through filter for _, answer := range dm.DNS.DNSRRs.Answers { if answer.Rdatatype == "A" || answer.Rdatatype == "AAAA" { ip, _ := netaddr.ParseIP(answer.Rdata) - if p.rDataIpsetKeep.Contains(ip) { - return false + if t.rDataIpsetKeep.Contains(ip) { + return ReturnKeep, nil } } } } - return true + return ReturnDrop, nil } -func (p *FilteringProcessor) dropFqdnFilter(dm *dnsutils.DNSMessage) bool { - if _, ok := p.listFqdns[dm.DNS.Qname]; ok { - return true +func (t *FilteringTransform) dropFqdnFilter(dm *dnsutils.DNSMessage) (int, error) { + if _, ok := t.listFqdns[dm.DNS.Qname]; ok { + return ReturnDrop, nil } - return false + return ReturnKeep, nil } -func (p *FilteringProcessor) dropDomainRegexFilter(dm *dnsutils.DNSMessage) bool { +func (t *FilteringTransform) dropDomainRegexFilter(dm *dnsutils.DNSMessage) (int, error) { // partial fqdn with regexp - for _, d := range 
p.listDomainsRegex { + for _, d := range t.listDomainsRegex { if d.MatchString(dm.DNS.Qname) { - return true + return ReturnDrop, nil } } - return false + return ReturnKeep, nil } -func (p *FilteringProcessor) keepFqdnFilter(dm *dnsutils.DNSMessage) bool { - if _, ok := p.listKeepFqdns[dm.DNS.Qname]; ok { - return false +func (t *FilteringTransform) keepFqdnFilter(dm *dnsutils.DNSMessage) (int, error) { + if _, ok := t.listKeepFqdns[dm.DNS.Qname]; ok { + return ReturnKeep, nil } - return true + return ReturnDrop, nil } -func (p *FilteringProcessor) keepDomainRegexFilter(dm *dnsutils.DNSMessage) bool { +func (t *FilteringTransform) keepDomainRegexFilter(dm *dnsutils.DNSMessage) (int, error) { // partial fqdn with regexp - for _, d := range p.listKeepDomainsRegex { + for _, d := range t.listKeepDomainsRegex { if d.MatchString(dm.DNS.Qname) { - return false + return ReturnKeep, nil } } - return true + return ReturnDrop, nil } // drop all except every nth entry -func (p *FilteringProcessor) downsampleFilter(dm *dnsutils.DNSMessage) bool { +func (t *FilteringTransform) downsampleFilter(dm *dnsutils.DNSMessage) (int, error) { + if dm.Filtering == nil { + dm.Filtering = &dnsutils.TransformFiltering{} + } + // Increment the downsampleCount for each processed DNS message. - p.downsampleCount += 1 + t.downsampleCount += 1 // Calculate the remainder once and add sampling rate to DNS message - remainder := p.downsampleCount % p.downsample + remainder := t.downsampleCount % t.downsample if dm.Filtering != nil { - dm.Filtering.SampleRate = p.downsample + dm.Filtering.SampleRate = t.downsample } switch remainder { // If the remainder is zero, reset the downsampleCount to 0 and drop the DNS message by returning false. case 0: - p.downsampleCount = 0 - return false + t.downsampleCount = 0 + return ReturnDrop, nil // If the remainder is not zero, keep the DNS message and return true. 
default: - return true - } -} - -func (p *FilteringProcessor) CheckIfDrop(dm *dnsutils.DNSMessage) bool { - if len(p.activeFilters) == 0 { - return false - } - - var value bool - for _, fn := range p.activeFilters { - value = fn(dm) - if value { - return true - } - } - - return false -} - -func (p *FilteringProcessor) InitDNSMessage(dm *dnsutils.DNSMessage) { - if dm.Filtering == nil { - dm.Filtering = &dnsutils.TransformFiltering{ - SampleRate: 0, - } + return ReturnKeep, nil } } diff --git a/transformers/filtering_test.go b/transformers/filtering_test.go index e256c0a4..7eca2158 100644 --- a/transformers/filtering_test.go +++ b/transformers/filtering_test.go @@ -20,23 +20,26 @@ func TestFilteringQR(t *testing.T) { config.Filtering.LogQueries = false config.Filtering.LogReplies = false - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - filtering := NewFilteringProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - filtering.LoadActiveFilters() + filtering := NewFilteringTransform(config, logger.New(false), "test", 0, outChans) + + // get tranforms + subtransforms, _ := filtering.GetTransforms() + if len(subtransforms) != 2 { + t.Errorf("invalid number of subtransforms enabled") + } dm := dnsutils.GetFakeDNSMessage() - if !filtering.CheckIfDrop(&dm) { - t.Errorf("dns query should be ignored") + if result, _ := filtering.dropQueryFilter(&dm); result != ReturnDrop { + t.Errorf("dns query should be dropped") } dm.DNS.Type = dnsutils.DNSReply - if !filtering.CheckIfDrop(&dm) { - t.Errorf("dns reply should be ignored") + if result, _ := filtering.dropReplyFilter(&dm); result != ReturnDrop { + t.Errorf("dns reply should be dropped") } - } func TestFilteringByRcodeNOERROR(t *testing.T) { @@ -45,19 +48,21 @@ func TestFilteringByRcodeNOERROR(t *testing.T) { config.Filtering.Enable = true config.Filtering.DropRcodes = []string{"NOERROR"} - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // 
init subproccesor - filtering := NewFilteringProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - filtering.LoadRcodes() - filtering.LoadActiveFilters() + filtering := NewFilteringTransform(config, logger.New(false), "test", 0, outChans) + + // get tranforms + subtransforms, _ := filtering.GetTransforms() + if len(subtransforms) != 1 { + t.Errorf("invalid number of subtransforms enabled") + } dm := dnsutils.GetFakeDNSMessage() - if filtering.CheckIfDrop(&dm) == false { + if result, _ := filtering.dropRCodeFilter(&dm); result != ReturnDrop { t.Errorf("dns query should be dropped") } - } func TestFilteringByRcodeEmpty(t *testing.T) { @@ -66,17 +71,15 @@ func TestFilteringByRcodeEmpty(t *testing.T) { config.Filtering.Enable = true config.Filtering.DropRcodes = []string{} - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - filtering := NewFilteringProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - filtering.LoadRcodes() - filtering.LoadActiveFilters() + filtering := NewFilteringTransform(config, logger.New(false), "test", 0, outChans) - dm := dnsutils.GetFakeDNSMessage() - if filtering.CheckIfDrop(&dm) == true { - t.Errorf("dns query should not be dropped!") + // get tranforms + subtransforms, _ := filtering.GetTransforms() + if len(subtransforms) != 0 { + t.Errorf("no subtransforms should be enabled") } } @@ -84,61 +87,66 @@ func TestFilteringByKeepQueryIp(t *testing.T) { // config config := pkgconfig.GetFakeConfigTransformers() config.Filtering.Enable = true - config.Filtering.KeepQueryIPFile = "../testsdata/filtering_queryip_keep.txt" + config.Filtering.KeepQueryIPFile = "../tests/testsdata/filtering_queryip_keep.txt" - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - filtering := NewFilteringProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - filtering.LoadQueryIPList() - filtering.LoadActiveFilters() 
+ filtering := NewFilteringTransform(config, logger.New(false), "test", 0, outChans) + + // get tranforms + subtransforms, _ := filtering.GetTransforms() + if len(subtransforms) != 1 { + t.Errorf("invalid number of subtransforms enabled") + } dm := dnsutils.GetFakeDNSMessage() dm.NetworkInfo.QueryIP = "192.168.0.1" - if filtering.CheckIfDrop(&dm) == false { + if result, _ := filtering.keepQueryIPFilter(&dm); result != ReturnDrop { t.Errorf("dns query should be dropped!") } dm.NetworkInfo.QueryIP = "192.168.1.10" - if filtering.CheckIfDrop(&dm) == true { + if result, _ := filtering.keepQueryIPFilter(&dm); result != ReturnKeep { t.Errorf("dns query should not be dropped!") } dm.NetworkInfo.QueryIP = "192.3.2.1" // kept by subnet - if filtering.CheckIfDrop(&dm) == true { + if result, _ := filtering.keepQueryIPFilter(&dm); result != ReturnKeep { t.Errorf("dns query should not be dropped!") } - } func TestFilteringByDropQueryIp(t *testing.T) { // config config := pkgconfig.GetFakeConfigTransformers() config.Filtering.Enable = true - config.Filtering.DropQueryIPFile = "../testsdata/filtering_queryip.txt" + config.Filtering.DropQueryIPFile = "../tests/testsdata/filtering_queryip.txt" - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - filtering := NewFilteringProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - filtering.LoadQueryIPList() - filtering.LoadActiveFilters() + filtering := NewFilteringTransform(config, logger.New(false), "test", 0, outChans) + + // get tranforms + subtransforms, _ := filtering.GetTransforms() + if len(subtransforms) != 1 { + t.Errorf("invalid number of subtransforms enabled") + } dm := dnsutils.GetFakeDNSMessage() dm.NetworkInfo.QueryIP = "192.168.0.1" - if filtering.CheckIfDrop(&dm) == true { + if result, _ := filtering.dropQueryIPFilter(&dm); result != ReturnKeep { t.Errorf("dns query should not be dropped!") } dm.NetworkInfo.QueryIP = "192.168.1.15" - if 
filtering.CheckIfDrop(&dm) == false { + if result, _ := filtering.dropQueryIPFilter(&dm); result != ReturnDrop { t.Errorf("dns query should be dropped!") } dm.NetworkInfo.QueryIP = "192.0.2.3" // dropped by subnet - if filtering.CheckIfDrop(&dm) == false { + if result, _ := filtering.dropQueryIPFilter(&dm); result != ReturnDrop { t.Errorf("dns query should be dropped!") } @@ -148,15 +156,18 @@ func TestFilteringByKeepRdataIp(t *testing.T) { // config config := pkgconfig.GetFakeConfigTransformers() config.Filtering.Enable = true - config.Filtering.KeepRdataFile = "../testsdata/filtering_rdataip_keep.txt" + config.Filtering.KeepRdataFile = "../tests/testsdata/filtering_rdataip_keep.txt" - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - filtering := NewFilteringProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - filtering.LoadrDataIPList() - filtering.LoadActiveFilters() + filtering := NewFilteringTransform(config, logger.New(false), "test", 0, outChans) + + // get tranforms + subtransforms, _ := filtering.GetTransforms() + if len(subtransforms) != 1 { + t.Errorf("invalid number of subtransforms enabled") + } dm := dnsutils.GetFakeDNSMessage() dm.DNS.DNSRRs.Answers = []dnsutils.DNSAnswer{ @@ -165,7 +176,7 @@ func TestFilteringByKeepRdataIp(t *testing.T) { Rdata: "192.168.0.1", }, } - if filtering.CheckIfDrop(&dm) == false { + if result, _ := filtering.keepRdataFilter(&dm); result != ReturnDrop { t.Errorf("dns query should be dropped!") } @@ -175,7 +186,7 @@ func TestFilteringByKeepRdataIp(t *testing.T) { Rdata: "192.168.1.10", }, } - if filtering.CheckIfDrop(&dm) == true { + if result, _ := filtering.keepRdataFilter(&dm); result != ReturnKeep { t.Errorf("dns query should not be dropped!") } @@ -185,7 +196,7 @@ func TestFilteringByKeepRdataIp(t *testing.T) { Rdata: "192.168.1.11", // included in subnet }, } - if filtering.CheckIfDrop(&dm) == true { + if result, _ := filtering.keepRdataFilter(&dm); 
result != ReturnKeep { t.Errorf("dns query should not be dropped!") } @@ -195,7 +206,7 @@ func TestFilteringByKeepRdataIp(t *testing.T) { Rdata: "192.0.2.3", // dropped by subnet }, } - if filtering.CheckIfDrop(&dm) == false { + if result, _ := filtering.keepRdataFilter(&dm); result != ReturnDrop { t.Errorf("dns query should be dropped!") } @@ -205,7 +216,7 @@ func TestFilteringByKeepRdataIp(t *testing.T) { Rdata: "192.0.2.1", }, } - if filtering.CheckIfDrop(&dm) == true { + if result, _ := filtering.keepRdataFilter(&dm); result != ReturnKeep { t.Errorf("dns query should not be dropped!") } @@ -215,7 +226,7 @@ func TestFilteringByKeepRdataIp(t *testing.T) { Rdata: "2001:db8:85a3::8a2e:370:7334", }, } - if filtering.CheckIfDrop(&dm) == true { + if result, _ := filtering.keepRdataFilter(&dm); result != ReturnKeep { t.Errorf("dns query should not be dropped!") } @@ -225,7 +236,7 @@ func TestFilteringByKeepRdataIp(t *testing.T) { Rdata: "2041::7334", }, } - if filtering.CheckIfDrop(&dm) == false { + if result, _ := filtering.keepRdataFilter(&dm); result != ReturnDrop { t.Errorf("dns query should be dropped!") } @@ -235,7 +246,7 @@ func TestFilteringByKeepRdataIp(t *testing.T) { Rdata: "2001:0dbd:85a3::0001", }, } - if filtering.CheckIfDrop(&dm) == true { + if result, _ := filtering.keepRdataFilter(&dm); result != ReturnKeep { t.Errorf("dns query should not be dropped!") } } @@ -244,24 +255,27 @@ func TestFilteringByFqdn(t *testing.T) { // config config := pkgconfig.GetFakeConfigTransformers() config.Filtering.Enable = true - config.Filtering.DropFqdnFile = "../testsdata/filtering_fqdn.txt" + config.Filtering.DropFqdnFile = "../tests/testsdata/filtering_fqdn.txt" - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - filtering := NewFilteringProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - filtering.LoadDomainsList() - filtering.LoadActiveFilters() + filtering := NewFilteringTransform(config, 
logger.New(false), "test", 0, outChans) + + // get tranforms + subtransforms, _ := filtering.GetTransforms() + if len(subtransforms) != 1 { + t.Errorf("invalid number of subtransforms enabled") + } dm := dnsutils.GetFakeDNSMessage() dm.DNS.Qname = "www.microsoft.com" - if filtering.CheckIfDrop(&dm) == true { + if result, _ := filtering.dropFqdnFilter(&dm); result != ReturnKeep { t.Errorf("dns query should not be dropped!") } dm.DNS.Qname = testURL1 - if filtering.CheckIfDrop(&dm) == false { + if result, _ := filtering.dropFqdnFilter(&dm); result != ReturnDrop { t.Errorf("dns query should be dropped!") } } @@ -270,29 +284,32 @@ func TestFilteringByDomainRegex(t *testing.T) { // config config := pkgconfig.GetFakeConfigTransformers() config.Filtering.Enable = true - config.Filtering.DropDomainFile = "../testsdata/filtering_fqdn_regex.txt" + config.Filtering.DropDomainFile = "../tests/testsdata/filtering_fqdn_regex.txt" - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - filtering := NewFilteringProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - filtering.LoadDomainsList() - filtering.LoadActiveFilters() + filtering := NewFilteringTransform(config, logger.New(false), "test", 0, outChans) + + // get tranforms + subtransforms, _ := filtering.GetTransforms() + if len(subtransforms) != 1 { + t.Errorf("invalid number of subtransforms enabled") + } dm := dnsutils.GetFakeDNSMessage() dm.DNS.Qname = testURL1 - if filtering.CheckIfDrop(&dm) == false { + if result, _ := filtering.dropDomainRegexFilter(&dm); result != ReturnDrop { t.Errorf("dns query should be dropped!") } dm.DNS.Qname = testURL2 - if filtering.CheckIfDrop(&dm) == false { + if result, _ := filtering.dropDomainRegexFilter(&dm); result != ReturnDrop { t.Errorf("dns query should be dropped!") } dm.DNS.Qname = "github.fr" - if filtering.CheckIfDrop(&dm) == true { + if result, _ := filtering.dropDomainRegexFilter(&dm); result != ReturnKeep { 
t.Errorf("dns query should not be dropped!") } } @@ -301,36 +318,39 @@ func TestFilteringByKeepDomain(t *testing.T) { // config config := pkgconfig.GetFakeConfigTransformers() - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // file contains google.fr, test.github.com config.Filtering.Enable = true - config.Filtering.KeepDomainFile = "../testsdata/filtering_keep_domains.txt" + config.Filtering.KeepFqdnFile = "../tests/testsdata/filtering_keep_domains.txt" // init subproccesor - filtering := NewFilteringProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - filtering.LoadDomainsList() - filtering.LoadActiveFilters() + filtering := NewFilteringTransform(config, logger.New(false), "test", 0, outChans) + + // get tranforms + subtransforms, _ := filtering.GetTransforms() + if len(subtransforms) != 1 { + t.Errorf("invalid number of subtransforms enabled") + } dm := dnsutils.GetFakeDNSMessage() dm.DNS.Qname = testURL1 - if filtering.CheckIfDrop(&dm) == false { + if result, _ := filtering.keepFqdnFilter(&dm); result != ReturnDrop { t.Errorf("dns query should be dropped! Domain: %s", dm.DNS.Qname) } dm.DNS.Qname = "example.com" - if filtering.CheckIfDrop(&dm) == false { + if result, _ := filtering.keepFqdnFilter(&dm); result != ReturnDrop { t.Errorf("dns query should be dropped! 
Domain: %s", dm.DNS.Qname) } dm.DNS.Qname = testURL2 - if filtering.CheckIfDrop(&dm) == true { + if result, _ := filtering.keepFqdnFilter(&dm); result != ReturnKeep { t.Errorf("dns query should not be dropped!") } dm.DNS.Qname = "google.fr" - if filtering.CheckIfDrop(&dm) == true { + if result, _ := filtering.keepFqdnFilter(&dm); result != ReturnKeep { t.Errorf("dns query should not be dropped!") } } @@ -339,7 +359,6 @@ func TestFilteringByKeepDomainRegex(t *testing.T) { // config config := pkgconfig.GetFakeConfigTransformers() - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} /* file contains: @@ -348,168 +367,55 @@ func TestFilteringByKeepDomainRegex(t *testing.T) { .+.google.com$ */ config.Filtering.Enable = true - config.Filtering.KeepDomainFile = "../testsdata/filtering_keep_domains_regex.txt" + config.Filtering.KeepDomainFile = "../tests/testsdata/filtering_keep_domains_regex.txt" // init subproccesor - filtering := NewFilteringProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - filtering.LoadDomainsList() - filtering.LoadActiveFilters() + filtering := NewFilteringTransform(config, logger.New(false), "test", 0, outChans) + + // get tranforms + subtransforms, _ := filtering.GetTransforms() + if len(subtransforms) != 1 { + t.Errorf("invalid number of subtransforms enabled") + } dm := dnsutils.GetFakeDNSMessage() dm.DNS.Qname = testURL1 - if filtering.CheckIfDrop(&dm) == true { + if result, _ := filtering.keepDomainRegexFilter(&dm); result != ReturnKeep { t.Errorf("dns query should not be dropped!") } dm.DNS.Qname = "test.google.com.ru" - if filtering.CheckIfDrop(&dm) == false { + if result, _ := filtering.keepDomainRegexFilter(&dm); result != ReturnDrop { // If this passes then these are not terminated. 
t.Errorf("dns query should be dropped!") } dm.DNS.Qname = testURL2 - if filtering.CheckIfDrop(&dm) == true { + if result, _ := filtering.keepDomainRegexFilter(&dm); result != ReturnKeep { t.Errorf("dns query should not be dropped!") } dm.DNS.Qname = "test.github.com.malware.ru" - if filtering.CheckIfDrop(&dm) == false { + if result, _ := filtering.keepDomainRegexFilter(&dm); result != ReturnDrop { t.Errorf("dns query should be dropped!") } } -func TestFilteringByDownsampleDisabled(t *testing.T) { - // config, down sample is disabled by default - config := pkgconfig.GetFakeConfigTransformers() - config.Filtering.Enable = true - - log := logger.New(false) - outChans := []chan dnsutils.DNSMessage{} - - // init subproccesor - filtering := NewFilteringProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - filtering.LoadActiveFilters() - - // init DNS Message - dm := dnsutils.GetFakeDNSMessage() - - // test for default behavior when downsample is set to 0 - filtering = NewFilteringProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - - if filtering.CheckIfDrop(&dm) == true { - t.Errorf("dns query should not be dropped! downsampling rate is set to 0 and should not downsample.") - } - if filtering.CheckIfDrop(&dm) == true { - t.Errorf("dns query should not be dropped! downsampling rate is set to 0 and should not downsample.") - } -} - -func TestFilteringByDownsample(t *testing.T) { - // config - config := pkgconfig.GetFakeConfigTransformers() - config.Filtering.Enable = true - config.Filtering.Downsample = 2 - - log := logger.New(false) - outChans := []chan dnsutils.DNSMessage{} - - // init subproccesor - filtering := NewFilteringProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - filtering.LoadActiveFilters() - - // init DNS Message - dm := dnsutils.GetFakeDNSMessage() - - // filtering.downsampleCount - if filtering.CheckIfDrop(&dm) == false { - t.Errorf("dns query should be dropped! 
downsampled should exclude first hit.") - } - - if filtering.CheckIfDrop(&dm) == true { - t.Errorf("dns query should not be dropped! downsampled one record and then should include the next if downsample rate is 2") - } - - if filtering.CheckIfDrop(&dm) == false { - t.Errorf("dns query should be dropped! downsampled should exclude first hit.") - } - - if filtering.CheckIfDrop(&dm) == true { - t.Errorf("dns query should not be dropped! downsampled one record and then should include the next if downsample rate is 2") - } -} - -func TestFilteringByDownsampleUpdateJSONModel(t *testing.T) { - // config - config := pkgconfig.GetFakeConfigTransformers() - config.Filtering.Enable = true - config.Filtering.Downsample = 2 - - log := logger.New(false) - outChans := []chan dnsutils.DNSMessage{} - - // init subproccesor - filtering := NewFilteringProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - filtering.LoadActiveFilters() - - // init DNS Message - dm := dnsutils.GetFakeDNSMessage() - filtering.InitDNSMessage(&dm) - - // filtering.downsampleCount - if filtering.CheckIfDrop(&dm) == false { - t.Errorf("dns query should be dropped! 
downsampled should exclude first hit.") - } - - // test json model - - jsonRef := dnsutils.DNSMessage{ - Filtering: &dnsutils.TransformFiltering{SampleRate: 2}, - } - if dm.Filtering.SampleRate != jsonRef.Filtering.SampleRate { - t.Errorf("DNS message invalid sample rate: Want=%d, Get=%d", jsonRef.Filtering.SampleRate, dm.Filtering.SampleRate) - } -} - func TestFilteringMultipleFilters(t *testing.T) { // config config := pkgconfig.GetFakeConfigTransformers() config.Filtering.Enable = true - config.Filtering.DropDomainFile = "../testsdata/filtering_fqdn_regex.txt" - config.Filtering.DropQueryIPFile = "../testsdata/filtering_queryip.txt" + config.Filtering.DropDomainFile = "../tests/testsdata/filtering_fqdn_regex.txt" + config.Filtering.DropQueryIPFile = "../tests/testsdata/filtering_queryip.txt" - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - filtering := NewFilteringProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - filtering.LoadQueryIPList() - filtering.LoadDomainsList() - filtering.LoadActiveFilters() - - dm := dnsutils.GetFakeDNSMessage() - dm.DNS.Qname = testURL1 - if filtering.CheckIfDrop(&dm) == false { - t.Errorf("dns query should be dropped!") - } + filtering := NewFilteringTransform(config, logger.New(false), "test", 0, outChans) + subtransforms, _ := filtering.GetTransforms() - dm.DNS.Qname = testURL2 - if filtering.CheckIfDrop(&dm) == false { - t.Errorf("dns query should be dropped!") - } - - dm.DNS.Qname = "github.fr" - if filtering.CheckIfDrop(&dm) == true { - t.Errorf("dns query should not be dropped!") - } - - dm.NetworkInfo.QueryIP = "192.168.1.15" - if filtering.CheckIfDrop(&dm) == false { - t.Errorf("dns query should be dropped!") - } - - dm.NetworkInfo.QueryIP = "192.0.2.3" // dropped by subnet - if filtering.CheckIfDrop(&dm) == false { - t.Errorf("dns query should be dropped!") + if len(subtransforms) != 2 { + t.Errorf("invalid number of subtransforms enabled") } } diff 
--git a/transformers/geoip.go b/transformers/geoip.go index 773f0523..02dd8545 100644 --- a/transformers/geoip.go +++ b/transformers/geoip.go @@ -26,135 +26,86 @@ type MaxminddbRecord struct { } type GeoRecord struct { - Continent string - CountryISOCode string - City string - ASN string - ASO string + Continent, CountryISOCode, City, ASN, ASO string } -type GeoIPProcessor struct { - config *pkgconfig.ConfigTransformers - logger *logger.Logger - dbCountry *maxminddb.Reader - dbCity *maxminddb.Reader - dbAsn *maxminddb.Reader - enabled bool - name string - instance int - outChannels []chan dnsutils.DNSMessage - logInfo func(msg string, v ...interface{}) - logError func(msg string, v ...interface{}) +type GeoIPTransform struct { + GenericTransformer + dbCountry, dbCity, dbAsn *maxminddb.Reader } -func NewDNSGeoIPProcessor(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, - instance int, outChannels []chan dnsutils.DNSMessage, - logInfo func(msg string, v ...interface{}), logError func(msg string, v ...interface{}), -) GeoIPProcessor { - d := GeoIPProcessor{ - config: config, - logger: logger, - name: name, - instance: instance, - outChannels: outChannels, - logInfo: logInfo, - logError: logError, - } - - return d -} - -func (p *GeoIPProcessor) ReloadConfig(config *pkgconfig.ConfigTransformers) { - p.config = config -} - -func (p *GeoIPProcessor) LogInfo(msg string, v ...interface{}) { - log := fmt.Sprintf("geoip#%d - ", p.instance) - p.logInfo(log+msg, v...) +func NewDNSGeoIPTransform(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, instance int, nextWorkers []chan dnsutils.DNSMessage) *GeoIPTransform { + t := &GeoIPTransform{GenericTransformer: NewTransformer(config, logger, "geoip", name, instance, nextWorkers)} + return t } -func (p *GeoIPProcessor) LogError(msg string, v ...interface{}) { - log := fmt.Sprintf("geoip#%d - ", p.instance) - p.logError(log+msg, v...) 
+func (t *GeoIPTransform) GetTransforms() ([]Subtransform, error) { + subtransforms := []Subtransform{} + if t.config.GeoIP.Enable { + if err := t.Open(); err != nil { + return nil, fmt.Errorf("open error %w", err) + } + subtransforms = append(subtransforms, Subtransform{name: "geoip:lookup", processFunc: t.geoipTransform}) + } + return subtransforms, nil } -func (p *GeoIPProcessor) InitDNSMessage(dm *dnsutils.DNSMessage) { - if dm.Geo == nil { - dm.Geo = &dnsutils.TransformDNSGeo{ - CountryIsoCode: "-", - City: "-", - Continent: "-", - AutonomousSystemNumber: "-", - AutonomousSystemOrg: "-", - } +func (t *GeoIPTransform) Reset() { + if t.config.GeoIP.Enable { + t.Close() } } -func (p *GeoIPProcessor) Open() (err error) { +func (t *GeoIPTransform) Open() (err error) { // before to open, close all files // because open can be called also on reload - p.enabled = false - p.Close() + t.Close() // open files ? - if len(p.config.GeoIP.DBCountryFile) > 0 { - p.dbCountry, err = maxminddb.Open(p.config.GeoIP.DBCountryFile) + if len(t.config.GeoIP.DBCountryFile) > 0 { + t.dbCountry, err = maxminddb.Open(t.config.GeoIP.DBCountryFile) if err != nil { - p.enabled = false - return + return err } - p.enabled = true - p.LogInfo("country database loaded (%d records)", p.dbCountry.Metadata.NodeCount) + t.LogInfo("country database loaded (%d records)", t.dbCountry.Metadata.NodeCount) } - if len(p.config.GeoIP.DBCityFile) > 0 { - p.dbCity, err = maxminddb.Open(p.config.GeoIP.DBCityFile) + if len(t.config.GeoIP.DBCityFile) > 0 { + t.dbCity, err = maxminddb.Open(t.config.GeoIP.DBCityFile) if err != nil { - p.enabled = false - return + return err } - p.enabled = true - p.LogInfo("city database loaded (%d records)", p.dbCity.Metadata.NodeCount) + t.LogInfo("city database loaded (%d records)", t.dbCity.Metadata.NodeCount) } - if len(p.config.GeoIP.DBASNFile) > 0 { - p.dbAsn, err = maxminddb.Open(p.config.GeoIP.DBASNFile) + if len(t.config.GeoIP.DBASNFile) > 0 { + t.dbAsn, err = 
maxminddb.Open(t.config.GeoIP.DBASNFile) if err != nil { - p.enabled = false - return + return err } - p.enabled = true - p.LogInfo("asn database loaded (%d records)", p.dbAsn.Metadata.NodeCount) + t.LogInfo("asn database loaded (%d records)", t.dbAsn.Metadata.NodeCount) } return nil } -func (p *GeoIPProcessor) IsEnabled() bool { - return p.enabled -} - -func (p *GeoIPProcessor) Close() { - if p.dbCountry != nil { - p.dbCountry.Close() +func (t *GeoIPTransform) Close() { + if t.dbCountry != nil { + t.dbCountry.Close() } - if p.dbCity != nil { - p.dbCity.Close() + if t.dbCity != nil { + t.dbCity.Close() } - if p.dbAsn != nil { - p.dbAsn.Close() + if t.dbAsn != nil { + t.dbAsn.Close() } } -func (p *GeoIPProcessor) Lookup(ip string) (GeoRecord, error) { +func (t *GeoIPTransform) Lookup(ip string) (GeoRecord, error) { record := &MaxminddbRecord{} - rec := GeoRecord{Continent: "-", - CountryISOCode: "-", - City: "-", - ASN: "-", - ASO: "-"} - - if p.dbAsn != nil { - err := p.dbAsn.Lookup(net.ParseIP(ip), &record) + rec := GeoRecord{Continent: "-", CountryISOCode: "-", City: "-", ASN: "-", ASO: "-"} + + if t.dbAsn != nil { + err := t.dbAsn.Lookup(net.ParseIP(ip), &record) if err != nil { return rec, err } @@ -162,8 +113,8 @@ func (p *GeoIPProcessor) Lookup(ip string) (GeoRecord, error) { rec.ASO = record.AutonomousSystemOrganization } - if p.dbCity != nil { - err := p.dbCity.Lookup(net.ParseIP(ip), &record) + if t.dbCity != nil { + err := t.dbCity.Lookup(net.ParseIP(ip), &record) if err != nil { return rec, err } @@ -171,14 +122,32 @@ func (p *GeoIPProcessor) Lookup(ip string) (GeoRecord, error) { rec.CountryISOCode = record.Country.ISOCode rec.Continent = record.Continent.Code - } else if p.dbCountry != nil { - err := p.dbCountry.Lookup(net.ParseIP(ip), &record) + } else if t.dbCountry != nil { + err := t.dbCountry.Lookup(net.ParseIP(ip), &record) if err != nil { return rec, err } rec.CountryISOCode = record.Country.ISOCode rec.Continent = record.Continent.Code } - 
return rec, nil } + +func (t *GeoIPTransform) geoipTransform(dm *dnsutils.DNSMessage) (int, error) { + if dm.Geo == nil { + dm.Geo = &dnsutils.TransformDNSGeo{CountryIsoCode: "-", City: "-", Continent: "-", AutonomousSystemNumber: "-", AutonomousSystemOrg: "-"} + } + + geoInfo, err := t.Lookup(dm.NetworkInfo.QueryIP) + if err != nil { + return ReturnKeep, err + } + + dm.Geo.Continent = geoInfo.Continent + dm.Geo.CountryIsoCode = geoInfo.CountryISOCode + dm.Geo.City = geoInfo.City + dm.Geo.AutonomousSystemNumber = geoInfo.ASN + dm.Geo.AutonomousSystemOrg = geoInfo.ASO + + return ReturnKeep, nil +} diff --git a/transformers/geoip_test.go b/transformers/geoip_test.go index 532efa5b..03e65061 100644 --- a/transformers/geoip_test.go +++ b/transformers/geoip_test.go @@ -13,8 +13,6 @@ import ( func TestGeoIP_Json(t *testing.T) { // enable feature config := pkgconfig.GetFakeConfigTransformers() - - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // get fake @@ -22,12 +20,14 @@ func TestGeoIP_Json(t *testing.T) { dm.Init() // init subproccesor - geoip := NewDNSGeoIPProcessor(config, logger.New(true), "test", 0, outChans, log.Info, log.Error) + geoip := NewDNSGeoIPTransform(config, logger.New(true), "test", 0, outChans) if err := geoip.Open(); err != nil { t.Fatalf("geoip init failed: %v+", err) } defer geoip.Close() - geoip.InitDNSMessage(&dm) + + geoip.GetTransforms() + geoip.geoipTransform(&dm) // expected json refJSON := ` @@ -66,54 +66,53 @@ func TestGeoIP_Json(t *testing.T) { func TestGeoIP_LookupCountry(t *testing.T) { // enable geoip config := pkgconfig.GetFakeConfigTransformers() - config.GeoIP.DBCountryFile = "../testsdata/GeoLite2-Country.mmdb" + config.GeoIP.Enable = true + config.GeoIP.DBCountryFile = "../tests/testsdata/GeoLite2-Country.mmdb" - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init the processor - geoip := NewDNSGeoIPProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - if err := 
geoip.Open(); err != nil { + geoip := NewDNSGeoIPTransform(config, logger.New(false), "test", 0, outChans) + _, err := geoip.GetTransforms() + if err != nil { t.Fatalf("geoip init failed: %v+", err) } defer geoip.Close() - // feature is enabled ? - if !geoip.IsEnabled() { - t.Fatalf("geoip should be enabled") - } + // create test message + dm := dnsutils.GetFakeDNSMessage() + dm.NetworkInfo.QueryIP = "83.112.146.176" - // lookup - geoInfo, err := geoip.Lookup("92.184.1.1") + // apply subprocessors + returnCode, err := geoip.geoipTransform(&dm) if err != nil { - t.Errorf("geoip loopkup failed: %v+", err) + t.Errorf("process transform err: %v", err) + } + + if dm.Geo.CountryIsoCode != "FR" { + t.Errorf("country invalid want: FR got: %s", dm.Geo.CountryIsoCode) } - if geoInfo.CountryISOCode != "FR" { - t.Errorf("country invalid want: XX got: %s", geoInfo.CountryISOCode) + if returnCode != ReturnKeep { + t.Errorf("Return code is %v and not RETURN_KEEP (%v)", returnCode, ReturnKeep) } } func TestGeoIP_LookupAsn(t *testing.T) { // enable geoip config := pkgconfig.GetFakeConfigTransformers() - config.GeoIP.DBASNFile = "../testsdata/GeoLite2-ASN.mmdb" + config.GeoIP.Enable = true + config.GeoIP.DBASNFile = "../tests/testsdata/GeoLite2-ASN.mmdb" - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init the processor - geoip := NewDNSGeoIPProcessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + geoip := NewDNSGeoIPTransform(config, logger.New(false), "test", 0, outChans) if err := geoip.Open(); err != nil { t.Fatalf("geoip init failed: %v", err) } defer geoip.Close() - // feature is enabled ? 
- if !geoip.IsEnabled() { - t.Fatalf("geoip should be enabled") - } - // lookup geoInfo, err := geoip.Lookup("83.112.146.176") if err != nil { diff --git a/transformers/latency.go b/transformers/latency.go index 4ff05266..9679e830 100644 --- a/transformers/latency.go +++ b/transformers/latency.go @@ -100,47 +100,35 @@ func (mp *HashQueries) Delete(key uint64) { delete(mp.kv, key) } -// latency processor -type LatencyProcessor struct { - config *pkgconfig.ConfigTransformers - logger *logger.Logger - name string - instance int +// latency transformer +type LatencyTransform struct { + GenericTransformer hashQueries HashQueries mapQueries MapQueries - outChannels []chan dnsutils.DNSMessage - logInfo func(msg string, v ...interface{}) - logError func(msg string, v ...interface{}) -} - -func NewLatencySubprocessor(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, - instance int, outChannels []chan dnsutils.DNSMessage, - logInfo func(msg string, v ...interface{}), logError func(msg string, v ...interface{}), -) *LatencyProcessor { - s := LatencyProcessor{ - config: config, - logger: logger, - name: name, - instance: instance, - outChannels: outChannels, - logInfo: logInfo, - logError: logError, - } - - s.hashQueries = NewHashQueries(time.Duration(config.Latency.QueriesTimeout) * time.Second) - s.mapQueries = NewMapQueries(time.Duration(config.Latency.QueriesTimeout)*time.Second, outChannels) +} - return &s +func NewLatencyTransform(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, instance int, nextWorkers []chan dnsutils.DNSMessage) *LatencyTransform { + t := &LatencyTransform{GenericTransformer: NewTransformer(config, logger, "latency", name, instance, nextWorkers)} + t.hashQueries = NewHashQueries(time.Duration(config.Latency.QueriesTimeout) * time.Second) + t.mapQueries = NewMapQueries(time.Duration(config.Latency.QueriesTimeout)*time.Second, nextWorkers) + return t } -func (s *LatencyProcessor) ReloadConfig(config 
*pkgconfig.ConfigTransformers) { - s.config = config +func (t *LatencyTransform) GetTransforms() ([]Subtransform, error) { + t.hashQueries.SetTTL(time.Duration(t.config.Latency.QueriesTimeout) * time.Second) + t.mapQueries.SetTTL(time.Duration(t.config.Latency.QueriesTimeout) * time.Second) - s.hashQueries.SetTTL(time.Duration(config.Latency.QueriesTimeout) * time.Second) - s.mapQueries.SetTTL(time.Duration(config.Latency.QueriesTimeout) * time.Second) + subtransforms := []Subtransform{} + if t.config.Latency.MeasureLatency { + subtransforms = append(subtransforms, Subtransform{name: "latency:add", processFunc: t.measureLatency}) + } + if t.config.Latency.UnansweredQueries { + subtransforms = append(subtransforms, Subtransform{name: "latency:timeout", processFunc: t.detectEvictedTimeout}) + } + return subtransforms, nil } -func (s *LatencyProcessor) MeasureLatency(dm *dnsutils.DNSMessage) { +func (t *LatencyTransform) measureLatency(dm *dnsutils.DNSMessage) (int, error) { queryport, _ := strconv.Atoi(dm.NetworkInfo.QueryPort) if len(dm.NetworkInfo.QueryIP) > 0 && queryport > 0 && !dm.DNS.MalformedPacket { // compute the hash of the query @@ -150,20 +138,21 @@ func (s *LatencyProcessor) MeasureLatency(dm *dnsutils.DNSMessage) { hashfnv.Write([]byte(strings.Join(hashData, "+"))) if dm.DNS.Type == dnsutils.DNSQuery || dm.DNS.Type == dnsutils.DNSQueryQuiet { - s.hashQueries.Set(hashfnv.Sum64(), dm.DNSTap.Timestamp) + t.hashQueries.Set(hashfnv.Sum64(), dm.DNSTap.Timestamp) } else { key := hashfnv.Sum64() - value, ok := s.hashQueries.Get(key) + value, ok := t.hashQueries.Get(key) if ok { - s.hashQueries.Delete(key) + t.hashQueries.Delete(key) latency := float64(dm.DNSTap.Timestamp-value) / float64(1000000000) dm.DNSTap.Latency = latency } } } + return ReturnKeep, nil } -func (s *LatencyProcessor) DetectEvictedTimeout(dm *dnsutils.DNSMessage) { +func (t *LatencyTransform) detectEvictedTimeout(dm *dnsutils.DNSMessage) (int, error) { queryport, _ := 
strconv.Atoi(dm.NetworkInfo.QueryPort) if len(dm.NetworkInfo.QueryIP) > 0 && queryport > 0 && !dm.DNS.MalformedPacket { @@ -175,9 +164,10 @@ func (s *LatencyProcessor) DetectEvictedTimeout(dm *dnsutils.DNSMessage) { key := hashfnv.Sum64() if dm.DNS.Type == dnsutils.DNSQuery || dm.DNS.Type == dnsutils.DNSQueryQuiet { - s.mapQueries.Set(key, *dm) - } else if s.mapQueries.Exists(key) { - s.mapQueries.Delete(key) + t.mapQueries.Set(key, *dm) + } else if t.mapQueries.Exists(key) { + t.mapQueries.Delete(key) } } + return ReturnKeep, nil } diff --git a/transformers/latency_test.go b/transformers/latency_test.go index 70e19540..0a907f44 100644 --- a/transformers/latency_test.go +++ b/transformers/latency_test.go @@ -14,11 +14,11 @@ import ( func TestLatency_MeasureLatency(t *testing.T) { // enable feature config := pkgconfig.GetFakeConfigTransformers() - log := logger.New(false) outChannels := []chan dnsutils.DNSMessage{} // init transformer - latencyProcessor := NewLatencySubprocessor(config, logger.New(true), "test", 0, outChannels, log.Info, log.Error) + latency := NewLatencyTransform(config, logger.New(true), "test", 0, outChannels) + latency.GetTransforms() testcases := []struct { name string @@ -45,7 +45,7 @@ func TestLatency_MeasureLatency(t *testing.T) { CQ.DNSTap.Timestamp = 1704486841216166066 // Measure latency - latencyProcessor.MeasureLatency(&CQ) + latency.measureLatency(&CQ) // Register Query CR := dnsutils.GetFakeDNSMessage() @@ -53,7 +53,7 @@ func TestLatency_MeasureLatency(t *testing.T) { CR.DNSTap.Timestamp = 1704486841227961611 // Measure latency - latencyProcessor.MeasureLatency(&CR) + latency.measureLatency(&CR) if CR.DNSTap.Latency == 0.0 { t.Errorf("incorrect latency, got 0.0") @@ -68,12 +68,12 @@ func TestLatency_DetectEvictedTimeout(t *testing.T) { config.Latency.Enable = true config.Latency.QueriesTimeout = 1 - log := logger.New(false) outChannels := []chan dnsutils.DNSMessage{} outChannels = append(outChannels, make(chan dnsutils.DNSMessage, 1)) 
// init transformer - latencyProcessor := NewLatencySubprocessor(config, logger.New(true), "test", 0, outChannels, log.Info, log.Error) + latency := NewLatencyTransform(config, logger.New(true), "test", 0, outChannels) + latency.GetTransforms() testcases := []struct { name string @@ -100,7 +100,7 @@ func TestLatency_DetectEvictedTimeout(t *testing.T) { CQ.DNSTap.Timestamp = 1704486841216166066 // Measure latency - latencyProcessor.DetectEvictedTimeout(&CQ) + latency.detectEvictedTimeout(&CQ) time.Sleep(2 * time.Second) @@ -143,6 +143,7 @@ func Test_HashQueries_Expire(t *testing.T) { } } +// Bench func Benchmark_HashQueries_Set(b *testing.B) { mapexpire := NewHashQueries(10 * time.Second) diff --git a/transformers/machinelearning.go b/transformers/machinelearning.go index 1edfad5b..84d57a48 100644 --- a/transformers/machinelearning.go +++ b/transformers/machinelearning.go @@ -1,7 +1,6 @@ package transformers import ( - "fmt" "math" "strings" "unicode" @@ -22,73 +21,27 @@ func isConsonant(char rune) bool { return true } -type MlProcessor struct { - config *pkgconfig.ConfigTransformers - instance int - outChannels []chan dnsutils.DNSMessage - logInfo func(msg string, v ...interface{}) - logError func(msg string, v ...interface{}) +type MlTransform struct { + GenericTransformer } -func NewMachineLearningSubprocessor(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, - instance int, outChannels []chan dnsutils.DNSMessage, - logInfo func(msg string, v ...interface{}), logError func(msg string, v ...interface{}), -) MlProcessor { - s := MlProcessor{ - config: config, - instance: instance, - outChannels: outChannels, - logInfo: logInfo, - logError: logError, - } - - return s -} - -func (p *MlProcessor) ReloadConfig(config *pkgconfig.ConfigTransformers) { - p.config = config -} - -func (p *MlProcessor) LogInfo(msg string, v ...interface{}) { - log := fmt.Sprintf("ml#%d - ", p.instance) - p.logInfo(log+msg, v...) 
+func NewMachineLearningTransform(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, instance int, nextWorkers []chan dnsutils.DNSMessage) *MlTransform { + t := &MlTransform{GenericTransformer: NewTransformer(config, logger, "machinelearning", name, instance, nextWorkers)} + return t } -func (p *MlProcessor) LogError(msg string, v ...interface{}) { - log := fmt.Sprintf("ml#%d - ", p.instance) - p.logError(log+msg, v...) -} - -func (p *MlProcessor) InitDNSMessage(dm *dnsutils.DNSMessage) { - if dm.MachineLearning == nil { - dm.MachineLearning = &dnsutils.TransformML{ - Entropy: 0, - Length: 0, - Digits: 0, - Lowers: 0, - Uppers: 0, - Labels: 0, - Specials: 0, - RatioDigits: 0, - RatioLetters: 0, - RatioSpecials: 0, - Others: 0, - ConsecutiveChars: 0, - ConsecutiveVowels: 0, - ConsecutiveDigits: 0, - ConsecutiveConsonants: 0, - Size: 0, - Occurrences: 0, - UncommonQtypes: 0, - } +func (t *MlTransform) GetTransforms() ([]Subtransform, error) { + subtransforms := []Subtransform{} + if t.config.MachineLearning.Enable { + subtransforms = append(subtransforms, Subtransform{name: "machinelearning:add-feature", processFunc: t.addFeatures}) } + return subtransforms, nil } -func (p *MlProcessor) AddFeatures(dm *dnsutils.DNSMessage) { +func (t *MlTransform) addFeatures(dm *dnsutils.DNSMessage) (int, error) { if dm.MachineLearning == nil { - p.LogError("transformer is not properly initialized") - return + dm.MachineLearning = &dnsutils.TransformML{} } // count global number of chars @@ -227,4 +180,6 @@ func (p *MlProcessor) AddFeatures(dm *dnsutils.DNSMessage) { dm.MachineLearning.ConsecutiveVowels = consecutiveVowelCount dm.MachineLearning.ConsecutiveDigits = consecutiveDigitCount dm.MachineLearning.ConsecutiveConsonants = consecutiveConsonantCount + + return ReturnKeep, nil } diff --git a/transformers/machinelearning_test.go b/transformers/machinelearning_test.go new file mode 100644 index 00000000..6ddb1925 --- /dev/null +++ 
b/transformers/machinelearning_test.go @@ -0,0 +1,29 @@ +package transformers + +import ( + "testing" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" +) + +func TestML_AddFeatures(t *testing.T) { + // enable feature + config := pkgconfig.GetFakeConfigTransformers() + config.MachineLearning.Enable = true + + // init the processor + outChans := []chan dnsutils.DNSMessage{} + ml := NewMachineLearningTransform(config, logger.New(false), "test", 0, outChans) + + dm := dnsutils.GetFakeDNSMessage() + + // init transforms and check + ml.GetTransforms() + ml.addFeatures(&dm) + + if dm.MachineLearning.Labels != 2 { + t.Errorf("incorrect feature label value in DNSMessage: %d", dm.MachineLearning.Labels) + } +} diff --git a/transformers/normalize.go b/transformers/normalize.go index 5226b242..895873ab 100644 --- a/transformers/normalize.go +++ b/transformers/normalize.go @@ -1,7 +1,6 @@ package transformers import ( - "fmt" "strings" "github.com/dmachard/go-dnscollector/dnsutils" @@ -12,143 +11,83 @@ import ( var ( DnstapMessage = map[string]string{ - "AUTH_QUERY": "AQ", - "AUTH_RESPONSE": "AR", - "RESOLVER_QUERY": "RQ", - "RESOLVER_RESPONSE": "RR", - "CLIENT_QUERY": "CQ", - "CLIENT_RESPONSE": "CR", - "FORWARDER_QUERY": "FQ", - "FORWARDER_RESPONSE": "FR", - "STUB_QUERY": "SQ", - "STUB_RESPONSE": "SR", - "TOOL_QUERY": "TQ", - "TOOL_RESPONSE": "TR", - "UPDATE_QUERY": "UQ", - "UPDATE_RESPONSE": "UR", - "DNSQueryType": "Q", // powerdns - "DNSResponseType": "R", // powerdns + "AUTH_QUERY": "AQ", "AUTH_RESPONSE": "AR", + "RESOLVER_QUERY": "RQ", "RESOLVER_RESPONSE": "RR", + "CLIENT_QUERY": "CQ", "CLIENT_RESPONSE": "CR", + "FORWARDER_QUERY": "FQ", "FORWARDER_RESPONSE": "FR", + "STUB_QUERY": "SQ", "STUB_RESPONSE": "SR", + "TOOL_QUERY": "TQ", "TOOL_RESPONSE": "TR", + "UPDATE_QUERY": "UQ", "UPDATE_RESPONSE": "UR", + "DNSQueryType": "Q", "DNSResponseType": "R", // 
powerdns } DNSQr = map[string]string{ - "QUERY": "Q", - "REPLY": "R", + "QUERY": "Q", "REPLY": "R", } IPversion = map[string]string{ - "INET6": "6", - "INET": "4", + "INET6": "6", "INET": "4", } Rcodes = map[string]string{ - "NOERROR": "0", - "FORMERR": "1", - "SERVFAIL": "2", - "NXDOMAIN": "3", - "NOIMP": "4", - "REFUSED": "5", - "YXDOMAIN": "6", - "YXRRSET": "7", - "NXRRSET": "8", - "NOTAUTH": "9", - "NOTZONE": "10", - "DSOTYPENI": "11", - "BADSIG": "16", - "BADKEY": "17", - "BADTIME": "18", - "BADMODE": "19", - "BADNAME": "20", - "BADALG": "21", - "BADTRUNC": "22", - "BADCOOKIE": "23", + "NOERROR": "0", "FORMERR": "1", "SERVFAIL": "2", "NXDOMAIN": "3", "NOIMP": "4", "REFUSED": "5", "YXDOMAIN": "6", + "YXRRSET": "7", "NXRRSET": "8", "NOTAUTH": "9", "NOTZONE": "10", "DSOTYPENI": "11", + "BADSIG": "16", "BADKEY": "17", "BADTIME": "18", "BADMODE": "19", "BADNAME": "20", "BADALG": "21", "BADTRUNC": "22", "BADCOOKIE": "23", } ) -type NormalizeProcessor struct { - config *pkgconfig.ConfigTransformers - logger *logger.Logger - name string - instance int - activeProcessors []func(dm *dnsutils.DNSMessage) int - outChannels []chan dnsutils.DNSMessage - logInfo func(msg string, v ...interface{}) - logError func(msg string, v ...interface{}) -} - -func NewNormalizeSubprocessor( - config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, - instance int, outChannels []chan dnsutils.DNSMessage, - logInfo func(msg string, v ...interface{}), logError func(msg string, v ...interface{}), -) NormalizeProcessor { - s := NormalizeProcessor{ - config: config, - logger: logger, - name: name, - instance: instance, - outChannels: outChannels, - logInfo: logInfo, - logError: logError, - } - - return s -} - -func (p *NormalizeProcessor) ReloadConfig(config *pkgconfig.ConfigTransformers) { - p.config = config +func processRecords(records []dnsutils.DNSAnswer) { + for i := range records { + records[i].Name = strings.ToLower(records[i].Name) + switch records[i].Rdatatype { + 
case "CNAME", "SOA", "NS", "MX", "PTR", "SRV": + records[i].Rdata = strings.ToLower(records[i].Rdata) + } + } } -func (p *NormalizeProcessor) LogInfo(msg string, v ...interface{}) { - log := fmt.Sprintf("normalize#%d - ", p.instance) - p.logInfo(log+msg, v...) +type NormalizeTransform struct { + GenericTransformer } -func (p *NormalizeProcessor) LogError(msg string, v ...interface{}) { - p.logError("normalize - "+msg, v...) +func NewNormalizeTransform(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, instance int, nextWorkers []chan dnsutils.DNSMessage) *NormalizeTransform { + t := &NormalizeTransform{GenericTransformer: NewTransformer(config, logger, "normalize", name, instance, nextWorkers)} + return t } -func (p *NormalizeProcessor) LoadActiveProcessors() { - // clean the slice - p.activeProcessors = p.activeProcessors[:0] - - if p.config.Normalize.QnameLowerCase { - p.activeProcessors = append(p.activeProcessors, p.LowercaseQname) - p.LogInfo("lowercase subprocessor is enabled") +func (t *NormalizeTransform) GetTransforms() ([]Subtransform, error) { + subprocessors := []Subtransform{} + if t.config.Normalize.RRLowerCase { + subprocessors = append(subprocessors, Subtransform{name: "normalize:rr-lowercase", processFunc: t.RRLowercase}) } - - if p.config.Normalize.QuietText { - p.activeProcessors = append(p.activeProcessors, p.QuietText) - p.LogInfo("quiet text subprocessor is enabled") + if t.config.Normalize.QnameLowerCase { + subprocessors = append(subprocessors, Subtransform{name: "normalize:qname-lowercase", processFunc: t.QnameLowercase}) } - - if p.config.Normalize.AddTld { - p.activeProcessors = append(p.activeProcessors, p.GetEffectiveTld) - p.LogInfo("add tld subprocessor is enabled") - } - if p.config.Normalize.AddTldPlusOne { - p.activeProcessors = append(p.activeProcessors, p.GetEffectiveTldPlusOne) - p.LogInfo("add tld+1 subprocessor enabled") + if t.config.Normalize.QuietText { + subprocessors = append(subprocessors, 
Subtransform{name: "normalize:quiet", processFunc: t.QuietText}) } -} -func (p *NormalizeProcessor) IsEnabled() bool { - return p.config.Normalize.Enable -} - -func (p *NormalizeProcessor) InitDNSMessage(dm *dnsutils.DNSMessage) { - if dm.PublicSuffix == nil { - dm.PublicSuffix = &dnsutils.TransformPublicSuffix{ - QnamePublicSuffix: "-", - QnameEffectiveTLDPlusOne: "-", - } + if t.config.Normalize.AddTld { + subprocessors = append(subprocessors, Subtransform{name: "normalize:add-etld", processFunc: t.GetEffectiveTld}) } + if t.config.Normalize.AddTldPlusOne { + subprocessors = append(subprocessors, Subtransform{name: "normalize:add-etld+1", processFunc: t.GetEffectiveTldPlusOne}) + } + return subprocessors, nil } -func (p *NormalizeProcessor) LowercaseQname(dm *dnsutils.DNSMessage) int { +func (t *NormalizeTransform) QnameLowercase(dm *dnsutils.DNSMessage) (int, error) { dm.DNS.Qname = strings.ToLower(dm.DNS.Qname) + return ReturnKeep, nil +} - return ReturnSuccess +func (t *NormalizeTransform) RRLowercase(dm *dnsutils.DNSMessage) (int, error) { + processRecords(dm.DNS.DNSRRs.Answers) + processRecords(dm.DNS.DNSRRs.Nameservers) + processRecords(dm.DNS.DNSRRs.Records) + return ReturnKeep, nil } -func (p *NormalizeProcessor) QuietText(dm *dnsutils.DNSMessage) int { +func (t *NormalizeTransform) QuietText(dm *dnsutils.DNSMessage) (int, error) { if v, found := DnstapMessage[dm.DNSTap.Operation]; found { dm.DNSTap.Operation = v } @@ -161,10 +100,14 @@ func (p *NormalizeProcessor) QuietText(dm *dnsutils.DNSMessage) int { if v, found := Rcodes[dm.DNS.Rcode]; found { dm.DNS.Rcode = v } - return ReturnSuccess + return ReturnKeep, nil } -func (p *NormalizeProcessor) GetEffectiveTld(dm *dnsutils.DNSMessage) int { +func (t *NormalizeTransform) GetEffectiveTld(dm *dnsutils.DNSMessage) (int, error) { + if dm.PublicSuffix == nil { + dm.PublicSuffix = &dnsutils.TransformPublicSuffix{QnamePublicSuffix: "-", QnameEffectiveTLDPlusOne: "-"} + } + // PublicSuffix is case sensitive. 
// remove ending dot ? qname := strings.ToLower(dm.DNS.Qname) @@ -174,36 +117,26 @@ func (p *NormalizeProcessor) GetEffectiveTld(dm *dnsutils.DNSMessage) int { etld, icann := publicsuffixlist.PublicSuffix(qname) if icann { dm.PublicSuffix.QnamePublicSuffix = etld + dm.PublicSuffix.ManagedByICANN = true } else { - p.logError("suffix unmanaged by icann: %s", qname) + dm.PublicSuffix.ManagedByICANN = false } - return ReturnSuccess + return ReturnKeep, nil } -func (p *NormalizeProcessor) GetEffectiveTldPlusOne(dm *dnsutils.DNSMessage) int { +func (t *NormalizeTransform) GetEffectiveTldPlusOne(dm *dnsutils.DNSMessage) (int, error) { + if dm.PublicSuffix == nil { + dm.PublicSuffix = &dnsutils.TransformPublicSuffix{QnamePublicSuffix: "-", QnameEffectiveTLDPlusOne: "-"} + } + // PublicSuffix is case sensitive, remove ending dot ? qname := strings.ToLower(dm.DNS.Qname) qname = strings.TrimSuffix(qname, ".") - if etld, err := publicsuffixlist.EffectiveTLDPlusOne(qname); err == nil { + etld, err := publicsuffixlist.EffectiveTLDPlusOne(qname) + if err == nil { dm.PublicSuffix.QnameEffectiveTLDPlusOne = etld } - return ReturnSuccess -} - -func (p *NormalizeProcessor) ProcessDNSMessage(dm *dnsutils.DNSMessage) int { - if len(p.activeProcessors) == 0 { - return ReturnSuccess - } - - var rCode int - for _, fn := range p.activeProcessors { - rCode = fn(dm) - if rCode != ReturnSuccess { - return rCode - } - } - - return ReturnSuccess + return ReturnKeep, nil } diff --git a/transformers/normalize_test.go b/transformers/normalize_test.go index f8fa5195..4028af3b 100644 --- a/transformers/normalize_test.go +++ b/transformers/normalize_test.go @@ -1,8 +1,6 @@ package transformers import ( - "encoding/json" - "reflect" "strings" "testing" @@ -11,71 +9,72 @@ import ( "github.com/dmachard/go-logger" ) -func TestNormalize_Json(t *testing.T) { +func TestNormalize_LowercaseQname(t *testing.T) { // enable feature config := pkgconfig.GetFakeConfigTransformers() + config.Normalize.Enable 
= true + config.Normalize.QnameLowerCase = true - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} - // get fake - dm := dnsutils.GetFakeDNSMessage() - dm.Init() - - // init subproccesor - qnameNorm := NewNormalizeSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - qnameNorm.InitDNSMessage(&dm) - - // expected json - refJSON := ` - { - "publicsuffix": { - "tld":"-", - "etld+1":"-" - } - } - ` + // init the processor + normTransformer := NewNormalizeTransform(config, logger.New(false), "test", 0, outChans) - var dmMap map[string]interface{} - err := json.Unmarshal([]byte(dm.ToJSON()), &dmMap) - if err != nil { - t.Fatalf("could not unmarshal dm json: %s\n", err) - } + qname := "www.Google.Com" + dm := dnsutils.GetFakeDNSMessage() + dm.DNS.Qname = qname - var refMap map[string]interface{} - err = json.Unmarshal([]byte(refJSON), &refMap) + returnCode, err := normTransformer.QnameLowercase(&dm) if err != nil { - t.Fatalf("could not unmarshal ref json: %s\n", err) + t.Errorf("process transform err %s", err.Error()) } - if _, ok := dmMap["publicsuffix"]; !ok { - t.Fatalf("transformer key is missing") + if dm.DNS.Qname != NormAddress { + t.Errorf("Qname to lowercase failed, got %s", dm.DNS.Qname) } - - if !reflect.DeepEqual(dmMap["publicsuffix"], refMap["publicsuffix"]) { - t.Errorf("json format different from reference") + if returnCode != ReturnKeep { + t.Errorf("Return code is %v and not RETURN_KEEP (%v)", returnCode, ReturnKeep) } } -func TestNormalize_LowercaseQname(t *testing.T) { +func TestNormalize_RRLowercaseQname(t *testing.T) { // enable feature config := pkgconfig.GetFakeConfigTransformers() config.Normalize.Enable = true - config.Normalize.QnameLowerCase = true - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init the processor - qnameNorm := NewNormalizeSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + normTransformer := NewNormalizeTransform(config, 
logger.New(false), "test", 0, outChans) - qname := "www.Google.Com" + // create DNSMessage with answers + rrqname := "www.RRGoogle.Com" dm := dnsutils.GetFakeDNSMessage() - dm.DNS.Qname = qname + dm.DNS.Qname = "www.test.com" + dm.DNS.DNSRRs.Answers = append(dm.DNS.DNSRRs.Answers, dnsutils.DNSAnswer{Name: rrqname}) + dm.DNS.DNSRRs.Nameservers = append(dm.DNS.DNSRRs.Nameservers, dnsutils.DNSAnswer{Name: rrqname}) + dm.DNS.DNSRRs.Records = append(dm.DNS.DNSRRs.Records, dnsutils.DNSAnswer{Name: rrqname}) + + // process DNSMessage + returnCode, err := normTransformer.RRLowercase(&dm) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if returnCode != ReturnKeep { + t.Errorf("Return code is %v, want %v", returnCode, ReturnKeep) + } - ret := qnameNorm.LowercaseQname(&dm) - if dm.DNS.Qname != strings.ToLower(qname) { - t.Errorf("Qname to lowercase failed, got %d", ret) + // checks + if dm.DNS.DNSRRs.Answers[0].Name != strings.ToLower(rrqname) { + t.Errorf("RR Answers to lowercase failed, got %s", dm.DNS.DNSRRs.Answers[0].Name) + } + + if dm.DNS.DNSRRs.Nameservers[0].Name != strings.ToLower(rrqname) { + t.Errorf("RR Nameservers to lowercase failed, got %s", dm.DNS.DNSRRs.Nameservers[0].Name) + } + + if dm.DNS.DNSRRs.Records[0].Name != strings.ToLower(rrqname) { + t.Errorf("RR Records to lowercase failed, got %s", dm.DNS.DNSRRs.Records[0].Name) } } @@ -85,11 +84,10 @@ func TestNormalize_QuietText(t *testing.T) { config.Normalize.Enable = true config.Normalize.QuietText = true - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init the processor - norm := NewNormalizeSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + norm := NewNormalizeTransform(config, logger.New(false), "test", 0, outChans) dm := dnsutils.GetFakeDNSMessage() norm.QuietText(&dm) @@ -109,11 +107,10 @@ func TestNormalize_AddTLD(t *testing.T) { config.Normalize.Enable = true config.Normalize.AddTld = true - log := logger.New(false) outChans := 
[]chan dnsutils.DNSMessage{} // init the processor - psl := NewNormalizeSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + psl := NewNormalizeTransform(config, logger.New(false), "test", 0, outChans) tt := []struct { name string @@ -143,7 +140,7 @@ func TestNormalize_AddTLD(t *testing.T) { dm := dnsutils.GetFakeDNSMessage() dm.DNS.Qname = tc.qname - psl.InitDNSMessage(&dm) + // psl.InitDNSMessage(&dm) psl.GetEffectiveTld(&dm) if dm.PublicSuffix.QnamePublicSuffix != tc.want { @@ -160,11 +157,10 @@ func TestNormalize_AddTldPlusOne(t *testing.T) { config.Normalize.Enable = true config.Normalize.AddTld = true - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init the processor - psl := NewNormalizeSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + psl := NewNormalizeTransform(config, logger.New(false), "test", 0, outChans) tt := []struct { name string @@ -189,7 +185,7 @@ func TestNormalize_AddTldPlusOne(t *testing.T) { dm := dnsutils.GetFakeDNSMessage() dm.DNS.Qname = tc.qname - psl.InitDNSMessage(&dm) + // psl.InitDNSMessage(&dm) psl.GetEffectiveTldPlusOne(&dm) if dm.PublicSuffix.QnameEffectiveTLDPlusOne != tc.want { @@ -199,3 +195,131 @@ func TestNormalize_AddTldPlusOne(t *testing.T) { }) } } + +func TestNormalize_SuffixUnmanaged(t *testing.T) { + // enable feature + config := pkgconfig.GetFakeConfigTransformers() + outChans := []chan dnsutils.DNSMessage{} + + // init the processor + psl := NewNormalizeTransform(config, logger.New(true), "test", 0, outChans) + + dm := dnsutils.GetFakeDNSMessage() + // https://publicsuffix.org/list/effective_tld_names.dat + // // ===BEGIN ICANN DOMAINS=== + // .... + // // ===END ICANN DOMAINS=== + // ===BEGIN PRIVATE DOMAINS=== + // .. 
+ dm.DNS.Qname = "play.googleapis.com" + // // ===END PRIVATE DOMAINS=== + + // psl.InitDNSMessage(&dm) + psl.GetEffectiveTld(&dm) + if dm.PublicSuffix.ManagedByICANN { + t.Errorf("Qname %s should be private domains", dm.DNS.Qname) + } +} + +func TestNormalize_SuffixICANNManaged(t *testing.T) { + // enable feature + config := pkgconfig.GetFakeConfigTransformers() + outChans := []chan dnsutils.DNSMessage{} + + // init the processor + psl := NewNormalizeTransform(config, logger.New(true), "test", 0, outChans) + + dm := dnsutils.GetFakeDNSMessage() + // https://publicsuffix.org/list/effective_tld_names.dat + // // ===BEGIN ICANN DOMAINS=== + dm.DNS.Qname = "fr.wikipedia.org" + // // ===END ICANN DOMAINS=== + // ===BEGIN PRIVATE DOMAINS=== + // .. + // // ===END PRIVATE DOMAINS=== + + // psl.InitDNSMessage(&dm) + psl.GetEffectiveTld(&dm) + if !dm.PublicSuffix.ManagedByICANN { + t.Errorf("Qname %s should be ICANN managed", dm.DNS.Qname) + } +} + +// bench tests + +func BenchmarkNormalize_GetEffectiveTld(b *testing.B) { + config := pkgconfig.GetFakeConfigTransformers() + channels := []chan dnsutils.DNSMessage{} + + subprocessor := NewNormalizeTransform(config, logger.New(false), "test", 0, channels) + dm := dnsutils.GetFakeDNSMessage() + dm.DNS.Qname = "en.wikipedia.org" + + b.ResetTimer() + for i := 0; i < b.N; i++ { + // subprocessor.InitDNSMessage(&dm) + subprocessor.GetEffectiveTld(&dm) + } +} + +func BenchmarkNormalize_GetEffectiveTldPlusOne(b *testing.B) { + config := pkgconfig.GetFakeConfigTransformers() + channels := []chan dnsutils.DNSMessage{} + + subprocessor := NewNormalizeTransform(config, logger.New(false), "test", 0, channels) + dm := dnsutils.GetFakeDNSMessage() + dm.DNS.Qname = "en.wikipedia.org" + + b.ResetTimer() + for i := 0; i < b.N; i++ { + // subprocessor.InitDNSMessage(&dm) + subprocessor.GetEffectiveTldPlusOne(&dm) + } +} + +func BenchmarkNormalize_QnameLowercase(b *testing.B) { + config := pkgconfig.GetFakeConfigTransformers() + channels := []chan 
dnsutils.DNSMessage{} + + subprocessor := NewNormalizeTransform(config, logger.New(false), "test", 0, channels) + dm := dnsutils.GetFakeDNSMessage() + dm.DNS.Qname = "EN.Wikipedia.Org" + + b.ResetTimer() + for i := 0; i < b.N; i++ { + subprocessor.QnameLowercase(&dm) + } +} + +func BenchmarkNormalize_RRLowercase(b *testing.B) { + config := pkgconfig.GetFakeConfigTransformers() + channels := []chan dnsutils.DNSMessage{} + + tranform := NewNormalizeTransform(config, logger.New(false), "test", 0, channels) + + name := "En.Tikipedia.Org" + dm := dnsutils.GetFakeDNSMessage() + dm.DNS.Qname = name + dm.DNS.DNSRRs.Answers = append(dm.DNS.DNSRRs.Answers, dnsutils.DNSAnswer{Name: name}) + dm.DNS.DNSRRs.Nameservers = append(dm.DNS.DNSRRs.Nameservers, dnsutils.DNSAnswer{Name: name}) + dm.DNS.DNSRRs.Records = append(dm.DNS.DNSRRs.Records, dnsutils.DNSAnswer{Name: name}) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + tranform.RRLowercase(&dm) + } +} + +func BenchmarkNormalize_QuietText(b *testing.B) { + config := pkgconfig.GetFakeConfigTransformers() + channels := []chan dnsutils.DNSMessage{} + + subprocessor := NewNormalizeTransform(config, logger.New(false), "test", 0, channels) + dm := dnsutils.GetFakeDNSMessage() + dm.DNS.Qname = "EN.Wikipedia.Org" + + b.ResetTimer() + for i := 0; i < b.N; i++ { + subprocessor.QuietText(&dm) + } +} diff --git a/transformers/reducer.go b/transformers/reducer.go index 801b6f46..c5f75e59 100644 --- a/transformers/reducer.go +++ b/transformers/reducer.go @@ -100,99 +100,55 @@ func (mp *MapTraffic) ProcessExpiredKeys() { } } -type ReducerProcessor struct { - config *pkgconfig.ConfigTransformers - logger *logger.Logger - name string - instance int - outChannels []chan dnsutils.DNSMessage - activeProcessors []func(dm *dnsutils.DNSMessage) int - mapTraffic MapTraffic - logInfo func(msg string, v ...interface{}) - logError func(msg string, v ...interface{}) - strBuilder strings.Builder +type ReducerTransform struct { + GenericTransformer + 
mapTraffic MapTraffic + strBuilder strings.Builder } -func NewReducerSubprocessor( - config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, - instance int, outChannels []chan dnsutils.DNSMessage, - logInfo func(msg string, v ...interface{}), logError func(msg string, v ...interface{}), -) *ReducerProcessor { - s := ReducerProcessor{ - config: config, - logger: logger, - name: name, - instance: instance, - outChannels: outChannels, - logInfo: logInfo, - logError: logError, - } - - s.mapTraffic = NewMapTraffic(time.Duration(config.Reducer.WatchInterval)*time.Second, outChannels, logInfo, logError) - return &s +func NewReducerTransform(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, instance int, nextWorkers []chan dnsutils.DNSMessage) *ReducerTransform { + t := &ReducerTransform{GenericTransformer: NewTransformer(config, logger, "reducer", name, instance, nextWorkers)} + t.mapTraffic = NewMapTraffic(time.Duration(config.Reducer.WatchInterval)*time.Second, nextWorkers, t.LogInfo, t.LogError) + return t } -func (p *ReducerProcessor) ReloadConfig(config *pkgconfig.ConfigTransformers) { - p.config = config - p.mapTraffic.SetTTL(time.Duration(config.Reducer.WatchInterval) * time.Second) - - p.LoadActiveReducers() +func (t *ReducerTransform) ReloadConfig(config *pkgconfig.ConfigTransformers) { + t.GenericTransformer.ReloadConfig(config) + t.mapTraffic.SetTTL(time.Duration(config.Reducer.WatchInterval) * time.Second) + t.GetTransforms() } -func (p *ReducerProcessor) LoadActiveReducers() { - // clean the slice - p.activeProcessors = p.activeProcessors[:0] - - if p.config.Reducer.RepetitiveTrafficDetector { - p.activeProcessors = append(p.activeProcessors, p.RepetitiveTrafficDetector) - go p.mapTraffic.Run() +func (t *ReducerTransform) GetTransforms() ([]Subtransform, error) { + subtransforms := []Subtransform{} + if t.config.Reducer.RepetitiveTrafficDetector { + subtransforms = append(subtransforms, Subtransform{name: "reducer", 
processFunc: t.repetitiveTrafficDetector}) + go t.mapTraffic.Run() } + return subtransforms, nil } -func (p *ReducerProcessor) InitDNSMessage(dm *dnsutils.DNSMessage) { +func (t *ReducerTransform) repetitiveTrafficDetector(dm *dnsutils.DNSMessage) (int, error) { if dm.Reducer == nil { - dm.Reducer = &dnsutils.TransformReducer{ - Occurrences: 0, - CumulativeLength: 0, - } + dm.Reducer = &dnsutils.TransformReducer{} } -} -func (p *ReducerProcessor) RepetitiveTrafficDetector(dm *dnsutils.DNSMessage) int { - p.strBuilder.Reset() - p.strBuilder.WriteString(dm.DNSTap.Identity) - p.strBuilder.WriteString(dm.DNSTap.Operation) - p.strBuilder.WriteString(dm.NetworkInfo.QueryIP) - if p.config.Reducer.QnamePlusOne { + t.strBuilder.Reset() + t.strBuilder.WriteString(dm.DNSTap.Identity) + t.strBuilder.WriteString(dm.DNSTap.Operation) + t.strBuilder.WriteString(dm.NetworkInfo.QueryIP) + if t.config.Reducer.QnamePlusOne { qname := strings.ToLower(dm.DNS.Qname) qname = strings.TrimSuffix(qname, ".") if etld, err := publicsuffixlist.EffectiveTLDPlusOne(qname); err == nil { dm.DNS.Qname = etld } } - p.strBuilder.WriteString(dm.DNS.Qname) - p.strBuilder.WriteString(dm.DNS.Qtype) - dmTag := p.strBuilder.String() + t.strBuilder.WriteString(dm.DNS.Qname) + t.strBuilder.WriteString(dm.DNS.Qtype) + dmTag := t.strBuilder.String() - p.mapTraffic.Set(dmTag, dm) - - return ReturnDrop -} - -func (p *ReducerProcessor) ProcessDNSMessage(dm *dnsutils.DNSMessage) int { dmCopy := *dm + t.mapTraffic.Set(dmTag, &dmCopy) - if len(p.activeProcessors) == 0 { - return ReturnSuccess - } - - var rCode int - for _, fn := range p.activeProcessors { - rCode = fn(&dmCopy) - if rCode != ReturnSuccess { - return rCode - } - } - - return ReturnSuccess + return ReturnDrop, nil } diff --git a/transformers/reducer_test.go b/transformers/reducer_test.go index 4d2c9376..fccfe4c9 100644 --- a/transformers/reducer_test.go +++ b/transformers/reducer_test.go @@ -15,7 +15,6 @@ func TestReducer_Json(t *testing.T) { // enable 
feature config := pkgconfig.GetFakeConfigTransformers() - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // get fake @@ -24,14 +23,14 @@ func TestReducer_Json(t *testing.T) { // init subproccesor - reducer := NewReducerSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - reducer.InitDNSMessage(&dm) + reducer := NewReducerTransform(config, logger.New(false), "test", 0, outChans) + reducer.repetitiveTrafficDetector(&dm) // expected json refJSON := ` { "reducer": { - "occurrences": 0, + "occurrences": 1, "cumulative-length": 0 } } @@ -64,14 +63,16 @@ func TestReducer_RepetitiveTrafficDetector(t *testing.T) { config.Reducer.RepetitiveTrafficDetector = true config.Reducer.WatchInterval = 1 - log := logger.New(false) outChan := make(chan dnsutils.DNSMessage, 1) outChans := []chan dnsutils.DNSMessage{} outChans = append(outChans, outChan) // init subproccesor - reducer := NewReducerSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - reducer.LoadActiveReducers() + reducer := NewReducerTransform(config, logger.New(false), "test", 0, outChans) + subtransforms, _ := reducer.GetTransforms() + if len(subtransforms) != 1 { + t.Errorf("invalid number of subtransforms enabled") + } // malformed DNS message testcases := []struct { @@ -147,12 +148,16 @@ func TestReducer_RepetitiveTrafficDetector(t *testing.T) { }, } + time.Sleep(1 * time.Second) + for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { for _, dmIn := range tc.dnsMessagesIn { - reducer.InitDNSMessage(&dmIn) - ret := reducer.ProcessDNSMessage(&dmIn) + ret, err := reducer.repetitiveTrafficDetector(&dmIn) + if err != nil { + t.Errorf("transform error - %v", err) + } if ret != ReturnDrop { t.Errorf("DNS message should be dropped") } @@ -178,14 +183,16 @@ func TestReducer_QnamePlusOne(t *testing.T) { config.Reducer.QnamePlusOne = true config.Reducer.WatchInterval = 1 - log := logger.New(false) outChan := make(chan 
dnsutils.DNSMessage, 1) outChans := []chan dnsutils.DNSMessage{} outChans = append(outChans, outChan) // init subproccesor - reducer := NewReducerSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - reducer.LoadActiveReducers() + reducer := NewReducerTransform(config, logger.New(false), "test", 0, outChans) + subtransforms, _ := reducer.GetTransforms() + if len(subtransforms) != 1 { + t.Errorf("invalid number of subtransforms enabled") + } testcases := []struct { name string @@ -219,8 +226,10 @@ func TestReducer_QnamePlusOne(t *testing.T) { t.Run(tc.name, func(t *testing.T) { for _, dmIn := range tc.dnsMessagesIn { - reducer.InitDNSMessage(&dmIn) - ret := reducer.ProcessDNSMessage(&dmIn) + ret, err := reducer.repetitiveTrafficDetector(&dmIn) + if err != nil { + t.Errorf("transform error - %v", err) + } if ret != ReturnDrop { t.Errorf("DNS message should be dropped") } diff --git a/transformers/relabeling.go b/transformers/relabeling.go new file mode 100644 index 00000000..551af496 --- /dev/null +++ b/transformers/relabeling.go @@ -0,0 +1,68 @@ +package transformers + +import ( + "regexp" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" +) + +type RelabelTransform struct { + GenericTransformer + RelabelingRules []dnsutils.RelabelingRule +} + +func NewRelabelTransform(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, instance int, nextWorkers []chan dnsutils.DNSMessage) *RelabelTransform { + t := &RelabelTransform{GenericTransformer: NewTransformer(config, logger, "relabeling", name, instance, nextWorkers)} + return t +} + +func (t *RelabelTransform) GetTransforms() ([]Subtransform, error) { + subtransforms := []Subtransform{} + if len(t.config.Relabeling.Rename) > 0 || len(t.config.Relabeling.Remove) > 0 { + actions := t.Precompile() + subtransforms = append(subtransforms, Subtransform{name: 
"relabeling:" + actions, processFunc: t.AddRules}) + } + return subtransforms, nil +} + +// Pre-compile regular expressions +func (t *RelabelTransform) Precompile() string { + actionRename := false + actionRemove := false + for _, label := range t.config.Relabeling.Rename { + t.RelabelingRules = append(t.RelabelingRules, dnsutils.RelabelingRule{ + Regex: regexp.MustCompile(label.Regex), + Replacement: label.Replacement, + Action: "rename", + }) + actionRename = true + } + for _, label := range t.config.Relabeling.Remove { + t.RelabelingRules = append(t.RelabelingRules, dnsutils.RelabelingRule{ + Regex: regexp.MustCompile(label.Regex), + Replacement: label.Replacement, + Action: "drop", + }) + actionRemove = true + } + + if actionRename && actionRemove { + return "rename+remove" + } + if actionRename && !actionRemove { + return "rename" + } + if !actionRename && actionRemove { + return "remove" + } + return "error" +} + +func (t *RelabelTransform) AddRules(dm *dnsutils.DNSMessage) (int, error) { + if dm.Relabeling == nil { + dm.Relabeling = &dnsutils.TransformRelabeling{Rules: t.RelabelingRules} + } + return ReturnKeep, nil +} diff --git a/transformers/relabeling_test.go b/transformers/relabeling_test.go new file mode 100644 index 00000000..c8dbf8a8 --- /dev/null +++ b/transformers/relabeling_test.go @@ -0,0 +1,31 @@ +package transformers + +import ( + "testing" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" +) + +func TestRelabeling_CompileRegex(t *testing.T) { + // enable feature + config := pkgconfig.GetFakeConfigTransformers() + config.Relabeling.Enable = true + config.Relabeling.Rename = append(config.Relabeling.Rename, pkgconfig.RelabelingConfig{ + Regex: "^dns.qname$", + Replacement: "qname_test", + }) + config.Relabeling.Remove = append(config.Relabeling.Remove, pkgconfig.RelabelingConfig{ + Regex: "^dns.qtype$", + }) + + // init the processor + 
outChans := []chan dnsutils.DNSMessage{} + relabeling := NewRelabelTransform(config, logger.New(false), "test", 0, outChans) + relabeling.GetTransforms() + + if len(relabeling.RelabelingRules) != 2 { + t.Errorf("invalid number of rules") + } +} diff --git a/transformers/subprocessors.go b/transformers/subprocessors.go deleted file mode 100644 index 55b61826..00000000 --- a/transformers/subprocessors.go +++ /dev/null @@ -1,315 +0,0 @@ -package transformers - -import ( - "fmt" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-logger" -) - -const ( - enabled = "enabled" -) - -var ( - ReturnSuccess = 1 - ReturnDrop = 2 - ReturnError = 3 -) - -type Transforms struct { - config *pkgconfig.ConfigTransformers - logger *logger.Logger - name string - instance int - - SuspiciousTransform SuspiciousTransform - GeoipTransform GeoIPProcessor - FilteringTransform FilteringProcessor - UserPrivacyTransform UserPrivacyProcessor - NormalizeTransform NormalizeProcessor - LatencyTransform *LatencyProcessor - ReducerTransform *ReducerProcessor - ExtractProcessor ExtractProcessor - MachineLearningTransform MlProcessor - ATagsTransform ATagsProcessor - - activeTransforms []func(dm *dnsutils.DNSMessage) int -} - -func NewTransforms(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, outChannels []chan dnsutils.DNSMessage, instance int) Transforms { - - d := Transforms{ - config: config, - logger: logger, - name: name, - instance: instance, - } - - d.SuspiciousTransform = NewSuspiciousSubprocessor(config, logger, name, instance, outChannels, d.LogInfo, d.LogError) - d.NormalizeTransform = NewNormalizeSubprocessor(config, logger, name, instance, outChannels, d.LogInfo, d.LogError) - d.ExtractProcessor = NewExtractSubprocessor(config, logger, name, instance, outChannels, d.LogInfo, d.LogError) - d.LatencyTransform = 
NewLatencySubprocessor(config, logger, name, instance, outChannels, d.LogInfo, d.LogError) - d.ReducerTransform = NewReducerSubprocessor(config, logger, name, instance, outChannels, d.LogInfo, d.LogError) - d.UserPrivacyTransform = NewUserPrivacySubprocessor(config, logger, name, instance, outChannels, d.LogInfo, d.LogError) - d.FilteringTransform = NewFilteringProcessor(config, logger, name, instance, outChannels, d.LogInfo, d.LogError) - d.GeoipTransform = NewDNSGeoIPProcessor(config, logger, name, instance, outChannels, d.LogInfo, d.LogError) - d.MachineLearningTransform = NewMachineLearningSubprocessor(config, logger, name, instance, outChannels, d.LogInfo, d.LogError) - d.ATagsTransform = NewATagsSubprocessor(config, logger, name, instance, outChannels, d.LogInfo, d.LogError) - - d.Prepare() - return d -} - -func (p *Transforms) ReloadConfig(config *pkgconfig.ConfigTransformers) { - p.config = config - p.NormalizeTransform.ReloadConfig(config) - p.GeoipTransform.ReloadConfig(config) - p.FilteringTransform.ReloadConfig(config) - p.UserPrivacyTransform.ReloadConfig(config) - p.LatencyTransform.ReloadConfig(config) - p.SuspiciousTransform.ReloadConfig(config) - p.ReducerTransform.ReloadConfig(config) - p.ExtractProcessor.ReloadConfig(config) - p.MachineLearningTransform.ReloadConfig(config) - p.ATagsTransform.ReloadConfig(config) - - p.Prepare() -} - -func (p *Transforms) Prepare() error { - // clean the slice - p.activeTransforms = p.activeTransforms[:0] - - var prefixlog string - if p.instance > 0 { - prefixlog = fmt.Sprintf("conn #%d - ", p.instance) - } else { - prefixlog = "" - } - - if p.config.Normalize.Enable { - p.LogInfo(prefixlog + "qname lowercase subprocessor is " + enabled) - - p.NormalizeTransform.LoadActiveProcessors() - } - - if p.config.GeoIP.Enable { - p.activeTransforms = append(p.activeTransforms, p.geoipTransform) - p.LogInfo(prefixlog + "geoip subprocessor is " + enabled) - - if err := p.GeoipTransform.Open(); err != nil { - 
p.LogError(prefixlog+"geoip subprocessor open error %v", err) - } - } - - if p.config.UserPrivacy.Enable { - // Apply user privacy on qname and query ip - if p.config.UserPrivacy.AnonymizeIP { - p.activeTransforms = append(p.activeTransforms, p.anonymizeIP) - p.LogInfo(prefixlog + "ip anonymization subprocessor is enabled") - } - - if p.config.UserPrivacy.MinimazeQname { - p.activeTransforms = append(p.activeTransforms, p.minimazeQname) - p.LogInfo(prefixlog + "minimaze qname subprocessor is enabled") - } - - if p.config.UserPrivacy.HashIP { - p.activeTransforms = append(p.activeTransforms, p.hashIP) - p.LogInfo(prefixlog + "hash ip subprocessor is enabled") - } - } - - if p.config.Filtering.Enable { - p.LogInfo(prefixlog + "filtering subprocessor is " + enabled) - - p.FilteringTransform.LoadRcodes() - p.FilteringTransform.LoadDomainsList() - p.FilteringTransform.LoadQueryIPList() - p.FilteringTransform.LoadrDataIPList() - - p.FilteringTransform.LoadActiveFilters() - } - - if p.config.Latency.Enable { - if p.config.Latency.MeasureLatency { - p.activeTransforms = append(p.activeTransforms, p.measureLatency) - p.LogInfo(prefixlog + "measure latency subprocessor is enabled") - } - if p.config.Latency.UnansweredQueries { - p.activeTransforms = append(p.activeTransforms, p.detectEvictedTimeout) - p.LogInfo(prefixlog + "unanswered queries subprocessor is enabled") - } - } - - if p.config.Suspicious.Enable { - p.activeTransforms = append(p.activeTransforms, p.suspiciousTransform) - p.LogInfo(prefixlog + "suspicious subprocessor is " + enabled) - } - - if p.config.Reducer.Enable { - p.LogInfo(prefixlog + "reducer subprocessor is " + enabled) - - p.ReducerTransform.LoadActiveReducers() - } - - if p.config.Extract.Enable { - if p.config.Extract.AddPayload { - p.activeTransforms = append(p.activeTransforms, p.addBase64Payload) - p.LogInfo(prefixlog + "extract subprocessor is enabled") - } - } - - if p.config.MachineLearning.Enable { - p.activeTransforms = 
append(p.activeTransforms, p.machineLearningTransform) - p.LogInfo(prefixlog + "machinelearning subprocessor is" + enabled) - } - - if p.config.ATags.Enable { - p.activeTransforms = append(p.activeTransforms, p.ATagsTransform.AddTags) - p.LogInfo(prefixlog + "atags subprocessor is enabled") - } - - return nil -} - -func (p *Transforms) InitDNSMessageFormat(dm *dnsutils.DNSMessage) { - if p.config.Filtering.Enable { - p.FilteringTransform.InitDNSMessage(dm) - } - - if p.config.GeoIP.Enable { - p.GeoipTransform.InitDNSMessage(dm) - } - - if p.config.Suspicious.Enable { - p.SuspiciousTransform.InitDNSMessage(dm) - } - - if p.config.Normalize.Enable { - if p.config.Normalize.AddTld || p.config.Normalize.AddTldPlusOne { - p.NormalizeTransform.InitDNSMessage(dm) - } - } - - if p.config.Extract.Enable { - if p.config.Extract.AddPayload { - p.ExtractProcessor.InitDNSMessage(dm) - } - } - - if p.config.Reducer.Enable { - p.ReducerTransform.InitDNSMessage(dm) - } - - if p.config.MachineLearning.Enable { - p.MachineLearningTransform.InitDNSMessage(dm) - } - - if p.config.ATags.Enable { - p.ATagsTransform.InitDNSMessage(dm) - } -} - -func (p *Transforms) Reset() { - if p.config.GeoIP.Enable { - p.GeoipTransform.Close() - } -} - -func (p *Transforms) LogInfo(msg string, v ...interface{}) { - p.logger.Info(pkgutils.PrefixLogTransformer+"["+p.name+"] "+msg, v...) -} - -func (p *Transforms) LogError(msg string, v ...interface{}) { - p.logger.Error(pkgutils.PrefixLogTransformer+"["+p.name+"] "+msg, v...) 
-} - -// transform functions: return code -func (p *Transforms) machineLearningTransform(dm *dnsutils.DNSMessage) int { - p.MachineLearningTransform.AddFeatures(dm) - return ReturnSuccess -} - -func (p *Transforms) suspiciousTransform(dm *dnsutils.DNSMessage) int { - p.SuspiciousTransform.CheckIfSuspicious(dm) - return ReturnSuccess -} - -func (p *Transforms) geoipTransform(dm *dnsutils.DNSMessage) int { - geoInfo, err := p.GeoipTransform.Lookup(dm.NetworkInfo.QueryIP) - if err != nil { - p.LogError("geoip lookup error %v", err) - return ReturnError - } - - dm.Geo.Continent = geoInfo.Continent - dm.Geo.CountryIsoCode = geoInfo.CountryISOCode - dm.Geo.City = geoInfo.City - dm.Geo.AutonomousSystemNumber = geoInfo.ASN - dm.Geo.AutonomousSystemOrg = geoInfo.ASO - - return ReturnSuccess -} - -func (p *Transforms) anonymizeIP(dm *dnsutils.DNSMessage) int { - dm.NetworkInfo.QueryIP = p.UserPrivacyTransform.AnonymizeIP(dm.NetworkInfo.QueryIP) - - return ReturnSuccess -} - -func (p *Transforms) hashIP(dm *dnsutils.DNSMessage) int { - dm.NetworkInfo.QueryIP = p.UserPrivacyTransform.HashIP(dm.NetworkInfo.QueryIP) - dm.NetworkInfo.ResponseIP = p.UserPrivacyTransform.HashIP(dm.NetworkInfo.ResponseIP) - return ReturnSuccess -} - -func (p *Transforms) measureLatency(dm *dnsutils.DNSMessage) int { - p.LatencyTransform.MeasureLatency(dm) - return ReturnSuccess -} - -func (p *Transforms) detectEvictedTimeout(dm *dnsutils.DNSMessage) int { - p.LatencyTransform.DetectEvictedTimeout(dm) - return ReturnSuccess -} - -func (p *Transforms) minimazeQname(dm *dnsutils.DNSMessage) int { - dm.DNS.Qname = p.UserPrivacyTransform.MinimazeQname(dm.DNS.Qname) - - return ReturnSuccess -} - -func (p *Transforms) addBase64Payload(dm *dnsutils.DNSMessage) int { - dm.Extracted.Base64Payload = p.ExtractProcessor.AddBase64Payload(dm) - return ReturnSuccess -} - -func (p *Transforms) ProcessMessage(dm *dnsutils.DNSMessage) int { - // Begin to normalize - p.NormalizeTransform.ProcessDNSMessage(dm) - - // 
Traffic filtering ? - if p.FilteringTransform.CheckIfDrop(dm) { - return ReturnDrop - } - - // Traffic reducer ? - if p.ReducerTransform.ProcessDNSMessage(dm) == ReturnDrop { - return ReturnDrop - } - - // and finally apply other transformation - var rCode int - for _, fn := range p.activeTransforms { - rCode = fn(dm) - if rCode != ReturnSuccess { - return rCode - } - } - - return ReturnSuccess -} diff --git a/transformers/subprocessors_test.go b/transformers/subprocessors_test.go deleted file mode 100644 index f7a791ed..00000000 --- a/transformers/subprocessors_test.go +++ /dev/null @@ -1,313 +0,0 @@ -package transformers - -import ( - "testing" - - "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-logger" -) - -const ( - IPv6Address = "fe80::6111:626:c1b2:2353" - CapsAddress = "www.Google.Com" - NormAddress = "www.google.com" - IPv6ShortND = "fe80::" - Localhost = "localhost" -) - -func TestTransformsSuspicious(t *testing.T) { - // config - config := pkgconfig.GetFakeConfigTransformers() - config.Suspicious.Enable = true - - // init subproccesor - channels := []chan dnsutils.DNSMessage{} - subprocessors := NewTransforms(config, logger.New(false), "test", channels, 0) - - // malformed DNS message - dm := dnsutils.GetFakeDNSMessage() - dm.DNS.MalformedPacket = true - - // init dns message with additional part - subprocessors.InitDNSMessageFormat(&dm) - - returnCode := subprocessors.ProcessMessage(&dm) - - if dm.Suspicious.Score != 1.0 { - t.Errorf("suspicious score should be equal to 1.0") - } - - if dm.Suspicious.MalformedPacket != true { - t.Errorf("suspicious malformed packet flag should be equal to true") - } - - if returnCode != ReturnSuccess { - t.Errorf("Return code is %v and not RETURN_SUCCESS (%v)", returnCode, ReturnSuccess) - } -} - -func TestTransformsGeoIPLookupCountry(t *testing.T) { - // enable geoip - config := pkgconfig.GetFakeConfigTransformers() - 
config.GeoIP.Enable = true - config.GeoIP.DBCountryFile = "../testsdata/GeoLite2-Country.mmdb" - - // init processor - channels := []chan dnsutils.DNSMessage{} - subprocessors := NewTransforms(config, logger.New(false), "test", channels, 0) - - // create test message - dm := dnsutils.GetFakeDNSMessage() - dm.NetworkInfo.QueryIP = "83.112.146.176" - - // init dns message with additional part - subprocessors.InitDNSMessageFormat(&dm) - - // apply subprocessors - returnCode := subprocessors.ProcessMessage(&dm) - - if dm.Geo.CountryIsoCode != "FR" { - t.Errorf("country invalid want: FR got: %s", dm.Geo.CountryIsoCode) - } - - if returnCode != ReturnSuccess { - t.Errorf("Return code is %v and not RETURN_SUCCESS (%v)", returnCode, ReturnSuccess) - } -} - -func TestTransformsGeoIPLookupAsn(t *testing.T) { - // enable geoip - config := pkgconfig.GetFakeConfigTransformers() - config.GeoIP.Enable = true - config.GeoIP.DBASNFile = "../testsdata/GeoLite2-ASN.mmdb" - - // init the processor - channels := []chan dnsutils.DNSMessage{} - subprocessors := NewTransforms(config, logger.New(false), "test", channels, 0) - - // create test message - dm := dnsutils.GetFakeDNSMessage() - dm.NetworkInfo.QueryIP = "83.112.146.176" - - // init dns message with additional part - subprocessors.InitDNSMessageFormat(&dm) - - // apply subprocessors - returnCode := subprocessors.ProcessMessage(&dm) - - if dm.Geo.AutonomousSystemOrg != "Orange" { - t.Errorf("asn organisation invalid want: Orange got: %s", dm.Geo.AutonomousSystemOrg) - } - - if returnCode != ReturnSuccess { - t.Errorf("Return code is %v and not RETURN_SUCCESS (%v)", returnCode, ReturnSuccess) - } -} - -func TestTransformsReduceQname(t *testing.T) { - // enable feature - config := pkgconfig.GetFakeConfigTransformers() - config.UserPrivacy.Enable = true - config.UserPrivacy.MinimazeQname = true - - // init the processor - channels := []chan dnsutils.DNSMessage{} - subprocessors := NewTransforms(config, logger.New(false), "test", 
channels, 0) - - // create test message - dm := dnsutils.GetFakeDNSMessage() - - // init dns message with additional part - subprocessors.InitDNSMessageFormat(&dm) - - // test 1: google.com - dm.DNS.Qname = NormAddress - returnCode := subprocessors.ProcessMessage(&dm) - - if dm.DNS.Qname != "google.com" { - t.Errorf("Qname minimization failed, got %s", dm.DNS.Qname) - } - if returnCode != ReturnSuccess { - t.Errorf("Return code is %v and not RETURN_SUCCESS (%v)", returnCode, ReturnSuccess) - } - - // test 2: localhost - dm.DNS.Qname = Localhost - returnCode = subprocessors.ProcessMessage(&dm) - - if dm.DNS.Qname != Localhost { - t.Errorf("Qname minimization failed, got %s", dm.DNS.Qname) - } - if returnCode != ReturnSuccess { - t.Errorf("Return code is %v and not RETURN_SUCCESS (%v)", returnCode, ReturnSuccess) - } - - // test 3: local.home - dm.DNS.Qname = "localhost.domain.local.home" - returnCode = subprocessors.ProcessMessage(&dm) - - if dm.DNS.Qname != "local.home" { - t.Errorf("Qname minimization failed, got %s", dm.DNS.Qname) - } - if returnCode != ReturnSuccess { - t.Errorf("Return code is %v and not RETURN_SUCCESS (%v)", returnCode, ReturnSuccess) - } -} - -func TestTransformsAnonymizeIPv4(t *testing.T) { - // enable feature - config := pkgconfig.GetFakeConfigTransformers() - config.UserPrivacy.Enable = true - config.UserPrivacy.AnonymizeIP = true - - // init the processor - channels := []chan dnsutils.DNSMessage{} - subprocessors := NewTransforms(config, logger.New(false), "test", channels, 0) - - // create test message - dm := dnsutils.GetFakeDNSMessage() - - // init dns message with additional part - subprocessors.InitDNSMessageFormat(&dm) - - dm.NetworkInfo.QueryIP = "192.168.1.2" - - returnCode := subprocessors.ProcessMessage(&dm) - if dm.NetworkInfo.QueryIP != "192.168.0.0" { - t.Errorf("Ipv4 anonymization failed, got %v", dm.NetworkInfo.QueryIP) - } - if returnCode != ReturnSuccess { - t.Errorf("Return code is %v and not RETURN_SUCCESS (%v)", 
returnCode, ReturnSuccess) - } -} - -func TestTransformsAnonymizeIPv6(t *testing.T) { - // enable feature - config := pkgconfig.GetFakeConfigTransformers() - config.UserPrivacy.Enable = true - config.UserPrivacy.AnonymizeIP = true - - // init the processor - channels := []chan dnsutils.DNSMessage{} - subprocessors := NewTransforms(config, logger.New(false), "test", channels, 0) - - // create test message - dm := dnsutils.GetFakeDNSMessage() - - // init dns message with additional part - subprocessors.InitDNSMessageFormat(&dm) - - dm.NetworkInfo.QueryIP = IPv6Address - - returnCode := subprocessors.ProcessMessage(&dm) - if dm.NetworkInfo.QueryIP != IPv6ShortND { - t.Errorf("Ipv6 anonymization failed, got %s", dm.NetworkInfo.QueryIP) - } - if returnCode != ReturnSuccess { - t.Errorf("Return code is %v and not RETURN_SUCCESS (%v)", returnCode, ReturnSuccess) - } -} - -func TestTransformsNormalizeLowercaseQname(t *testing.T) { - // enable feature - config := pkgconfig.GetFakeConfigTransformers() - config.Normalize.Enable = true - config.Normalize.QnameLowerCase = true - - // init the processor - channels := []chan dnsutils.DNSMessage{} - subprocessors := NewTransforms(config, logger.New(false), "test", channels, 0) - - // create test message - dm := dnsutils.GetFakeDNSMessage() - // init dns message with additional part - subprocessors.InitDNSMessageFormat(&dm) - - dm.DNS.Qname = CapsAddress - dm.NetworkInfo.QueryIP = IPv6Address - - returnCode := subprocessors.ProcessMessage(&dm) - if dm.DNS.Qname != NormAddress { - t.Errorf("Qname to lowercase failed, got %s", dm.DNS.Qname) - } - if returnCode != ReturnSuccess { - t.Errorf("Return code is %v and not RETURN_SUCCESS (%v)", returnCode, ReturnSuccess) - } -} - -func TestMultiTransforms(t *testing.T) { - // enable feature - config := pkgconfig.GetFakeConfigTransformers() - config.Normalize.Enable = true - config.Normalize.QnameLowerCase = true - config.UserPrivacy.Enable = true - config.UserPrivacy.AnonymizeIP = true - - 
// init the processor - channels := []chan dnsutils.DNSMessage{} - subprocessors := NewTransforms(config, logger.New(false), "test", channels, 0) - - // create test message - dm := dnsutils.GetFakeDNSMessage() - // init dns message with additional part - subprocessors.InitDNSMessageFormat(&dm) - - dm.DNS.Qname = CapsAddress - dm.NetworkInfo.QueryIP = IPv6Address - - returnCode := subprocessors.ProcessMessage(&dm) - if dm.DNS.Qname != NormAddress { - t.Errorf("Qname to lowercase failed, got %s", dm.DNS.Qname) - } - if dm.NetworkInfo.QueryIP != IPv6ShortND { - t.Errorf("Ipv6 anonymization failed, got %s", dm.NetworkInfo.QueryIP) - } - if returnCode != ReturnSuccess { - t.Errorf("Return code is %v and not RETURN_SUCCESS (%v)", returnCode, ReturnSuccess) - } -} - -func TestTransformAndFilter(t *testing.T) { - // enable feature - config := pkgconfig.GetFakeConfigTransformers() - config.UserPrivacy.Enable = true - config.UserPrivacy.AnonymizeIP = true - - // file contains google.fr, test.github.com - config.Filtering.Enable = true - config.Filtering.KeepDomainFile = "../testsdata/filtering_keep_domains.txt" - - testURL1 := "mail.google.com" - testURL2 := "test.github.com" - - // init the processor - channels := []chan dnsutils.DNSMessage{} - subprocessors := NewTransforms(config, logger.New(false), "test", channels, 0) - - // create test message - dm := dnsutils.GetFakeDNSMessage() - - // should be dropped and not transformed - dm.DNS.Qname = testURL1 - dm.NetworkInfo.QueryIP = IPv6Address - - returnCode := subprocessors.ProcessMessage(&dm) - if returnCode != ReturnDrop { - t.Errorf("Return code is %v and not RETURN_DROP (%v)", returnCode, ReturnDrop) - } - if dm.NetworkInfo.QueryIP == IPv6ShortND { - t.Errorf("Ipv6 anonymization occurred (it should have dropped before filter)") - } - - // should not be dropped, and should be transformed - dm.DNS.Qname = testURL2 - dm.NetworkInfo.QueryIP = IPv6Address - returnCode = subprocessors.ProcessMessage(&dm) - if returnCode != 
ReturnSuccess { - t.Errorf("Return code is %v and not RETURN_SUCCESS (%v)", returnCode, ReturnSuccess) - } - if dm.NetworkInfo.QueryIP != IPv6ShortND { - t.Errorf("Ipv6 anonymization failed, got %s", dm.NetworkInfo.QueryIP) - } -} diff --git a/transformers/suspicious.go b/transformers/suspicious.go index 07e02547..78412757 100644 --- a/transformers/suspicious.go +++ b/transformers/suspicious.go @@ -1,7 +1,6 @@ package transformers import ( - "fmt" "regexp" "strings" @@ -11,102 +10,53 @@ import ( ) type SuspiciousTransform struct { - config *pkgconfig.ConfigTransformers - logger *logger.Logger - name string - CommonQtypes map[string]bool + GenericTransformer + commonQtypes map[string]bool whitelistDomainsRegex map[string]*regexp.Regexp - instance int - outChannels []chan dnsutils.DNSMessage - logInfo func(msg string, v ...interface{}) - logError func(msg string, v ...interface{}) } -func NewSuspiciousSubprocessor(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, - instance int, outChannels []chan dnsutils.DNSMessage, - logInfo func(msg string, v ...interface{}), logError func(msg string, v ...interface{}), -) SuspiciousTransform { - d := SuspiciousTransform{ - config: config, - logger: logger, - name: name, - CommonQtypes: make(map[string]bool), - whitelistDomainsRegex: make(map[string]*regexp.Regexp), - instance: instance, - outChannels: outChannels, - logInfo: logInfo, - logError: logError, - } - - d.ReadConfig() - - return d +func NewSuspiciousTransform(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, instance int, nextWorkers []chan dnsutils.DNSMessage) *SuspiciousTransform { + t := &SuspiciousTransform{GenericTransformer: NewTransformer(config, logger, "suspicious", name, instance, nextWorkers)} + t.commonQtypes = make(map[string]bool) + t.whitelistDomainsRegex = make(map[string]*regexp.Regexp) + return t } -func (p *SuspiciousTransform) ReadConfig() { +func (t *SuspiciousTransform) GetTransforms() 
([]Subtransform, error) { + subtransforms := []Subtransform{} + // cleanup maps - for key := range p.CommonQtypes { - delete(p.CommonQtypes, key) + for key := range t.commonQtypes { + delete(t.commonQtypes, key) } - for key := range p.whitelistDomainsRegex { - delete(p.whitelistDomainsRegex, key) + for key := range t.whitelistDomainsRegex { + delete(t.whitelistDomainsRegex, key) } // load maps - for _, v := range p.config.Suspicious.CommonQtypes { - p.CommonQtypes[v] = true + for _, v := range t.config.Suspicious.CommonQtypes { + t.commonQtypes[v] = true } - for _, v := range p.config.Suspicious.WhitelistDomains { - p.whitelistDomainsRegex[v] = regexp.MustCompile(v) + for _, v := range t.config.Suspicious.WhitelistDomains { + t.whitelistDomainsRegex[v] = regexp.MustCompile(v) } -} -func (p *SuspiciousTransform) ReloadConfig(config *pkgconfig.ConfigTransformers) { - p.config = config - - p.ReadConfig() -} - -func (p *SuspiciousTransform) IsEnabled() bool { - return p.config.Suspicious.Enable -} - -func (p *SuspiciousTransform) LogInfo(msg string, v ...interface{}) { - log := fmt.Sprintf("suspicious#%d - ", p.instance) - p.logInfo(log+msg, v...) -} - -func (p *SuspiciousTransform) LogError(msg string, v ...interface{}) { - log := fmt.Sprintf("suspicious#%d - ", p.instance) - p.logError(log+msg, v...) 
-} - -func (p *SuspiciousTransform) InitDNSMessage(dm *dnsutils.DNSMessage) { - if dm.Suspicious == nil { - dm.Suspicious = &dnsutils.TransformSuspicious{ - Score: 0.0, - MalformedPacket: false, - LargePacket: false, - LongDomain: false, - SlowDomain: false, - UnallowedChars: false, - UncommonQtypes: false, - ExcessiveNumberLabels: false, - } + if t.config.Suspicious.Enable { + subtransforms = append(subtransforms, Subtransform{name: "suspicious:check", processFunc: t.checkIfSuspicious}) } + return subtransforms, nil } -func (p *SuspiciousTransform) CheckIfSuspicious(dm *dnsutils.DNSMessage) { +func (t *SuspiciousTransform) checkIfSuspicious(dm *dnsutils.DNSMessage) (int, error) { if dm.Suspicious == nil { - p.LogError("transformer is not properly initialized") - return + dm.Suspicious = &dnsutils.TransformSuspicious{} } // ignore some domains ? - for _, d := range p.whitelistDomainsRegex { + for _, d := range t.whitelistDomainsRegex { if d.MatchString(dm.DNS.Qname) { - return + return ReturnKeep, nil } } @@ -117,41 +67,43 @@ func (p *SuspiciousTransform) CheckIfSuspicious(dm *dnsutils.DNSMessage) { } // long domain name ? - if len(dm.DNS.Qname) > p.config.Suspicious.ThresholdQnameLen { + if len(dm.DNS.Qname) > t.config.Suspicious.ThresholdQnameLen { dm.Suspicious.Score += 1.0 dm.Suspicious.LongDomain = true } // large packet size ? - if dm.DNS.Length > p.config.Suspicious.ThresholdPacketLen { + if dm.DNS.Length > t.config.Suspicious.ThresholdPacketLen { dm.Suspicious.Score += 1.0 dm.Suspicious.LargePacket = true } // slow domain name resolution ? - if dm.DNSTap.Latency > p.config.Suspicious.ThresholdSlow { + if dm.DNSTap.Latency > t.config.Suspicious.ThresholdSlow { dm.Suspicious.Score += 1.0 dm.Suspicious.SlowDomain = true } // uncommon qtype? 
- if _, found := p.CommonQtypes[dm.DNS.Qtype]; !found { + if _, found := t.commonQtypes[dm.DNS.Qtype]; !found { dm.Suspicious.Score += 1.0 dm.Suspicious.UncommonQtypes = true } // count the number of labels in qname - if strings.Count(dm.DNS.Qname, ".") > p.config.Suspicious.ThresholdMaxLabels { + if strings.Count(dm.DNS.Qname, ".") > t.config.Suspicious.ThresholdMaxLabels { dm.Suspicious.Score += 1.0 dm.Suspicious.ExcessiveNumberLabels = true } // search for unallowed characters - for _, v := range p.config.Suspicious.UnallowedChars { + for _, v := range t.config.Suspicious.UnallowedChars { if strings.Contains(dm.DNS.Qname, v) { dm.Suspicious.Score += 1.0 dm.Suspicious.UnallowedChars = true break } } + + return ReturnKeep, nil } diff --git a/transformers/suspicious_test.go b/transformers/suspicious_test.go index 2614d0c4..a481412a 100644 --- a/transformers/suspicious_test.go +++ b/transformers/suspicious_test.go @@ -14,16 +14,23 @@ func TestSuspicious_Json(t *testing.T) { // enable feature config := pkgconfig.GetFakeConfigTransformers() - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // get fake dm := dnsutils.GetFakeDNSMessage() - dm.Init() // init subproccesor - suspicious := NewSuspiciousSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) - suspicious.InitDNSMessage(&dm) + suspicious := NewSuspiciousTransform(config, logger.New(false), "test", 0, outChans) + + // init transforms and check + suspicious.GetTransforms() + returnCode, err := suspicious.checkIfSuspicious(&dm) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if returnCode != ReturnKeep { + t.Errorf("Return code is %v, want keep(%v)", returnCode, ReturnKeep) + } // expected json refJSON := ` @@ -42,7 +49,7 @@ func TestSuspicious_Json(t *testing.T) { ` var dmMap map[string]interface{} - err := json.Unmarshal([]byte(dm.ToJSON()), &dmMap) + err = json.Unmarshal([]byte(dm.ToJSON()), &dmMap) if err != nil { t.Fatalf("could not unmarshal dm 
json: %s\n", err) } @@ -67,23 +74,28 @@ func TestSuspicious_MalformedPacket(t *testing.T) { config := pkgconfig.GetFakeConfigTransformers() config.Suspicious.Enable = true - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - suspicious := NewSuspiciousSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + suspicious := NewSuspiciousTransform(config, logger.New(false), "test", 0, outChans) // malformed DNS message dm := dnsutils.GetFakeDNSMessage() dm.DNS.MalformedPacket = true - // init dns message with additional part - suspicious.InitDNSMessage(&dm) + // init transforms and check + suspicious.GetTransforms() - suspicious.CheckIfSuspicious(&dm) + returnCode, err := suspicious.checkIfSuspicious(&dm) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if returnCode != ReturnKeep { + t.Errorf("Return code is %v, want keep(%v)", returnCode, ReturnKeep) + } if dm.Suspicious.Score != 1.0 { - t.Errorf("suspicious score should be equal to 1.0") + t.Errorf("suspicious score should be equal to 0.0, got: %d", int(dm.Suspicious.Score)) } if dm.Suspicious.MalformedPacket != true { @@ -97,23 +109,27 @@ func TestSuspicious_LongDomain(t *testing.T) { config.Suspicious.Enable = true config.Suspicious.ThresholdQnameLen = 4 - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - suspicious := NewSuspiciousSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + suspicious := NewSuspiciousTransform(config, logger.New(false), "test", 0, outChans) // malformed DNS message dm := dnsutils.GetFakeDNSMessage() dm.DNS.Qname = "longdomain.com" - // init dns message with additional part - suspicious.InitDNSMessage(&dm) - - suspicious.CheckIfSuspicious(&dm) + // init transforms and check + suspicious.GetTransforms() + returnCode, err := suspicious.checkIfSuspicious(&dm) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if returnCode != 
ReturnKeep { + t.Errorf("Return code is %v, want keep(%v)", returnCode, ReturnKeep) + } if dm.Suspicious.Score != 1.0 { - t.Errorf("suspicious score should be equal to 1.0") + t.Errorf("suspicious score should be equal to 0.0, got: %d", int(dm.Suspicious.Score)) } if dm.Suspicious.LongDomain != true { @@ -127,23 +143,27 @@ func TestSuspicious_SlowDomain(t *testing.T) { config.Suspicious.Enable = true config.Suspicious.ThresholdSlow = 3.0 - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - suspicious := NewSuspiciousSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + suspicious := NewSuspiciousTransform(config, logger.New(false), "test", 0, outChans) // malformed DNS message dm := dnsutils.GetFakeDNSMessage() dm.DNSTap.Latency = 4.0 - // init dns message with additional part - suspicious.InitDNSMessage(&dm) - - suspicious.CheckIfSuspicious(&dm) + // init transforms and check + suspicious.GetTransforms() + returnCode, err := suspicious.checkIfSuspicious(&dm) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if returnCode != ReturnKeep { + t.Errorf("Return code is %v, want keep(%v)", returnCode, ReturnKeep) + } if dm.Suspicious.Score != 1.0 { - t.Errorf("suspicious score should be equal to 1.0") + t.Errorf("suspicious score should be equal to 0.0, got: %d", int(dm.Suspicious.Score)) } if dm.Suspicious.SlowDomain != true { @@ -157,23 +177,27 @@ func TestSuspicious_LargePacket(t *testing.T) { config.Suspicious.Enable = true config.Suspicious.ThresholdPacketLen = 4 - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - suspicious := NewSuspiciousSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + suspicious := NewSuspiciousTransform(config, logger.New(false), "test", 0, outChans) // malformed DNS message dm := dnsutils.GetFakeDNSMessage() dm.DNS.Length = 50 - // init dns message with additional part - 
suspicious.InitDNSMessage(&dm) - - suspicious.CheckIfSuspicious(&dm) + // init transforms and check + suspicious.GetTransforms() + returnCode, err := suspicious.checkIfSuspicious(&dm) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if returnCode != ReturnKeep { + t.Errorf("Return code is %v, want keep(%v)", returnCode, ReturnKeep) + } if dm.Suspicious.Score != 1.0 { - t.Errorf("suspicious score should be equal to 1.0") + t.Errorf("suspicious score should be equal to 0.0, got: %d", int(dm.Suspicious.Score)) } if dm.Suspicious.LargePacket != true { @@ -186,23 +210,27 @@ func TestSuspicious_UncommonQtype(t *testing.T) { config := pkgconfig.GetFakeConfigTransformers() config.Suspicious.Enable = true - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - suspicious := NewSuspiciousSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + suspicious := NewSuspiciousTransform(config, logger.New(false), "test", 0, outChans) // malformed DNS message dm := dnsutils.GetFakeDNSMessage() dm.DNS.Qtype = "LOC" - // init dns message with additional part - suspicious.InitDNSMessage(&dm) - - suspicious.CheckIfSuspicious(&dm) + // init transforms and check + suspicious.GetTransforms() + returnCode, err := suspicious.checkIfSuspicious(&dm) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if returnCode != ReturnKeep { + t.Errorf("Return code is %v, want keep(%v)", returnCode, ReturnKeep) + } if dm.Suspicious.Score != 1.0 { - t.Errorf("suspicious score should be equal to 1.0") + t.Errorf("suspicious score should be equal to 0.0, got: %d", int(dm.Suspicious.Score)) } if dm.Suspicious.UncommonQtypes != true { @@ -216,23 +244,27 @@ func TestSuspicious_ExceedMaxLabels(t *testing.T) { config.Suspicious.Enable = true config.Suspicious.ThresholdMaxLabels = 2 - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - suspicious := NewSuspiciousSubprocessor(config, 
logger.New(false), "test", 0, outChans, log.Info, log.Error) + suspicious := NewSuspiciousTransform(config, logger.New(false), "test", 0, outChans) // malformed DNS message dm := dnsutils.GetFakeDNSMessage() dm.DNS.Qname = "test.sub.dnscollector.com" - // init dns message with additional part - suspicious.InitDNSMessage(&dm) - - suspicious.CheckIfSuspicious(&dm) + // init transforms and check + suspicious.GetTransforms() + returnCode, err := suspicious.checkIfSuspicious(&dm) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if returnCode != ReturnKeep { + t.Errorf("Return code is %v, want keep(%v)", returnCode, ReturnKeep) + } if dm.Suspicious.Score != 1.0 { - t.Errorf("suspicious score should be equal to 1.0") + t.Errorf("suspicious score should be equal to 0.0, got: %d", int(dm.Suspicious.Score)) } if dm.Suspicious.ExcessiveNumberLabels != true { @@ -245,23 +277,27 @@ func TestSuspicious_UnallowedChars(t *testing.T) { config := pkgconfig.GetFakeConfigTransformers() config.Suspicious.Enable = true - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - suspicious := NewSuspiciousSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + suspicious := NewSuspiciousTransform(config, logger.New(false), "test", 0, outChans) // malformed DNS message dm := dnsutils.GetFakeDNSMessage() dm.DNS.Qname = "AAAAAA==.dnscollector.com" - // init dns message with additional part - suspicious.InitDNSMessage(&dm) - - suspicious.CheckIfSuspicious(&dm) + // init transforms and check + suspicious.GetTransforms() + returnCode, err := suspicious.checkIfSuspicious(&dm) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if returnCode != ReturnKeep { + t.Errorf("Return code is %v, want keep(%v)", returnCode, ReturnKeep) + } if dm.Suspicious.Score != 1.0 { - t.Errorf("suspicious score should be equal to 1.0") + t.Errorf("suspicious score should be equal to 0.0, got: %d", int(dm.Suspicious.Score)) } if 
dm.Suspicious.UnallowedChars != true { @@ -274,20 +310,23 @@ func TestSuspicious_WhitelistDomains(t *testing.T) { config := pkgconfig.GetFakeConfigTransformers() config.Suspicious.Enable = true - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init subproccesor - suspicious := NewSuspiciousSubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + suspicious := NewSuspiciousTransform(config, logger.New(false), "test", 0, outChans) // IPv6 DNS message PTR dm := dnsutils.GetFakeDNSMessage() dm.DNS.Qname = "0.f.e.d.c.b.a.9.8.7.6.5.4.3.2.1.ip6.arpa" - // init dns message with additional part - suspicious.InitDNSMessage(&dm) - - suspicious.CheckIfSuspicious(&dm) + suspicious.GetTransforms() + returnCode, err := suspicious.checkIfSuspicious(&dm) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if returnCode != ReturnKeep { + t.Errorf("Return code is %v, want keep(%v)", returnCode, ReturnKeep) + } if dm.Suspicious.Score != 0.0 { t.Errorf("suspicious score should be equal to 0.0, got: %d", int(dm.Suspicious.Score)) diff --git a/transformers/transformers.go b/transformers/transformers.go new file mode 100644 index 00000000..ad311a5a --- /dev/null +++ b/transformers/transformers.go @@ -0,0 +1,148 @@ +package transformers + +import ( + "fmt" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" +) + +var ( + ReturnKeep = 1 + ReturnDrop = 2 +) + +type Subtransform struct { + name string + processFunc func(dm *dnsutils.DNSMessage) (int, error) +} + +type Transformation interface { + GetTransforms() ([]Subtransform, error) + ReloadConfig(config *pkgconfig.ConfigTransformers) + Reset() +} + +type GenericTransformer struct { + config *pkgconfig.ConfigTransformers + logger *logger.Logger + name string + nextWorkers []chan dnsutils.DNSMessage + LogInfo, LogError func(msg string, v ...interface{}) +} + +func 
NewTransformer(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, workerName string, instance int, nextWorkers []chan dnsutils.DNSMessage) GenericTransformer { + t := GenericTransformer{config: config, logger: logger, nextWorkers: nextWorkers, name: name} + + t.LogInfo = func(msg string, v ...interface{}) { + log := fmt.Sprintf("worker - [%s] (conn #%d) [transform=%s] - ", workerName, instance, name) + logger.Info(log+msg, v...) + } + + t.LogError = func(msg string, v ...interface{}) { + log := fmt.Sprintf("worker - [%s] (conn #%d) [transform=%s] - ", workerName, instance, name) + logger.Error(log+msg, v...) + } + return t +} + +func (t *GenericTransformer) ReloadConfig(config *pkgconfig.ConfigTransformers) { + t.config = config +} + +func (t *GenericTransformer) Reset() {} + +type TransformEntry struct { + Transformation +} + +type Transforms struct { + config *pkgconfig.ConfigTransformers + logger *logger.Logger + name string + instance int + + availableTransforms []TransformEntry + activeTransforms []func(dm *dnsutils.DNSMessage) (int, error) +} + +func NewTransforms(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, nextWorkers []chan dnsutils.DNSMessage, instance int) Transforms { + + d := Transforms{config: config, logger: logger, name: name, instance: instance} + + // order definition important + d.availableTransforms = append(d.availableTransforms, TransformEntry{NewNormalizeTransform(config, logger, name, instance, nextWorkers)}) + d.availableTransforms = append(d.availableTransforms, TransformEntry{NewFilteringTransform(config, logger, name, instance, nextWorkers)}) + d.availableTransforms = append(d.availableTransforms, TransformEntry{NewReducerTransform(config, logger, name, instance, nextWorkers)}) + d.availableTransforms = append(d.availableTransforms, TransformEntry{NewATagsTransform(config, logger, name, instance, nextWorkers)}) + d.availableTransforms = append(d.availableTransforms, 
TransformEntry{NewRelabelTransform(config, logger, name, instance, nextWorkers)}) + d.availableTransforms = append(d.availableTransforms, TransformEntry{NewUserPrivacyTransform(config, logger, name, instance, nextWorkers)}) + d.availableTransforms = append(d.availableTransforms, TransformEntry{NewExtractTransform(config, logger, name, instance, nextWorkers)}) + d.availableTransforms = append(d.availableTransforms, TransformEntry{NewSuspiciousTransform(config, logger, name, instance, nextWorkers)}) + d.availableTransforms = append(d.availableTransforms, TransformEntry{NewMachineLearningTransform(config, logger, name, instance, nextWorkers)}) + d.availableTransforms = append(d.availableTransforms, TransformEntry{NewLatencyTransform(config, logger, name, instance, nextWorkers)}) + d.availableTransforms = append(d.availableTransforms, TransformEntry{NewDNSGeoIPTransform(config, logger, name, instance, nextWorkers)}) + + d.Prepare() + return d +} + +func (p *Transforms) ReloadConfig(config *pkgconfig.ConfigTransformers) { + p.config = config + + for _, transform := range p.availableTransforms { + transform.ReloadConfig(config) + } + + p.Prepare() +} + +func (p *Transforms) Prepare() error { + // clean the slice + p.activeTransforms = p.activeTransforms[:0] + tranformsList := []string{} + + for _, transform := range p.availableTransforms { + subtransforms, err := transform.GetTransforms() + if err != nil { + p.LogError("error on init subtransforms:", err) + continue + } + for _, subtransform := range subtransforms { + p.activeTransforms = append(p.activeTransforms, subtransform.processFunc) + tranformsList = append(tranformsList, subtransform.name) + } + } + + if len(tranformsList) > 0 { + p.LogInfo("transformers applied: %v", tranformsList) + } + return nil +} + +func (p *Transforms) Reset() { + for _, transform := range p.availableTransforms { + transform.Reset() + } +} + +func (p *Transforms) LogInfo(msg string, v ...interface{}) { + connlog := fmt.Sprintf("(conn #%d) 
", p.instance) + p.logger.Info(pkgconfig.PrefixLogWorker+"["+p.name+"] "+connlog+msg, v...) +} + +func (p *Transforms) LogError(msg string, v ...interface{}) { + p.logger.Error(pkgconfig.PrefixLogWorker+"["+p.name+"] "+msg, v...) +} + +func (p *Transforms) ProcessMessage(dm *dnsutils.DNSMessage) (int, error) { + for _, transform := range p.activeTransforms { + if result, err := transform(dm); err != nil { + return ReturnKeep, fmt.Errorf("error on transform processing: %v", err.Error()) + } else if result == ReturnDrop { + return ReturnDrop, nil + } + } + return ReturnKeep, nil +} diff --git a/transformers/transformers_test.go b/transformers/transformers_test.go new file mode 100644 index 00000000..fa1085c7 --- /dev/null +++ b/transformers/transformers_test.go @@ -0,0 +1,92 @@ +package transformers + +import ( + "testing" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" +) + +const ( + IPv6Address = "fe80::6111:626:c1b2:2353" + CapsAddress = "www.Google.Com" + NormAddress = "www.google.com" + + Localhost = "localhost" +) + +// Bench to init DNS message +func BenchmarkTransforms_InitAndProcess(b *testing.B) { + config := pkgconfig.GetFakeConfigTransformers() + config.Suspicious.Enable = true + config.GeoIP.Enable = true + config.GeoIP.DBCountryFile = ".././tests/testsdata/GeoLite2-Country.mmdb" + config.GeoIP.DBASNFile = ".././tests/testsdata/GeoLite2-ASN.mmdb" + config.UserPrivacy.Enable = true + config.UserPrivacy.MinimazeQname = true + config.UserPrivacy.AnonymizeIP = true + config.Normalize.Enable = true + config.Normalize.QnameLowerCase = true + config.Filtering.Enable = true + config.Filtering.KeepDomainFile = ".././tests/testsdata/filtering_keep_domains.txt" + + channels := []chan dnsutils.DNSMessage{} + transformers := NewTransforms(config, logger.New(false), "test", channels, 0) + + dm := dnsutils.GetFakeDNSMessage() + + b.ResetTimer() + for i := 0; 
i < b.N; i++ { + transformers.ProcessMessage(&dm) + } +} + +func TestTransforms_ProcessOrder(t *testing.T) { + // enable feature + config := pkgconfig.GetFakeConfigTransformers() + config.Normalize.Enable = true + config.Normalize.QnameLowerCase = true + config.UserPrivacy.Enable = true + config.UserPrivacy.AnonymizeIP = true + config.Filtering.Enable = true + config.Filtering.KeepDomainFile = "../tests/testsdata/filtering_keep_domains.txt" // file contains google.fr, test.github.com + + testURL1 := "mail.google.com" + testURL2 := "test.github.com" + + // init the transformer + subprocessors := NewTransforms(config, logger.New(false), "test", []chan dnsutils.DNSMessage{}, 0) + + // create test message + dm := dnsutils.GetFakeDNSMessage() + + // should be dropped and not transformed + dm.DNS.Qname = testURL1 + dm.NetworkInfo.QueryIP = IPv6Address + + returnCode, err := subprocessors.ProcessMessage(&dm) + if err != nil { + t.Errorf("process transform err %s", err.Error()) + } + + if returnCode != ReturnDrop { + t.Errorf("Return code is %v and not RETURN_KEEP (%v)", returnCode, ReturnKeep) + } + + // should not be dropped, and should be transformed + dm.DNS.Qname = testURL2 + dm.NetworkInfo.QueryIP = IPv6Address + + returnCode, err = subprocessors.ProcessMessage(&dm) + if err != nil { + t.Errorf("process transform err %s", err.Error()) + } + + if returnCode != ReturnKeep { + t.Errorf("Return code is %v and not RETURN_KEEP (%v)", returnCode, ReturnKeep) + } + if dm.NetworkInfo.QueryIP != IPv6ShortND { + t.Errorf("Ipv6 anonymization failed, got %s", dm.NetworkInfo.QueryIP) + } +} diff --git a/transformers/userprivacy.go b/transformers/userprivacy.go index d3eb3f61..e930963b 100644 --- a/transformers/userprivacy.go +++ b/transformers/userprivacy.go @@ -6,135 +6,108 @@ import ( "crypto/sha512" "fmt" "net" - "strconv" "strings" "github.com/dmachard/go-dnscollector/dnsutils" "github.com/dmachard/go-dnscollector/pkgconfig" 
"github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" "golang.org/x/net/publicsuffix" ) -func parseCIDRMask(mask string) (net.IPMask, error) { - parts := strings.Split(mask, "/") - if len(parts) != 2 { - return nil, fmt.Errorf("invalid mask format, expected /integer: %s", mask) - } - - ones, err := strconv.Atoi(parts[1]) - if err != nil { - return nil, fmt.Errorf("invalid /%s cidr", mask) - } - - if strings.Contains(parts[0], ":") { - ipv6Mask := net.CIDRMask(ones, 128) - return ipv6Mask, nil +func HashIP(ip string, algo string) string { + switch algo { + case "sha1": + hash := sha1.New() + hash.Write([]byte(ip)) + return fmt.Sprintf("%x", hash.Sum(nil)) + case "sha256": + hash := sha256.New() + hash.Write([]byte(ip)) + return fmt.Sprintf("%x", hash.Sum(nil)) + case "sha512": // nolint + hash := sha512.New() + hash.Write([]byte(ip)) + return fmt.Sprintf("%x", hash.Sum(nil)) + default: + return ip } - - ipv4Mask := net.CIDRMask(ones, 32) - return ipv4Mask, nil } -type UserPrivacyProcessor struct { - config *pkgconfig.ConfigTransformers - v4Mask net.IPMask - v6Mask net.IPMask - instance int - outChannels []chan dnsutils.DNSMessage - logInfo func(msg string, v ...interface{}) - logError func(msg string, v ...interface{}) +type UserPrivacyTransform struct { + GenericTransformer + v4Mask, v6Mask net.IPMask } -func NewUserPrivacySubprocessor(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, - instance int, outChannels []chan dnsutils.DNSMessage, - logInfo func(msg string, v ...interface{}), logError func(msg string, v ...interface{}), -) UserPrivacyProcessor { - s := UserPrivacyProcessor{ - config: config, - instance: instance, - outChannels: outChannels, - logInfo: logInfo, - logError: logError, - } - s.ReadConfig() - return s +func NewUserPrivacyTransform(config *pkgconfig.ConfigTransformers, logger *logger.Logger, name string, instance int, nextWorkers []chan dnsutils.DNSMessage) *UserPrivacyTransform { + t := 
&UserPrivacyTransform{GenericTransformer: NewTransformer(config, logger, "userprivacy", name, instance, nextWorkers)} + return t } -func (s *UserPrivacyProcessor) ReadConfig() { +func (t *UserPrivacyTransform) GetTransforms() ([]Subtransform, error) { + subprocessors := []Subtransform{} var err error - s.v4Mask, err = parseCIDRMask(s.config.UserPrivacy.AnonymizeIPV4Bits) + t.v4Mask, err = netutils.ParseCIDRMask(t.config.UserPrivacy.AnonymizeIPV4Bits) if err != nil { - s.LogError("unable to init v4 mask: %v", err) + return nil, fmt.Errorf("unable to init v4 mask: %w", err) } - if !strings.Contains(s.config.UserPrivacy.AnonymizeIPV6Bits, ":") { - s.LogError("invalid v6 mask, expect format ::/integer") + if !strings.Contains(t.config.UserPrivacy.AnonymizeIPV6Bits, ":") { + return nil, fmt.Errorf("invalid v6 mask, expect format ::/integer") } - s.v6Mask, err = parseCIDRMask(s.config.UserPrivacy.AnonymizeIPV6Bits) + t.v6Mask, err = netutils.ParseCIDRMask(t.config.UserPrivacy.AnonymizeIPV6Bits) if err != nil { - s.LogError("unable to init v6 mask: %v", err) + return nil, fmt.Errorf("unable to init v6 mask: %w", err) } -} -func (s *UserPrivacyProcessor) ReloadConfig(config *pkgconfig.ConfigTransformers) { - s.config = config -} - -func (s *UserPrivacyProcessor) LogInfo(msg string, v ...interface{}) { - log := fmt.Sprintf("userprivacy#%d - ", s.instance) - s.logInfo(log+msg, v...) -} + if t.config.UserPrivacy.AnonymizeIP { + subprocessors = append(subprocessors, Subtransform{name: "userprivacy:ip-anonymization", processFunc: t.anonymizeQueryIP}) + } -func (s *UserPrivacyProcessor) LogError(msg string, v ...interface{}) { - log := fmt.Sprintf("userprivacy#%d - ", s.instance) - s.logError(log+msg, v...) 
-} + if t.config.UserPrivacy.MinimazeQname { + subprocessors = append(subprocessors, Subtransform{name: "userprivacy:minimaze-qname", processFunc: t.minimazeQname}) + } -func (s *UserPrivacyProcessor) MinimazeQname(qname string) string { - if etpo, err := publicsuffix.EffectiveTLDPlusOne(qname); err == nil { - return etpo + if t.config.UserPrivacy.HashQueryIP { + subprocessors = append(subprocessors, Subtransform{name: "userprivacy:hash-query-ip", processFunc: t.hashQueryIP}) + } + if t.config.UserPrivacy.HashReplyIP { + subprocessors = append(subprocessors, Subtransform{name: "userprivacy:hash-reply-ip", processFunc: t.hashReplyIP}) } - return qname + return subprocessors, nil } -func (s *UserPrivacyProcessor) AnonymizeIP(ip string) string { - // if mask is nil, something is wrong - if s.v4Mask == nil { - return ip +func (t *UserPrivacyTransform) anonymizeQueryIP(dm *dnsutils.DNSMessage) (int, error) { + queryIP := net.ParseIP(dm.NetworkInfo.QueryIP) + if queryIP == nil { + return ReturnKeep, fmt.Errorf("not a valid query ip: %v", dm.NetworkInfo.QueryIP) } - if s.v6Mask == nil { - return ip + + switch { + case queryIP.To4() != nil: + dm.NetworkInfo.QueryIP = queryIP.Mask(t.v4Mask).String() + default: + dm.NetworkInfo.QueryIP = queryIP.Mask(t.v6Mask).String() } - ipaddr := net.ParseIP(ip) - isipv4 := strings.LastIndex(ip, ".") + return ReturnKeep, nil +} - // ipv4, /16 mask - if isipv4 != -1 { - return ipaddr.Mask(s.v4Mask).String() - } +func (t *UserPrivacyTransform) hashQueryIP(dm *dnsutils.DNSMessage) (int, error) { + dm.NetworkInfo.QueryIP = HashIP(dm.NetworkInfo.QueryIP, t.config.UserPrivacy.HashIPAlgo) + return ReturnKeep, nil +} - // ipv6, /64 mask - return ipaddr.Mask(s.v6Mask).String() +func (t *UserPrivacyTransform) hashReplyIP(dm *dnsutils.DNSMessage) (int, error) { + dm.NetworkInfo.ResponseIP = HashIP(dm.NetworkInfo.ResponseIP, t.config.UserPrivacy.HashIPAlgo) + return ReturnKeep, nil } -func (s *UserPrivacyProcessor) HashIP(ip string) string { - switch 
s.config.UserPrivacy.HashIPAlgo { - case "sha1": - hash := sha1.New() - hash.Write([]byte(ip)) - return fmt.Sprintf("%x", hash.Sum(nil)) - case "sha256": - hash := sha256.New() - hash.Write([]byte(ip)) - return fmt.Sprintf("%x", hash.Sum(nil)) - case "sha512": - hash := sha512.New() - hash.Write([]byte(ip)) - return fmt.Sprintf("%x", hash.Sum(nil)) - default: - return ip +func (t *UserPrivacyTransform) minimazeQname(dm *dnsutils.DNSMessage) (int, error) { + if etpo, err := publicsuffix.EffectiveTLDPlusOne(dm.DNS.Qname); err == nil { + dm.DNS.Qname = etpo } + return ReturnKeep, nil } diff --git a/transformers/userprivacy_test.go b/transformers/userprivacy_test.go index 5970d204..675c40c0 100644 --- a/transformers/userprivacy_test.go +++ b/transformers/userprivacy_test.go @@ -9,148 +9,262 @@ import ( ) var ( - TestIP4 = "192.168.1.2" - TestIP6 = "fe80::6111:626:c1b2:2353" + TestIP4 = "192.168.1.2" + TestIP6 = "fe80::6111:626:c1b2:2353" + IPv6ShortND = "fe80::" ) -func TestUserPrivacy_ReduceQname(t *testing.T) { - // enable feature +// bench +func BenchmarkUserPrivacy_ReduceQname(b *testing.B) { config := pkgconfig.GetFakeConfigTransformers() config.UserPrivacy.Enable = true config.UserPrivacy.MinimazeQname = true - log := logger.New(false) - outChans := []chan dnsutils.DNSMessage{} - - // init the processor - userPrivacy := NewUserPrivacySubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + channels := []chan dnsutils.DNSMessage{} - qname := "www.google.com" - ret := userPrivacy.MinimazeQname(qname) - if ret != "google.com" { - t.Errorf("Qname minimization failed, got %s", ret) - } + userprivacy := NewUserPrivacyTransform(config, logger.New(false), "test", 0, channels) + userprivacy.GetTransforms() - qname = "localhost" - ret = userPrivacy.MinimazeQname(qname) - if ret != "localhost" { - t.Errorf("Qname minimization failed, got %s", ret) - } + dm := dnsutils.GetFakeDNSMessage() + dm.DNS.Qname = "localhost.domain.local.home" - qname = 
"localhost.domain.local.home" - ret = userPrivacy.MinimazeQname(qname) - if ret != "local.home" { - t.Errorf("Qname minimization failed, got %s", ret) + b.ResetTimer() + for i := 0; i < b.N; i++ { + userprivacy.minimazeQname(&dm) } } -func TestUserPrivacy_HashIPDefault(t *testing.T) { - // enable feature +func BenchmarkUserPrivacy_HashIP(b *testing.B) { config := pkgconfig.GetFakeConfigTransformers() config.UserPrivacy.Enable = true - config.UserPrivacy.HashIP = true + config.UserPrivacy.HashQueryIP = true - log := logger.New(false) - outChans := []chan dnsutils.DNSMessage{} + channels := []chan dnsutils.DNSMessage{} - // init the processor - userPrivacy := NewUserPrivacySubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + userprivacy := NewUserPrivacyTransform(config, logger.New(false), "test", 0, channels) + userprivacy.GetTransforms() - ret := userPrivacy.HashIP(TestIP4) - if ret != "c0ca1efec6aaf505e943397662c28f89ac8f3bc2" { - t.Errorf("IP hashing failed, got %s", ret) + dm := dnsutils.GetFakeDNSMessage() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + userprivacy.hashQueryIP(&dm) } } -func TestUserPrivacy_HashIPSha512(t *testing.T) { - // enable feature +func BenchmarkUserPrivacy_HashIPSha512(b *testing.B) { config := pkgconfig.GetFakeConfigTransformers() config.UserPrivacy.Enable = true - config.UserPrivacy.HashIP = true + config.UserPrivacy.HashQueryIP = true config.UserPrivacy.HashIPAlgo = "sha512" - log := logger.New(false) - outChans := []chan dnsutils.DNSMessage{} + channels := []chan dnsutils.DNSMessage{} - // init the processor - userPrivacy := NewUserPrivacySubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + userprivacy := NewUserPrivacyTransform(config, logger.New(false), "test", 0, channels) + userprivacy.GetTransforms() + + dm := dnsutils.GetFakeDNSMessage() - ret := userPrivacy.HashIP(TestIP4) - if ret != 
"800e8f97a29404b7031dfb8d7185b2d30a3cd326b535cda3dcec20a0f4749b1099f98e49245d67eb188091adfba9a45dc0c15e612b554ae7181d8f8a479b67a0" { - t.Errorf("IP hashing failed, got %s", ret) + b.ResetTimer() + for i := 0; i < b.N; i++ { + userprivacy.hashQueryIP(&dm) } } -func TestUserPrivacy_AnonymizeIPv4DefaultMask(t *testing.T) { - // enable feature +func BenchmarkUserPrivacy_AnonymizeIP(b *testing.B) { config := pkgconfig.GetFakeConfigTransformers() config.UserPrivacy.Enable = true config.UserPrivacy.AnonymizeIP = true - log := logger.New(false) - outChans := []chan dnsutils.DNSMessage{} + channels := []chan dnsutils.DNSMessage{} - // init the processor - userPrivacy := NewUserPrivacySubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + userprivacy := NewUserPrivacyTransform(config, logger.New(false), "test", 0, channels) + userprivacy.GetTransforms() + + dm := dnsutils.GetFakeDNSMessage() - ret := userPrivacy.AnonymizeIP(TestIP4) - if ret != "192.168.0.0" { - t.Errorf("Ipv4 anonymization failed, got %s", ret) + b.ResetTimer() + for i := 0; i < b.N; i++ { + userprivacy.anonymizeQueryIP(&dm) } } -func TestUserPrivacy_AnonymizeIPv6DefaultMask(t *testing.T) { +// other tests +func TestUserPrivacy_ReduceQname(t *testing.T) { // enable feature config := pkgconfig.GetFakeConfigTransformers() config.UserPrivacy.Enable = true - config.UserPrivacy.AnonymizeIP = true + config.UserPrivacy.MinimazeQname = true - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} // init the processor - userPrivacy := NewUserPrivacySubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + userPrivacy := NewUserPrivacyTransform(config, logger.New(false), "test", 0, outChans) + userPrivacy.GetTransforms() + + // Define test cases + testCases := []struct { + input string + expected string + returnCode int + }{ + {"www.google.com", "google.com", ReturnKeep}, + {"localhost", "localhost", ReturnKeep}, + {"null", "null", ReturnKeep}, + 
{"invalid", "invalid", ReturnKeep}, + {"localhost.domain.local.home", "local.home", ReturnKeep}, + } - ret := userPrivacy.AnonymizeIP(TestIP6) - if ret != "fe80::" { - t.Errorf("Ipv6 anonymization failed, got %s", ret) + // Execute test cases + for _, tc := range testCases { + t.Run(tc.input, func(t *testing.T) { + dm := dnsutils.GetFakeDNSMessage() + dm.DNS.Qname = tc.input + + returnCode, err := userPrivacy.minimazeQname(&dm) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + if dm.DNS.Qname != tc.expected { + t.Errorf("Qname minimization failed, got %s, want %s", dm.DNS.Qname, tc.expected) + } + + if returnCode != tc.returnCode { + t.Errorf("Return code is %v, want %v", returnCode, tc.returnCode) + } + }) } } -func TestUserPrivacy_AnonymizeIPv4RemoveIP(t *testing.T) { - // enable feature +func TestUserPrivacy_HashIP(t *testing.T) { + // Define test cases + testCases := []struct { + name string + inputIP string + expectedIP string + hashAlgo string + }{ + {"Hash IP Default", TestIP4, "c0ca1efec6aaf505e943397662c28f89ac8f3bc2", ""}, + {"Hash IP Sha512", TestIP4, "800e8f97a29404b7031dfb8d7185b2d30a3cd326b535cda3dcec20a0f4749b1099f98e49245d67eb188091adfba9a45dc0c15e612b554ae7181d8f8a479b67a0", "sha512"}, + } + + // Execute test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Enable feature and set specific hash algorithm if provided + config := pkgconfig.GetFakeConfigTransformers() + config.UserPrivacy.Enable = true + config.UserPrivacy.HashQueryIP = true + if tc.hashAlgo != "" { + config.UserPrivacy.HashIPAlgo = tc.hashAlgo + } + + outChans := []chan dnsutils.DNSMessage{} + + // Init the processor + userPrivacy := NewUserPrivacyTransform(config, logger.New(false), "test", 0, outChans) + userPrivacy.GetTransforms() + + dm := dnsutils.GetFakeDNSMessage() + dm.NetworkInfo.QueryIP = tc.inputIP + + returnCode, err := userPrivacy.hashQueryIP(&dm) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + + if 
dm.NetworkInfo.QueryIP != tc.expectedIP { + t.Errorf("IP hashing failed, got %s, want %s", dm.NetworkInfo.QueryIP, tc.expectedIP) + } + + if returnCode != ReturnKeep { + t.Errorf("Return code is %v, want %v", returnCode, ReturnKeep) + } + }) + } +} + +func TestUserPrivacy_AnonymizeIP(t *testing.T) { + // Enable feature config := pkgconfig.GetFakeConfigTransformers() config.UserPrivacy.Enable = true config.UserPrivacy.AnonymizeIP = true - config.UserPrivacy.AnonymizeIPV4Bits = "/0" - log := logger.New(false) outChans := []chan dnsutils.DNSMessage{} - // init the processor - userPrivacy := NewUserPrivacySubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + // Init the processor + userPrivacy := NewUserPrivacyTransform(config, logger.New(false), "test", 0, outChans) + userPrivacy.GetTransforms() + + // Define test cases + testCases := []struct { + name string + inputIP string + expected string + expectErr bool + returnCode int + }{ + {"IPv4 Default Mask", "192.168.1.2", "192.168.0.0", false, ReturnKeep}, + {"IPv6 Default Mask", "fe80::6111:626:c1b2:2353", "fe80::", false, ReturnKeep}, + {"Invalid ip", "xxxxxxxxxxx", "xxxxxxxxxxx", true, ReturnKeep}, + } - ret := userPrivacy.AnonymizeIP(TestIP4) - if ret != "0.0.0.0" { - t.Errorf("Ipv4 anonymization failed with mask %s, got %s", config.UserPrivacy.AnonymizeIPV4Bits, ret) + // Execute test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + dm := dnsutils.GetFakeDNSMessage() + dm.NetworkInfo.QueryIP = tc.inputIP + + returnCode, err := userPrivacy.anonymizeQueryIP(&dm) + if err != nil && !tc.expectErr { + t.Fatalf("Unexpected error: %v", err) + } + + if dm.NetworkInfo.QueryIP != tc.expected { + t.Errorf("%s anonymization failed, got %s, want %s", tc.name, dm.NetworkInfo.QueryIP, tc.expected) + } + + if returnCode != tc.returnCode { + t.Errorf("Return code is %v, want %v", returnCode, tc.returnCode) + } + }) } } -func TestUserPrivacy_AnonymizeIPv6RemoveIP(t 
*testing.T) { - // enable feature +func TestUserPrivacy_AnonymizeIPRemove(t *testing.T) { + // Enable feature and set specific IP anonymization mask config := pkgconfig.GetFakeConfigTransformers() config.UserPrivacy.Enable = true config.UserPrivacy.AnonymizeIP = true + config.UserPrivacy.AnonymizeIPV4Bits = "/0" config.UserPrivacy.AnonymizeIPV6Bits = "::/0" - log := logger.New(false) - outChans := []chan dnsutils.DNSMessage{} + // Init the processor + userPrivacy := NewUserPrivacyTransform(config, logger.New(false), "test", 0, []chan dnsutils.DNSMessage{}) + userPrivacy.GetTransforms() + + // Define test cases + testCases := []struct { + name string + inputIP string + expectedIP string + }{ + {"IPv4 Remove IP", TestIP4, "0.0.0.0"}, + {"IPv6 Remove IP", TestIP6, "::"}, + } - // init the processor - userPrivacy := NewUserPrivacySubprocessor(config, logger.New(false), "test", 0, outChans, log.Info, log.Error) + // Execute test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + dm := dnsutils.GetFakeDNSMessage() + dm.NetworkInfo.QueryIP = tc.inputIP - ret := userPrivacy.AnonymizeIP(TestIP6) - if ret != "::" { - t.Errorf("Ipv6 anonymization failed, got %s", ret) + userPrivacy.anonymizeQueryIP(&dm) + if dm.NetworkInfo.QueryIP != tc.expectedIP { + t.Errorf("anonymization failed got %s, want %s", dm.NetworkInfo.QueryIP, tc.expectedIP) + } + }) } } diff --git a/workers/clickhouse.go b/workers/clickhouse.go new file mode 100644 index 00000000..02b985e0 --- /dev/null +++ b/workers/clickhouse.go @@ -0,0 +1,150 @@ +package workers + +import ( + "net/http" + "strconv" + "time" + + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" +) + +var ( + separator = "','" +) + +type ClickhouseData struct { + Identity string `json:"identity"` + QueryIP string `json:"query_ip"` + QName string `json:"q_name"` + Operation string `json:"operation"` + Family 
string `json:"family"` + Protocol string `json:"protocol"` + QType string `json:"q_type"` + RCode string `json:"r_code"` + TimeNSec string `json:"timensec"` + TimeStamp string `json:"timestamp"` +} + +type ClickhouseClient struct { + *GenericWorker +} + +func NewClickhouseClient(config *pkgconfig.Config, console *logger.Logger, name string) *ClickhouseClient { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.ClickhouseClient.ChannelBufferSize > 0 { + bufSize = config.Loggers.ClickhouseClient.ChannelBufferSize + } + w := &ClickhouseClient{GenericWorker: NewGenericWorker(config, console, name, "clickhouse", bufSize, pkgconfig.DefaultMonitor)} + w.ReadConfig() + return w +} + +func (w *ClickhouseClient) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare transforms + subprocessors := transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0) + + // goroutine to process transformed dns messages + go w.StartLogging() + + // loop to process incoming messages + for { + select { + case <-w.OnStop(): + w.StopLogger() + subprocessors.Reset() + return + + // new config provided? 
+ case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.ReadConfig() + subprocessors.ReloadConfig(&cfg.OutgoingTransformers) + + case dm, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("input channel closed!") + return + } + // count global messages + w.CountIngressTraffic() + + // apply tranforms, init dns message with additionnals parts if necessary + transformResult, err := subprocessors.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // send to output channel + w.CountEgressTraffic() + w.GetOutputChannel() <- dm + + // send to next ? + w.SendForwardedTo(defaultRoutes, defaultNames, dm) + + } + } +} + +func (w *ClickhouseClient) StartLogging() { + w.LogInfo("logging has started") + defer w.LoggingDone() + + for { + select { + case <-w.OnLoggerStopped(): + return + + // incoming dns message to process + case dm, opened := <-w.GetOutputChannel(): + if !opened { + w.LogInfo("output channel closed!") + return + } + t, err := time.Parse(time.RFC3339, dm.DNSTap.TimestampRFC3339) + timensec := "" + if err == nil { + timensec = strconv.Itoa(int(t.UnixNano())) + } + data := ClickhouseData{ + Identity: dm.DNSTap.Identity, + QueryIP: dm.NetworkInfo.QueryIP, + QName: dm.DNS.Qname, + Operation: dm.DNSTap.Operation, + Family: dm.NetworkInfo.Family, + Protocol: dm.NetworkInfo.Protocol, + QType: dm.DNS.Qtype, + RCode: dm.DNS.Rcode, + TimeNSec: timensec, + TimeStamp: strconv.Itoa(int(int64(dm.DNSTap.TimeSec))), + } + url := w.GetConfig().Loggers.ClickhouseClient.URL + "?query=INSERT%20INTO%20" + url += w.GetConfig().Loggers.ClickhouseClient.Database + "." 
+ w.GetConfig().Loggers.ClickhouseClient.Table + url += "(identity,queryip,qname,operation,family,protocol,qtype,rcode,timensec,timestamp)%20VALUES%20('" + data.Identity + separator + url += data.QueryIP + separator + data.QName + separator + data.Operation + separator + data.Family + separator + data.Protocol + url += separator + data.QType + separator + data.RCode + separator + data.TimeNSec + separator + data.TimeStamp + "')" + req, _ := http.NewRequest("POST", url, nil) + + req.Header.Add("Accept", "*/*") + req.Header.Add("X-ClickHouse-User", w.GetConfig().Loggers.ClickhouseClient.User) + req.Header.Add("X-ClickHouse-Key", w.GetConfig().Loggers.ClickhouseClient.Password) + + _, errReq := http.DefaultClient.Do(req) + if errReq != nil { + w.LogError(errReq.Error()) + } + } + } +} diff --git a/workers/clickhouse_test.go b/workers/clickhouse_test.go new file mode 100644 index 00000000..dae0bdcb --- /dev/null +++ b/workers/clickhouse_test.go @@ -0,0 +1,67 @@ +package workers + +import ( + "bufio" + "net" + "net/http" + "regexp" + "testing" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" +) + +func Test_ClickhouseClient(t *testing.T) { + + testcases := []struct { + mode string + pattern string + }{ + { + mode: pkgconfig.ModeJSON, + pattern: pkgconfig.ProgQname, + }, + } + cfg := pkgconfig.GetDefaultConfig() + cfg.Loggers.ClickhouseClient.URL = "http://127.0.0.1:8123" + cfg.Loggers.ClickhouseClient.User = "default" + cfg.Loggers.ClickhouseClient.Password = "password" + cfg.Loggers.ClickhouseClient.Database = "database" + cfg.Loggers.ClickhouseClient.Table = "table" + fakeRcvr, err := net.Listen("tcp", "127.0.0.1:8123") + if err != nil { + t.Fatal(err) + } + defer fakeRcvr.Close() + + for _, tc := range testcases { + t.Run(tc.mode, func(t *testing.T) { + g := NewClickhouseClient(cfg, logger.New(false), "test") + + go g.StartCollect() + + dm := 
dnsutils.GetFakeDNSMessage() + g.GetInputChannel() <- dm + // accept conn + conn, err := fakeRcvr.Accept() + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + // read and parse http request on server side + request, err := http.ReadRequest(bufio.NewReader(conn)) + if err != nil { + t.Fatal(err) + } + query := request.URL.Query().Get("query") + conn.Write([]byte(pkgconfig.HTTPOK)) + + pattern := regexp.MustCompile(tc.pattern) + if !pattern.MatchString(query) { + t.Errorf("clickhouse test error want %s, got: %s", tc.pattern, query) + } + }) + } +} diff --git a/workers/devnull.go b/workers/devnull.go new file mode 100644 index 00000000..d98600e9 --- /dev/null +++ b/workers/devnull.go @@ -0,0 +1,43 @@ +package workers + +import ( + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" +) + +type DevNull struct { + *GenericWorker +} + +func NewDevNull(config *pkgconfig.Config, console *logger.Logger, name string) *DevNull { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.DevNull.ChannelBufferSize > 0 { + bufSize = config.Loggers.DevNull.ChannelBufferSize + } + s := &DevNull{GenericWorker: NewGenericWorker(config, console, name, "devnull", bufSize, pkgconfig.DefaultMonitor)} + s.ReadConfig() + return s +} + +func (w *DevNull) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // loop to process incoming messages + for { + select { + case <-w.OnStop(): + return + + case _, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("run: input channel closed!") + return + } + + // count global messages + w.CountIngressTraffic() + + } + } +} diff --git a/workers/dnsmessage.go b/workers/dnsmessage.go new file mode 100644 index 00000000..d338e052 --- /dev/null +++ b/workers/dnsmessage.go @@ -0,0 +1,255 @@ +package workers + +import ( + "bufio" + "fmt" + "net/http" + "os" + "reflect" + "regexp" + "strings" + + "github.com/dmachard/go-dnscollector/dnsutils" + 
"github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" +) + +func isFileSource(matchSource string) bool { + return strings.HasPrefix(matchSource, "file://") +} + +func isURLSource(matchSource string) bool { + return strings.HasPrefix(matchSource, "http://") || strings.HasPrefix(matchSource, "https://") +} + +type MatchSource struct { + regexList []*regexp.Regexp + stringList []string +} + +type DNSMessage struct { + *GenericWorker +} + +func NewDNSMessage(next []Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *DNSMessage { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Collectors.DNSMessage.ChannelBufferSize > 0 { + bufSize = config.Collectors.DNSMessage.ChannelBufferSize + } + s := &DNSMessage{GenericWorker: NewGenericWorker(config, logger, name, "dnsmessage", bufSize, pkgconfig.DefaultMonitor)} + s.SetDefaultRoutes(next) + s.ReadConfig() + return s +} + +func (w *DNSMessage) ReadConfigMatching(value interface{}) { + reflectedValue := reflect.ValueOf(value) + if reflectedValue.Kind() == reflect.Map { + keys := reflectedValue.MapKeys() + matchSrc := "" + srcKind := dnsutils.MatchingKindString + for _, k := range keys { + v := reflectedValue.MapIndex(k) + if k.Interface().(string) == "match-source" { + matchSrc = v.Interface().(string) + } + if k.Interface().(string) == "source-kind" { + srcKind = v.Interface().(string) + } + } + if len(matchSrc) > 0 { + sourceData, err := w.LoadData(matchSrc, srcKind) + if err != nil { + w.LogFatal(err) + } + if len(sourceData.regexList) > 0 { + value.(map[interface{}]interface{})[srcKind] = sourceData.regexList + } + if len(sourceData.stringList) > 0 { + value.(map[interface{}]interface{})[srcKind] = sourceData.stringList + } + } + } +} + +func (w *DNSMessage) ReadConfig() { + // load external file for include + if len(w.GetConfig().Collectors.DNSMessage.Matching.Include) > 0 { + for _, 
value := range w.GetConfig().Collectors.DNSMessage.Matching.Include { + w.ReadConfigMatching(value) + } + } + // load external file for exclude + if len(w.GetConfig().Collectors.DNSMessage.Matching.Exclude) > 0 { + for _, value := range w.GetConfig().Collectors.DNSMessage.Matching.Exclude { + w.ReadConfigMatching(value) + } + } +} + +func (w *DNSMessage) LoadData(matchSource string, srcKind string) (MatchSource, error) { + if isFileSource(matchSource) { + dataSource, err := w.LoadFromFile(matchSource, srcKind) + if err != nil { + w.LogFatal(err) + } + return dataSource, nil + } else if isURLSource(matchSource) { + dataSource, err := w.LoadFromURL(matchSource, srcKind) + if err != nil { + w.LogFatal(err) + } + return dataSource, nil + } + return MatchSource{}, fmt.Errorf("match source not supported %s", matchSource) +} + +func (w *DNSMessage) LoadFromURL(matchSource string, srcKind string) (MatchSource, error) { + w.LogInfo("loading matching source from url=%s", matchSource) + resp, err := http.Get(matchSource) + if err != nil { + return MatchSource{}, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return MatchSource{}, fmt.Errorf("invalid status code: %d", resp.StatusCode) + } + + matchSources := MatchSource{} + scanner := bufio.NewScanner(resp.Body) + + switch srcKind { + case dnsutils.MatchingKindRegexp: + for scanner.Scan() { + matchSources.regexList = append(matchSources.regexList, regexp.MustCompile(scanner.Text())) + } + w.LogInfo("remote source loaded with %d entries kind=%s", len(matchSources.regexList), srcKind) + case dnsutils.MatchingKindString: + for scanner.Scan() { + matchSources.stringList = append(matchSources.stringList, scanner.Text()) + } + w.LogInfo("remote source loaded with %d entries kind=%s", len(matchSources.stringList), srcKind) + } + + return matchSources, nil +} + +func (w *DNSMessage) LoadFromFile(filePath string, srcKind string) (MatchSource, error) { + localFile := strings.TrimPrefix(filePath, "file://") 
+ + w.LogInfo("loading matching source from file=%s", localFile) + file, err := os.Open(localFile) + if err != nil { + return MatchSource{}, fmt.Errorf("unable to open file: %w", err) + } + + matchSources := MatchSource{} + scanner := bufio.NewScanner(file) + + switch srcKind { + case dnsutils.MatchingKindRegexp: + for scanner.Scan() { + matchSources.regexList = append(matchSources.regexList, regexp.MustCompile(scanner.Text())) + } + w.LogInfo("file loaded with %d entries kind=%s", len(matchSources.regexList), srcKind) + case dnsutils.MatchingKindString: + for scanner.Scan() { + matchSources.stringList = append(matchSources.stringList, scanner.Text()) + } + w.LogInfo("file loaded with %d entries kind=%s", len(matchSources.stringList), srcKind) + } + + return matchSources, nil +} + +func (w *DNSMessage) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + var err error + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare transforms + subprocessors := transformers.NewTransforms(&w.GetConfig().IngoingTransformers, w.GetLogger(), w.GetName(), defaultRoutes, 0) + + // read incoming dns message + w.LogInfo("waiting dns message to process...") + for { + select { + case <-w.OnStop(): + return + + // save the new config + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.ReadConfig() + + case dm, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("channel closed, exit") + return + } + // count global messages + w.CountIngressTraffic() + + // matching enabled, filtering DNS messages ? 
+ matched := true + matchedInclude := false + matchedExclude := false + + if len(w.GetConfig().Collectors.DNSMessage.Matching.Include) > 0 { + err, matchedInclude = dm.Matching(w.GetConfig().Collectors.DNSMessage.Matching.Include) + if err != nil { + w.LogError(err.Error()) + } + if matched && matchedInclude { + matched = true + } else { + matched = false + } + } + + if len(w.GetConfig().Collectors.DNSMessage.Matching.Exclude) > 0 { + err, matchedExclude = dm.Matching(w.GetConfig().Collectors.DNSMessage.Matching.Exclude) + if err != nil { + w.LogError(err.Error()) + } + if matched && !matchedExclude { + matched = true + } else { + matched = false + } + } + + // count output packets + w.CountEgressTraffic() + + // apply tranforms on matched packets only + // init dns message with additionnals parts if necessary + if matched { + transformResult, err := subprocessors.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + } + + // drop packet ? 
+ if !matched { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // send to next + w.SendForwardedTo(defaultRoutes, defaultNames, dm) + } + } +} diff --git a/workers/dnsmessage_test.go b/workers/dnsmessage_test.go new file mode 100644 index 00000000..0ed65fc4 --- /dev/null +++ b/workers/dnsmessage_test.go @@ -0,0 +1,119 @@ +package workers + +import ( + "fmt" + "regexp" + "testing" + "time" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" +) + +func TestDnsMessage_RoutingPolicy(t *testing.T) { + // simulate next workers + kept := GetWorkerForTest(pkgconfig.DefaultBufferSize) + dropped := GetWorkerForTest(pkgconfig.DefaultBufferSize) + + // config for the collector + config := pkgconfig.GetDefaultConfig() + config.Collectors.DNSMessage.Enable = true + config.Collectors.DNSMessage.Matching.Include = map[string]interface{}{ + "dns.qname": "dns.collector", + } + + // init the collector + c := NewDNSMessage(nil, config, logger.New(false), "test") + c.SetDefaultRoutes([]Worker{kept}) + c.SetDefaultDropped([]Worker{dropped}) + + // start to collect and send DNS messages on it + go c.StartCollect() + + // this message should be kept by the collector + dm := dnsutils.GetFakeDNSMessage() + c.GetInputChannel() <- dm + + // this message should dropped by the collector + dm.DNS.Qname = "dropped.collector" + c.GetInputChannel() <- dm + + // the 1er message should be in th k worker + dmKept := <-kept.GetInputChannel() + if dmKept.DNS.Qname != "dns.collector" { + t.Errorf("invalid dns message with default routing policy") + } + + // the 2nd message should be in the d worker + dmDropped := <-dropped.GetInputChannel() + if dmDropped.DNS.Qname != "dropped.collector" { + t.Errorf("invalid dns message with dropped routing policy") + } + +} + +func TestDnsMessage_BufferLoggerIsFull(t *testing.T) { + // redirect stdout output to bytes buffer + logsChan := 
make(chan logger.LogEntry, 50) + lg := logger.New(true) + lg.SetOutputChannel((logsChan)) + + // init the collector and run-it + config := pkgconfig.GetDefaultConfig() + c := NewDNSMessage(nil, config, lg, "test") + + // init next logger with a buffer of one element + nxt := GetWorkerForTest(1) + c.AddDefaultRoute(nxt) + + // run collector + go c.StartCollect() + + // add a shot of dnsmessages to collector + dmIn := dnsutils.GetFakeDNSMessage() + for i := 0; i < 512; i++ { + c.GetInputChannel() <- dmIn + } + + // waiting monitor to run in consumer + time.Sleep(12 * time.Second) + + for entry := range logsChan { + fmt.Println(entry) + pattern := regexp.MustCompile(pkgconfig.ExpectedBufferMsg511) + if pattern.MatchString(entry.Message) { + break + } + } + + // read dnsmessage from next logger + dmOut := <-nxt.GetInputChannel() + if dmOut.DNS.Qname != pkgconfig.ExpectedQname2 { + t.Errorf("invalid qname in dns message: %s", dmOut.DNS.Qname) + } + + // send second shot of packets to consumer + for i := 0; i < 1024; i++ { + c.GetInputChannel() <- dmIn + } + + // waiting monitor to run in consumer + time.Sleep(12 * time.Second) + + for entry := range logsChan { + fmt.Println(entry) + pattern := regexp.MustCompile(pkgconfig.ExpectedBufferMsg1023) + if pattern.MatchString(entry.Message) { + break + } + } + // read dnsmessage from next logger + dm2 := <-nxt.GetInputChannel() + if dm2.DNS.Qname != pkgconfig.ExpectedQname2 { + t.Errorf("invalid qname in dns message: %s", dm2.DNS.Qname) + } + + // stop all + c.Stop() +} diff --git a/workers/dnsprocessor.go b/workers/dnsprocessor.go new file mode 100644 index 00000000..a5ff1bad --- /dev/null +++ b/workers/dnsprocessor.go @@ -0,0 +1,109 @@ +package workers + +import ( + "fmt" + "time" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" +) + +type DNSProcessor 
struct { + *GenericWorker +} + +func NewDNSProcessor(config *pkgconfig.Config, logger *logger.Logger, name string, size int) DNSProcessor { + w := DNSProcessor{GenericWorker: NewGenericWorker(config, logger, name, "dns processor", size, pkgconfig.DefaultMonitor)} + return w +} + +func (w *DNSProcessor) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare enabled transformers + transforms := transformers.NewTransforms(&w.GetConfig().IngoingTransformers, w.GetLogger(), w.GetName(), defaultRoutes, 0) + + // read incoming dns message + for { + select { + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + transforms.ReloadConfig(&cfg.IngoingTransformers) + + case <-w.OnStop(): + transforms.Reset() + return + + case dm, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("channel closed, exit") + return + } + // count global messages + w.CountIngressTraffic() + + // compute timestamp + ts := time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec)) + dm.DNSTap.Timestamp = ts.UnixNano() + dm.DNSTap.TimestampRFC3339 = ts.UTC().Format(time.RFC3339Nano) + + // decode the dns payload + dnsHeader, err := dnsutils.DecodeDNS(dm.DNS.Payload) + if err != nil { + dm.DNS.MalformedPacket = true + w.LogError("dns parser malformed packet: %s - %v+", err, dm) + } + + // dns reply ? 
+ if dnsHeader.Qr == 1 { + dm.DNSTap.Operation = "CLIENT_RESPONSE" + dm.DNS.Type = dnsutils.DNSReply + qip := dm.NetworkInfo.QueryIP + qport := dm.NetworkInfo.QueryPort + dm.NetworkInfo.QueryIP = dm.NetworkInfo.ResponseIP + dm.NetworkInfo.QueryPort = dm.NetworkInfo.ResponsePort + dm.NetworkInfo.ResponseIP = qip + dm.NetworkInfo.ResponsePort = qport + } else { + dm.DNS.Type = dnsutils.DNSQuery + dm.DNSTap.Operation = dnsutils.DNSTapClientQuery + } + + if err = dnsutils.DecodePayload(&dm, &dnsHeader, w.GetConfig()); err != nil { + w.LogError("%v - %v", err, dm) + } + + if dm.DNS.MalformedPacket { + if w.GetConfig().Global.Trace.LogMalformed { + w.LogInfo("payload: %v", dm.DNS.Payload) + } + } + + // count output packets + w.CountEgressTraffic() + + // apply all enabled transformers + transformResult, err := transforms.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // convert latency to human + dm.DNSTap.LatencySec = fmt.Sprintf("%.6f", dm.DNSTap.Latency) + + // dispatch dns message to all generators + w.SendForwardedTo(defaultRoutes, defaultNames, dm) + } + } +} diff --git a/processors/dns_test.go b/workers/dnsprocessor_test.go similarity index 65% rename from processors/dns_test.go rename to workers/dnsprocessor_test.go index ec213e03..830e35d3 100644 --- a/processors/dns_test.go +++ b/workers/dnsprocessor_test.go @@ -1,4 +1,4 @@ -package processors +package workers import ( "bytes" @@ -9,7 +9,6 @@ import ( "github.com/dmachard/go-dnscollector/dnsutils" "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" "github.com/dmachard/go-logger" ) @@ -19,17 +18,19 @@ func Test_DnsProcessor(t *testing.T) { logger.SetOutput(&o) // init and run the dns processor - consumer := NewDNSProcessor(pkgconfig.GetFakeConfig(), logger, "test", 512) + fl := 
GetWorkerForTest(pkgconfig.DefaultBufferSize) - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + consumer := NewDNSProcessor(pkgconfig.GetDefaultConfig(), logger, "test", 512) + consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) + go consumer.StartCollect() dm := dnsutils.GetFakeDNSMessageWithPayload() - consumer.GetChannel() <- dm + consumer.GetInputChannel() <- dm // read dns message from dnstap consumer dmOut := <-fl.GetInputChannel() - if dmOut.DNS.Qname != ExpectedQname { + if dmOut.DNS.Qname != pkgconfig.ExpectedQname { t.Errorf("invalid qname in dns message: %s", dm.DNS.Qname) } } @@ -41,16 +42,17 @@ func Test_DnsProcessor_BufferLoggerIsFull(t *testing.T) { lg.SetOutputChannel((logsChan)) // init and run the dns processor - consumer := NewDNSProcessor(pkgconfig.GetFakeConfig(), lg, "test", 512) - - fl := pkgutils.NewFakeLoggerWithBufferSize(1) - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + fl := GetWorkerForTest(pkgconfig.DefaultBufferOne) + consumer := NewDNSProcessor(pkgconfig.GetDefaultConfig(), lg, "test", 512) + consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) + go consumer.StartCollect() dm := dnsutils.GetFakeDNSMessageWithPayload() // add packets to consumer for i := 0; i < 512; i++ { - consumer.GetChannel() <- dm + consumer.GetInputChannel() <- dm } // waiting monitor to run in consumer @@ -58,7 +60,7 @@ func Test_DnsProcessor_BufferLoggerIsFull(t *testing.T) { for entry := range logsChan { fmt.Println(entry) - pattern := regexp.MustCompile(ExpectedBufferMsg511) + pattern := regexp.MustCompile(pkgconfig.ExpectedBufferMsg511) if pattern.MatchString(entry.Message) { break } @@ -66,13 +68,13 @@ func Test_DnsProcessor_BufferLoggerIsFull(t *testing.T) { // read dns message from dnstap consumer dmOut := <-fl.GetInputChannel() - if dmOut.DNS.Qname != ExpectedQname { + if dmOut.DNS.Qname != pkgconfig.ExpectedQname { t.Errorf("invalid qname in dns message: %s", 
dmOut.DNS.Qname) } // send second shot of packets to consumer for i := 0; i < 1024; i++ { - consumer.GetChannel() <- dm + consumer.GetInputChannel() <- dm } // waiting monitor to run in consumer @@ -80,7 +82,7 @@ func Test_DnsProcessor_BufferLoggerIsFull(t *testing.T) { for entry := range logsChan { fmt.Println(entry) - pattern := regexp.MustCompile(ExpectedBufferMsg1023) + pattern := regexp.MustCompile(pkgconfig.ExpectedBufferMsg1023) if pattern.MatchString(entry.Message) { break } @@ -88,7 +90,7 @@ func Test_DnsProcessor_BufferLoggerIsFull(t *testing.T) { // read dns message from dnstap consumer dm2 := <-fl.GetInputChannel() - if dm2.DNS.Qname != ExpectedQname { + if dm2.DNS.Qname != pkgconfig.ExpectedQname { t.Errorf("invalid qname in second dns message: %s", dm2.DNS.Qname) } } diff --git a/workers/dnstap_relay.go b/workers/dnstap_relay.go new file mode 100644 index 00000000..4677cdc0 --- /dev/null +++ b/workers/dnstap_relay.go @@ -0,0 +1,170 @@ +package workers + +import ( + "bufio" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-framestream" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" +) + +type DnstapProxifier struct { + *GenericWorker + connCounter uint64 +} + +func NewDnstapProxifier(next []Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *DnstapProxifier { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Collectors.DnstapProxifier.ChannelBufferSize > 0 { + bufSize = config.Collectors.DnstapProxifier.ChannelBufferSize + } + s := &DnstapProxifier{GenericWorker: NewGenericWorker(config, logger, name, "dnstaprelay", bufSize, pkgconfig.DefaultMonitor)} + s.SetDefaultRoutes(next) + s.CheckConfig() + return s +} + +func (w *DnstapProxifier) CheckConfig() { + if !netutils.IsValidTLS(w.GetConfig().Collectors.DnstapProxifier.TLSMinVersion) { + 
w.LogFatal(pkgconfig.PrefixLogWorker + "[" + w.GetName() + "] dnstaprelay - invalid tls min version") + } +} + +func (w *DnstapProxifier) HandleFrame(recvFrom chan []byte, sendTo []chan dnsutils.DNSMessage) { + defer w.LogInfo("frame handler terminated") + + dm := dnsutils.DNSMessage{} + + for data := range recvFrom { + // init DNS message container + dm.Init() + + // register payload + dm.DNSTap.Payload = data + + // forward to outputs + for i := range sendTo { + sendTo[i] <- dm + } + } +} + +func (w *DnstapProxifier) HandleConn(conn net.Conn, connID uint64, forceClose chan bool, wg *sync.WaitGroup) { + // close connection on function exit + defer func() { + w.LogInfo("conn #%d - connection handler terminated", connID) + conn.Close() + wg.Done() + }() + + // get peer address + peer := conn.RemoteAddr().String() + w.LogInfo("new connection from %s\n", peer) + + recvChan := make(chan []byte, 512) + defaultRoutes, _ := GetRoutes(w.GetDefaultRoutes()) + go w.HandleFrame(recvChan, defaultRoutes) + + // frame stream library + fsReader := bufio.NewReader(conn) + fsWriter := bufio.NewWriter(conn) + fs := framestream.NewFstrm(fsReader, fsWriter, conn, 5*time.Second, []byte("protobuf:dnstap.Dnstap"), true) + + // init framestream receiver + if err := fs.InitReceiver(); err != nil { + w.LogError("error stream receiver initialization: %s", err) + return + } else { + w.LogInfo("receiver framestream initialized") + } + + // goroutine to close the connection properly + cleanup := make(chan struct{}) + go func() { + defer w.LogInfo("conn #%d - cleanup connection handler terminated", connID) + + for { + select { + case <-forceClose: + w.LogInfo("conn #%d - force to cleanup the connection handler", connID) + conn.Close() + close(recvChan) + return + case <-cleanup: + w.LogInfo("conn #%d - cleanup the connection handler", connID) + close(recvChan) + return + } + } + }() + + // process incoming frame and send it to recv channel + err := fs.ProcessFrame(recvChan) + if err != nil { + 
if netutils.IsClosedConnectionError(err) { + w.LogInfo("conn #%d - connection closed with peer %s", connID, peer) + } else { + w.LogError("conn #%d - transport error: %s", connID, err) + } + + close(cleanup) + } +} + +func (w *DnstapProxifier) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + var connWG sync.WaitGroup + connCleanup := make(chan bool) + + // start to listen + listener, err := netutils.StartToListen( + w.GetConfig().Collectors.DnstapProxifier.ListenIP, w.GetConfig().Collectors.DnstapProxifier.ListenPort, + w.GetConfig().Collectors.DnstapProxifier.SockPath, + w.GetConfig().Collectors.DnstapProxifier.TLSSupport, netutils.TLSVersion[w.GetConfig().Collectors.DnstapProxifier.TLSMinVersion], + w.GetConfig().Collectors.DnstapProxifier.CertFile, w.GetConfig().Collectors.DnstapProxifier.KeyFile) + if err != nil { + w.LogFatal("collector dnstaprelay listening failed: ", err) + } + w.LogInfo("listening on %s", listener.Addr()) + + // goroutine to Accept() and blocks waiting for new connection. 
+ acceptChan := make(chan net.Conn) + netutils.AcceptConnections(listener, acceptChan) + + // main loop + for { + select { + case <-w.OnStop(): + w.LogInfo("stop to listen...") + listener.Close() + + w.LogInfo("closing connected peers...") + close(connCleanup) + connWG.Wait() + return + + // save the new config + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.CheckConfig() + + case conn, opened := <-acceptChan: + if !opened { + return + } + + // handle the connection + connWG.Add(1) + connID := atomic.AddUint64(&w.connCounter, 1) + go w.HandleConn(conn, connID, connCleanup, &connWG) + } + } +} diff --git a/collectors/dnstap_proxifier_test.go b/workers/dnstap_relay_test.go similarity index 72% rename from collectors/dnstap_proxifier_test.go rename to workers/dnstap_relay_test.go index 9da01071..7165c760 100644 --- a/collectors/dnstap_proxifier_test.go +++ b/workers/dnstap_relay_test.go @@ -1,22 +1,20 @@ -package collectors +package workers import ( "bufio" - "log" "net" "testing" "time" - "github.com/dmachard/go-dnscollector/netlib" + "github.com/dmachard/go-dnscollector/dnsutils" "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" "github.com/dmachard/go-framestream" "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" "google.golang.org/protobuf/proto" ) -func Test_DnstapProxifier(t *testing.T) { +func Test_DnstapRelay(t *testing.T) { testcases := []struct { name string mode string @@ -25,19 +23,19 @@ func Test_DnstapProxifier(t *testing.T) { }{ { name: "tcp_default", - mode: netlib.SocketTCP, + mode: netutils.SocketTCP, address: ":6000", listenPort: 0, }, { name: "tcp_custom_port", - mode: netlib.SocketTCP, + mode: netutils.SocketTCP, address: ":7100", listenPort: 7100, }, { name: "unix_default", - mode: netlib.SocketUnix, + mode: netutils.SocketUnix, address: 
"/tmp/dnscollector_relay.sock", listenPort: 0, }, @@ -45,23 +43,22 @@ func Test_DnstapProxifier(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - g := pkgutils.NewFakeLogger() + g := GetWorkerForTest(pkgconfig.DefaultBufferSize) - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() if tc.listenPort > 0 { config.Collectors.DnstapProxifier.ListenPort = tc.listenPort } - if tc.mode == netlib.SocketUnix { + if tc.mode == netutils.SocketUnix { config.Collectors.DnstapProxifier.SockPath = tc.address } - c := NewDnstapProxifier([]pkgutils.Worker{g}, config, logger.New(false), "test") - if err := c.Listen(); err != nil { - log.Fatal("collector dnstap relay error: ", err) - } - - go c.Run() + // start collector + c := NewDnstapProxifier([]Worker{g}, config, logger.New(false), "test") + go c.StartCollect() + // start client + time.Sleep(1 * time.Second) conn, err := net.Dial(tc.mode, tc.address) if err != nil { t.Error("could not connect: ", err) @@ -77,13 +74,13 @@ func Test_DnstapProxifier(t *testing.T) { frame := &framestream.Frame{} // get fake dns question - dnsquery, err := processors.GetFakeDNS() + dnsquery, err := dnsutils.GetFakeDNS() if err != nil { t.Fatalf("dns question pack error") } // get fake dnstap message - dtQuery := processors.GetFakeDNSTap(dnsquery) + dtQuery := GetFakeDNSTap(dnsquery) // serialize to bytes data, err := proto.Marshal(dtQuery) diff --git a/workers/dnstapclient.go b/workers/dnstapclient.go new file mode 100644 index 00000000..0c44d91d --- /dev/null +++ b/workers/dnstapclient.go @@ -0,0 +1,318 @@ +package workers + +import ( + "bufio" + "crypto/tls" + "net" + "strconv" + "time" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-framestream" + "github.com/dmachard/go-logger" + 
"github.com/dmachard/go-netutils" + "github.com/segmentio/kafka-go/compress" +) + +type DnstapSender struct { + *GenericWorker + fs *framestream.Fstrm + fsReady bool + transport string + transportConn net.Conn + transportReady, transportReconnect chan bool +} + +func NewDnstapSender(config *pkgconfig.Config, logger *logger.Logger, name string) *DnstapSender { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.DNSTap.ChannelBufferSize > 0 { + bufSize = config.Loggers.DNSTap.ChannelBufferSize + } + w := &DnstapSender{GenericWorker: NewGenericWorker(config, logger, name, "dnstap", bufSize, pkgconfig.DefaultMonitor)} + w.transportReady = make(chan bool) + w.transportReconnect = make(chan bool) + w.ReadConfig() + return w +} + +func (w *DnstapSender) ReadConfig() { + w.transport = w.GetConfig().Loggers.DNSTap.Transport + + // begin backward compatibility + if w.GetConfig().Loggers.DNSTap.TLSSupport { + w.transport = netutils.SocketTLS + } + if len(w.GetConfig().Loggers.DNSTap.SockPath) > 0 { + w.transport = netutils.SocketUnix + } + // end + + // get hostname or global one + if w.GetConfig().Loggers.DNSTap.ServerID == "" { + w.GetConfig().Loggers.DNSTap.ServerID = w.GetConfig().GetServerIdentity() + } + + if !netutils.IsValidTLS(w.GetConfig().Loggers.DNSTap.TLSMinVersion) { + w.LogFatal(pkgconfig.PrefixLogWorker + "invalid tls min version") + } +} + +func (w *DnstapSender) Disconnect() { + if w.transportConn != nil { + // reset framestream and ignore errors + w.LogInfo("closing framestream") + w.fs.ResetSender() + + // closing tcp + w.LogInfo("closing tcp connection") + w.transportConn.Close() + w.LogInfo("closed") + } +} + +func (w *DnstapSender) ConnectToRemote() { + for { + if w.transportConn != nil { + w.transportConn.Close() + w.transportConn = nil + } + + address := net.JoinHostPort( + w.GetConfig().Loggers.DNSTap.RemoteAddress, + strconv.Itoa(w.GetConfig().Loggers.DNSTap.RemotePort), + ) + connTimeout := 
time.Duration(w.GetConfig().Loggers.DNSTap.ConnectTimeout) * time.Second + + // make the connection + var conn net.Conn + var err error + + switch w.transport { + case netutils.SocketUnix: + address = w.GetConfig().Loggers.DNSTap.RemoteAddress + if len(w.GetConfig().Loggers.DNSTap.SockPath) > 0 { + address = w.GetConfig().Loggers.DNSTap.SockPath + } + w.LogInfo("connecting to %s://%s", w.transport, address) + conn, err = net.DialTimeout(w.transport, address, connTimeout) + + case netutils.SocketTCP: + w.LogInfo("connecting to %s://%s", w.transport, address) + conn, err = net.DialTimeout(w.transport, address, connTimeout) + + case netutils.SocketTLS: + w.LogInfo("connecting to %s://%s", w.transport, address) + + var tlsConfig *tls.Config + + tlsOptions := netutils.TLSOptions{ + InsecureSkipVerify: w.GetConfig().Loggers.DNSTap.TLSInsecure, MinVersion: w.GetConfig().Loggers.DNSTap.TLSMinVersion, + CAFile: w.GetConfig().Loggers.DNSTap.CAFile, CertFile: w.GetConfig().Loggers.DNSTap.CertFile, KeyFile: w.GetConfig().Loggers.DNSTap.KeyFile, + } + + tlsConfig, err = netutils.TLSClientConfig(tlsOptions) + if err == nil { + dialer := &net.Dialer{Timeout: connTimeout} + conn, err = tls.DialWithDialer(dialer, netutils.SocketTCP, address, tlsConfig) + } + default: + w.LogFatal("invalid transport:", w.transport) + } + + // something is wrong during connection ? 
+ if err != nil { + w.LogError("%s", err) + w.LogInfo("retry to connect in %d seconds", w.GetConfig().Loggers.DNSTap.RetryInterval) + time.Sleep(time.Duration(w.GetConfig().Loggers.DNSTap.RetryInterval) * time.Second) + continue + } + + w.transportConn = conn + + // block until framestream is ready + w.transportReady <- true + + // block until an error occurred, need to reconnect + w.transportReconnect <- true + } +} + +func (w *DnstapSender) FlushBuffer(buf *[]dnsutils.DNSMessage) { + + var data []byte + var err error + bulkFrame := &framestream.Frame{} + subFrame := &framestream.Frame{} + + for _, dm := range *buf { + // update identity ? + if w.GetConfig().Loggers.DNSTap.OverwriteIdentity { + dm.DNSTap.Identity = w.GetConfig().Loggers.DNSTap.ServerID + } + + // encode dns message to dnstap protobuf binary + data, err = dm.ToDNSTap(w.GetConfig().Loggers.DNSTap.ExtendedSupport) + if err != nil { + w.LogError("failed to encode to DNStap protobuf: %s", err) + continue + } + + if w.GetConfig().Loggers.DNSTap.Compression == pkgconfig.CompressNone { + // send the frame + bulkFrame.Write(data) + if err := w.fs.SendFrame(bulkFrame); err != nil { + w.LogError("send frame error %s", err) + w.fsReady = false + <-w.transportReconnect + break + } + } else { + subFrame.Write(data) + bulkFrame.AppendData(subFrame.Data()) + } + } + + if w.GetConfig().Loggers.DNSTap.Compression != pkgconfig.CompressNone { + bulkFrame.Encode() + if err := w.fs.SendCompressedFrame(&compress.GzipCodec, bulkFrame); err != nil { + w.LogError("send bulk frame error %s", err) + w.fsReady = false + <-w.transportReconnect + } + } + + // reset buffer + *buf = nil +} + +func (w *DnstapSender) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare transforms + subprocessors := 
transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0) + + // goroutine to process transformed dns messages + go w.StartLogging() + + // init remote conn + go w.ConnectToRemote() + + // loop to process incoming messages + for { + select { + case <-w.OnStop(): + w.StopLogger() + subprocessors.Reset() + return + + // new config provided? + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.ReadConfig() + subprocessors.ReloadConfig(&cfg.OutgoingTransformers) + + case dm, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("input channel closed!") + return + } + // count global messages + w.CountIngressTraffic() + + // apply tranforms, init dns message with additionnals parts if necessary + transformResult, err := subprocessors.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // send to output channel + w.CountEgressTraffic() + w.GetOutputChannel() <- dm + + // send to next ? 
+ w.SendForwardedTo(defaultRoutes, defaultNames, dm) + } + } +} + +func (w *DnstapSender) StartLogging() { + w.LogInfo("logging has started") + defer w.LoggingDone() + + // init buffer + bufferDm := []dnsutils.DNSMessage{} + + // init flust timer for buffer + flushInterval := time.Duration(w.GetConfig().Loggers.DNSTap.FlushInterval) * time.Second + flushTimer := time.NewTimer(flushInterval) + + w.LogInfo("ready to process") + for { + select { + case <-w.OnLoggerStopped(): + // closing remote connection if exist + w.Disconnect() + return + + // init framestream + case <-w.transportReady: + w.LogInfo("transport connected with success") + // frame stream library + fsReader := bufio.NewReader(w.transportConn) + fsWriter := bufio.NewWriter(w.transportConn) + w.fs = framestream.NewFstrm(fsReader, fsWriter, w.transportConn, 5*time.Second, []byte("protobuf:dnstap.Dnstap"), true) + + // init framestream protocol + if err := w.fs.InitSender(); err != nil { + w.LogError("sender protocol initialization error %s", err) + w.fsReady = false + w.transportConn.Close() + <-w.transportReconnect + } else { + w.fsReady = true + w.LogInfo("framestream initialized with success") + } + // incoming dns message to process + case dm, opened := <-w.GetOutputChannel(): + if !opened { + w.LogInfo("output channel closed!") + return + } + + // drop dns message if the connection is not ready to avoid memory leak or + // to block the channel + if !w.fsReady { + continue + } + + // append dns message to buffer + bufferDm = append(bufferDm, dm) + + // buffer is full ? 
+ if len(bufferDm) >= w.GetConfig().Loggers.DNSTap.BufferSize { + w.FlushBuffer(&bufferDm) + } + + // flush the buffer + case <-flushTimer.C: + // force to flush the buffer + if len(bufferDm) > 0 { + w.FlushBuffer(&bufferDm) + } + + // restart timer + flushTimer.Reset(flushInterval) + } + } +} diff --git a/loggers/dnstapclient_test.go b/workers/dnstapclient_test.go similarity index 50% rename from loggers/dnstapclient_test.go rename to workers/dnstapclient_test.go index f67b1623..feca43a4 100644 --- a/loggers/dnstapclient_test.go +++ b/workers/dnstapclient_test.go @@ -1,7 +1,8 @@ -package loggers +package workers import ( "bufio" + "encoding/binary" "net" "testing" "time" @@ -11,31 +12,45 @@ import ( "github.com/dmachard/go-dnstap-protobuf" "github.com/dmachard/go-framestream" "github.com/dmachard/go-logger" + "github.com/segmentio/kafka-go/compress" "google.golang.org/protobuf/proto" ) func Test_DnstapClient(t *testing.T) { testcases := []struct { - transport string - address string + name string + transport string + address string + compression string }{ { - transport: "tcp", - address: ":6000", + name: "dnstap_tcp", + transport: "tcp", + address: ":6000", + compression: "none", }, { - transport: "unix", - address: "/tmp/test.sock", + name: "dnstap_unix", + transport: "unix", + address: "/tmp/test.sock", + compression: "none", + }, + { + name: "dnstap_tcp_gzip_compress", + transport: "tcp", + address: ":6000", + compression: "gzip", }, } for _, tc := range testcases { - t.Run(tc.transport, func(t *testing.T) { + t.Run(tc.name, func(t *testing.T) { // init logger - cfg := pkgconfig.GetFakeConfig() + cfg := pkgconfig.GetDefaultConfig() cfg.Loggers.DNSTap.FlushInterval = 1 cfg.Loggers.DNSTap.BufferSize = 0 + cfg.Loggers.DNSTap.Compression = tc.compression if tc.transport == "unix" { cfg.Loggers.DNSTap.SockPath = tc.address } @@ -50,7 +65,7 @@ func Test_DnstapClient(t *testing.T) { defer fakeRcvr.Close() // start the logger - go 
g.Run() + go g.StartCollect() // accept conn from logger conn, err := fakeRcvr.Accept() @@ -73,15 +88,47 @@ func Test_DnstapClient(t *testing.T) { g.GetInputChannel() <- dm // receive frame on server side ?, timeout 5s - fs, err := fsSvr.RecvFrame(true) + var fs *framestream.Frame + if tc.compression == "gzip" { + fs, err = fsSvr.RecvCompressedFrame(&compress.GzipCodec, true) + } else { + fs, err = fsSvr.RecvFrame(true) + } if err != nil { t.Errorf("error to receive frame: %s", err) } // decode the dnstap message in server side dt := &dnstap.Dnstap{} - if err := proto.Unmarshal(fs.Data(), dt); err != nil { - t.Errorf("error to decode dnstap") + if cfg.Loggers.DNSTap.Compression == pkgconfig.CompressNone { + if err := proto.Unmarshal(fs.Data(), dt); err != nil { + t.Errorf("error to decode dnstap") + } + } else { + // ignore first 4 bytes + data := fs.Data()[4:] + validFrame := true + for len(data) >= 4 { + // get frame size + payloadSize := binary.BigEndian.Uint32(data[:4]) + data = data[4:] + + // enough next data ? 
+ if uint32(len(data)) < payloadSize { + validFrame = false + break + } + + if err := proto.Unmarshal(data[:payloadSize], dt); err != nil { + t.Errorf("error to decode dnstap from compressed frame") + } + + // continue for next + data = data[payloadSize:] + } + if !validFrame { + t.Errorf("invalid compressed frame") + } } }) } diff --git a/workers/dnstapserver.go b/workers/dnstapserver.go new file mode 100644 index 00000000..cb7c6ab9 --- /dev/null +++ b/workers/dnstapserver.go @@ -0,0 +1,545 @@ +package workers + +import ( + "bufio" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-dnstap-protobuf" + "github.com/dmachard/go-framestream" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" + "github.com/segmentio/kafka-go/compress" + "google.golang.org/protobuf/proto" +) + +type DnstapServer struct { + *GenericWorker + connCounter uint64 +} + +func NewDnstapServer(next []Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *DnstapServer { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Collectors.Dnstap.ChannelBufferSize > 0 { + bufSize = config.Collectors.Dnstap.ChannelBufferSize + } + w := &DnstapServer{GenericWorker: NewGenericWorker(config, logger, name, "dnstap", bufSize, pkgconfig.DefaultMonitor)} + w.SetDefaultRoutes(next) + w.CheckConfig() + return w +} + +func (w *DnstapServer) CheckConfig() { + if !netutils.IsValidTLS(w.GetConfig().Collectors.Dnstap.TLSMinVersion) { + w.LogFatal(pkgconfig.PrefixLogWorker + "[" + w.GetName() + "] dnstap - invalid tls min version") + } +} + +func (w *DnstapServer) HandleConn(conn net.Conn, connID uint64, forceClose chan bool, wg *sync.WaitGroup) { + // close connection on function exit + 
defer func() { + w.LogInfo("(conn #%d - connection handler terminated", connID) + netutils.Close(conn, w.GetConfig().Collectors.Dnstap.ResetConn) + wg.Done() + }() + + // get peer address + peer := conn.RemoteAddr().String() + peerName := netutils.GetPeerName(peer) + w.LogInfo("conn #%d - new connection from %s (%s)", connID, peer, peerName) + + // start dnstap processor and run it + bufSize := w.GetConfig().Global.Worker.ChannelBufferSize + if w.GetConfig().Collectors.Dnstap.ChannelBufferSize > 0 { + bufSize = w.GetConfig().Collectors.Dnstap.ChannelBufferSize + } + dnstapProcessor := NewDNSTapProcessor(int(connID), peerName, w.GetConfig(), w.GetLogger(), w.GetName(), bufSize) + dnstapProcessor.SetMetrics(w.metrics) + dnstapProcessor.SetDefaultRoutes(w.GetDefaultRoutes()) + dnstapProcessor.SetDefaultDropped(w.GetDroppedRoutes()) + go dnstapProcessor.StartCollect() + + // init frame stream library + fsReader := bufio.NewReader(conn) + fsWriter := bufio.NewWriter(conn) + fs := framestream.NewFstrm(fsReader, fsWriter, conn, 5*time.Second, []byte("protobuf:dnstap.Dnstap"), true) + + // framestream as receiver + if err := fs.InitReceiver(); err != nil { + w.LogError("conn #%d - stream initialization: %s", connID, err) + } else { + w.LogInfo("conn #%d - receiver framestream initialized", connID) + } + + // process incoming frame and send it to dnstap consumer channel + var err error + var frame *framestream.Frame + cleanup := make(chan struct{}) + + // goroutine to close the connection properly + go func() { + defer func() { + dnstapProcessor.Stop() + w.LogInfo("conn #%d - cleanup connection handler terminated", connID) + }() + + for { + select { + case <-forceClose: + w.LogInfo("conn #%d - force to cleanup the connection handler", connID) + netutils.Close(conn, w.GetConfig().Collectors.Dnstap.ResetConn) + return + case <-cleanup: + w.LogInfo("conn #%d - cleanup the connection handler", connID) + return + } + } + }() + + // handle incoming frame + for { + if 
w.GetConfig().Collectors.Dnstap.Compression == pkgconfig.CompressNone { + frame, err = fs.RecvFrame(false) + } else { + frame, err = fs.RecvCompressedFrame(&compress.GzipCodec, false) + } + if err != nil { + connClosed := false + + var opErr *net.OpError + if errors.As(err, &opErr) { + if errors.Is(opErr, net.ErrClosed) { + connClosed = true + } + } + if errors.Is(err, io.EOF) { + connClosed = true + } + + if connClosed { + w.LogInfo("conn #%d - connection closed with peer %s", connID, peer) + } else { + w.LogError("conn #%d - framestream reader error: %s", connID, err) + } + // exit goroutine + close(cleanup) + break + } + + if frame.IsControl() { + if err := fs.ResetReceiver(frame); err != nil { + if errors.Is(err, io.EOF) { + w.LogInfo("conn #%d - framestream reseted by sender", connID) + } else { + w.LogError("conn #%d - unexpected control framestream: %s", connID, err) + } + + } + + // exit goroutine + close(cleanup) + break + } + + if w.GetConfig().Collectors.Dnstap.Compression == pkgconfig.CompressNone { + // send payload to the channel + select { + case dnstapProcessor.GetDataChannel() <- frame.Data(): // Successful send to channel + default: + w.WorkerIsBusy("dnstap-processor") + } + } else { + // ignore first 4 bytes + data := frame.Data()[4:] + validFrame := true + for len(data) >= 4 { + // get frame size + payloadSize := binary.BigEndian.Uint32(data[:4]) + data = data[4:] + + // enough next data ? 
+ if uint32(len(data)) < payloadSize { + validFrame = false + break + } + // send payload to the channel + select { + case dnstapProcessor.GetDataChannel() <- data[:payloadSize]: // Successful send to channel + default: + w.WorkerIsBusy("dnstap-processor") + } + + // continue for next + data = data[payloadSize:] + } + if !validFrame { + w.LogError("conn #%d - invalid compressed frame received", connID) + continue + } + } + } +} + +func (w *DnstapServer) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + var connWG sync.WaitGroup + connCleanup := make(chan bool) + cfg := w.GetConfig().Collectors.Dnstap + + // start to listen + listener, err := netutils.StartToListen( + cfg.ListenIP, cfg.ListenPort, cfg.SockPath, + cfg.TLSSupport, netutils.TLSVersion[cfg.TLSMinVersion], + cfg.CertFile, cfg.KeyFile) + if err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] listen error: ", err) + } + w.LogInfo("listening on %s", listener.Addr()) + + // goroutine to Accept() blocks waiting for new connection. 
+ acceptChan := make(chan net.Conn) + netutils.AcceptConnections(listener, acceptChan) + + // main loop + for { + select { + case <-w.OnStop(): + w.LogInfo("stop to listen...") + listener.Close() + + w.LogInfo("closing connected peers...") + close(connCleanup) + connWG.Wait() + return + + // save the new config + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.CheckConfig() + + // new incoming connection + case conn, opened := <-acceptChan: + if !opened { + return + } + + if len(cfg.SockPath) == 0 && cfg.RcvBufSize > 0 { + before, actual, err := netutils.SetSockRCVBUF(conn, cfg.RcvBufSize, cfg.TLSSupport) + if err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] unable to set SO_RCVBUF: ", err) + } + w.LogInfo("set SO_RCVBUF option, value before: %d, desired: %d, actual: %d", before, cfg.RcvBufSize, actual) + } + + // handle the connection + connWG.Add(1) + connID := atomic.AddUint64(&w.connCounter, 1) + go w.HandleConn(conn, connID, connCleanup, &connWG) + } + + } +} + +func GetFakeDNSTap(dnsquery []byte) *dnstap.Dnstap { + dtQuery := &dnstap.Dnstap{} + + dt := dnstap.Dnstap_MESSAGE + dtQuery.Identity = []byte("dnstap-generator") + dtQuery.Version = []byte("-") + dtQuery.Type = &dt + + mt := dnstap.Message_CLIENT_QUERY + sf := dnstap.SocketFamily_INET + sp := dnstap.SocketProtocol_UDP + + now := time.Now() + tsec := uint64(now.Unix()) + tnsec := uint32(uint64(now.UnixNano()) - uint64(now.Unix())*1e9) + + rport := uint32(53) + qport := uint32(5300) + + msg := &dnstap.Message{Type: &mt} + msg.SocketFamily = &sf + msg.SocketProtocol = &sp + msg.QueryAddress = net.ParseIP("127.0.0.1") + msg.QueryPort = &qport + msg.ResponseAddress = net.ParseIP("127.0.0.2") + msg.ResponsePort = &rport + + msg.QueryMessage = dnsquery + msg.QueryTimeSec = &tsec + msg.QueryTimeNsec = &tnsec + + dtQuery.Message = msg + return dtQuery +} + +type DNSTapProcessor struct { + *GenericWorker + ConnID int + PeerName string + dataChannel chan []byte +} + +func 
NewDNSTapProcessor(connID int, peerName string, config *pkgconfig.Config, logger *logger.Logger, name string, size int) DNSTapProcessor { + w := DNSTapProcessor{GenericWorker: NewGenericWorker(config, logger, name, "(conn #"+strconv.Itoa(connID)+") dnstap processor", size, pkgconfig.DefaultMonitor)} + w.ConnID = connID + w.PeerName = peerName + w.dataChannel = make(chan []byte, size) + return w +} + +func (w *DNSTapProcessor) GetDataChannel() chan []byte { + return w.dataChannel +} + +func (w *DNSTapProcessor) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + dt := &dnstap.Dnstap{} + edt := &dnsutils.ExtendedDnstap{} + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare enabled transformers + transforms := transformers.NewTransforms(&w.GetConfig().IngoingTransformers, w.GetLogger(), w.GetName(), defaultRoutes, w.ConnID) + + // read incoming dns message + for { + select { + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + transforms.ReloadConfig(&cfg.IngoingTransformers) + + case <-w.OnStop(): + transforms.Reset() + close(w.GetDataChannel()) + return + + case data, opened := <-w.GetDataChannel(): + if !opened { + w.LogInfo("channel closed, exit") + return + } + // count global messages + w.CountIngressTraffic() + + err := proto.Unmarshal(data, dt) + if err != nil { + continue + } + + // count global messages + w.CountIngressTraffic() + + // init dns message + dm := dnsutils.DNSMessage{} + dm.Init() + + dm.DNSTap.PeerName = w.PeerName + + // init dns message with additionnals parts + identity := dt.GetIdentity() + if len(identity) > 0 { + dm.DNSTap.Identity = string(identity) + } + version := dt.GetVersion() + if len(version) > 0 { + dm.DNSTap.Version = string(version) + } + dm.DNSTap.Operation = dt.GetMessage().GetType().String() + + // extended extra field ? 
+ if w.GetConfig().Collectors.Dnstap.ExtendedSupport { + err := proto.Unmarshal(dt.GetExtra(), edt) + if err != nil { + continue + } + + // get original extra value + originalExtra := string(edt.GetOriginalDnstapExtra()) + if len(originalExtra) > 0 { + dm.DNSTap.Extra = originalExtra + } + + // get atags + atags := edt.GetAtags() + if atags != nil { + dm.ATags = &dnsutils.TransformATags{ + Tags: atags.GetTags(), + } + } + + // get public suffix + norm := edt.GetNormalize() + if norm != nil { + dm.PublicSuffix = &dnsutils.TransformPublicSuffix{} + if len(norm.GetTld()) > 0 { + dm.PublicSuffix.QnamePublicSuffix = norm.GetTld() + } + if len(norm.GetEtldPlusOne()) > 0 { + dm.PublicSuffix.QnameEffectiveTLDPlusOne = norm.GetEtldPlusOne() + } + } + + // filtering + sampleRate := edt.GetFiltering() + if sampleRate != nil { + dm.Filtering = &dnsutils.TransformFiltering{} + dm.Filtering.SampleRate = int(sampleRate.SampleRate) + } + } else { + extra := string(dt.GetExtra()) + if len(extra) > 0 { + dm.DNSTap.Extra = extra + } + } + + if ipVersion, valid := netutils.IPVersion[dt.GetMessage().GetSocketFamily().String()]; valid { + dm.NetworkInfo.Family = ipVersion + } else { + dm.NetworkInfo.Family = pkgconfig.StrUnknown + } + + dm.NetworkInfo.Protocol = dt.GetMessage().GetSocketProtocol().String() + + // decode query address and port + queryip := dt.GetMessage().GetQueryAddress() + if len(queryip) > 0 { + dm.NetworkInfo.QueryIP = net.IP(queryip).String() + } + queryport := dt.GetMessage().GetQueryPort() + if queryport > 0 { + dm.NetworkInfo.QueryPort = strconv.FormatUint(uint64(queryport), 10) + } + + // decode response address and port + responseip := dt.GetMessage().GetResponseAddress() + if len(responseip) > 0 { + dm.NetworkInfo.ResponseIP = net.IP(responseip).String() + } + responseport := dt.GetMessage().GetResponsePort() + if responseport > 0 { + dm.NetworkInfo.ResponsePort = strconv.FormatUint(uint64(responseport), 10) + } + + // get dns payload and timestamp according 
to the type (query or response) + op := dnstap.Message_Type_value[dm.DNSTap.Operation] + if op%2 == 1 { + dnsPayload := dt.GetMessage().GetQueryMessage() + dm.DNS.Payload = dnsPayload + dm.DNS.Length = len(dnsPayload) + dm.DNS.Type = dnsutils.DNSQuery + dm.DNSTap.TimeSec = int(dt.GetMessage().GetQueryTimeSec()) + dm.DNSTap.TimeNsec = int(dt.GetMessage().GetQueryTimeNsec()) + } else { + dnsPayload := dt.GetMessage().GetResponseMessage() + dm.DNS.Payload = dnsPayload + dm.DNS.Length = len(dnsPayload) + dm.DNS.Type = dnsutils.DNSReply + dm.DNSTap.TimeSec = int(dt.GetMessage().GetResponseTimeSec()) + dm.DNSTap.TimeNsec = int(dt.GetMessage().GetResponseTimeNsec()) + } + + // policy + policyType := dt.GetMessage().GetPolicy().GetType() + if len(policyType) > 0 { + dm.DNSTap.PolicyType = policyType + } + + policyRule := string(dt.GetMessage().GetPolicy().GetRule()) + if len(policyRule) > 0 { + dm.DNSTap.PolicyRule = policyRule + } + + policyAction := dt.GetMessage().GetPolicy().GetAction().String() + if len(policyAction) > 0 { + dm.DNSTap.PolicyAction = policyAction + } + + policyMatch := dt.GetMessage().GetPolicy().GetMatch().String() + if len(policyMatch) > 0 { + dm.DNSTap.PolicyMatch = policyMatch + } + + policyValue := string(dt.GetMessage().GetPolicy().GetValue()) + if len(policyValue) > 0 { + dm.DNSTap.PolicyValue = policyValue + } + + // decode query zone if provided + queryZone := dt.GetMessage().GetQueryZone() + if len(queryZone) > 0 { + qz, _, err := dnsutils.ParseLabels(0, queryZone) + if err != nil { + w.LogError("invalid query zone: %v - %v", err, queryZone) + } + dm.DNSTap.QueryZone = qz + } + + // compute timestamp + ts := time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec)) + dm.DNSTap.Timestamp = ts.UnixNano() + dm.DNSTap.TimestampRFC3339 = ts.UTC().Format(time.RFC3339Nano) + + // decode payload if provided + if !w.GetConfig().Collectors.Dnstap.DisableDNSParser && len(dm.DNS.Payload) > 0 { + // decode the dns payload to get id, rcode and the 
number of question + // number of answer, ignore invalid packet + dnsHeader, err := dnsutils.DecodeDNS(dm.DNS.Payload) + if err != nil { + dm.DNS.MalformedPacket = true + w.LogInfo("dns header parser stopped: %s", err) + if w.GetConfig().Global.Trace.LogMalformed { + w.LogError("%v", dm) + w.LogError("dump invalid dns headr: %v", dm.DNS.Payload) + } + } + + if err = dnsutils.DecodePayload(&dm, &dnsHeader, w.GetConfig()); err != nil { + dm.DNS.MalformedPacket = true + w.LogInfo("dns payload parser stopped: %s", err) + if w.GetConfig().Global.Trace.LogMalformed { + w.LogError("%v", dm) + w.LogError("dump invalid dns payload: %v", dm.DNS.Payload) + } + } + } + + // count output packets + w.CountEgressTraffic() + + // apply all enabled transformers + transformResult, err := transforms.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // convert latency to human + dm.DNSTap.LatencySec = fmt.Sprintf("%.6f", dm.DNSTap.Latency) + + // dispatch dns message to connected routes + w.SendForwardedTo(defaultRoutes, defaultNames, dm) + } + } +} diff --git a/workers/dnstapserver_test.go b/workers/dnstapserver_test.go new file mode 100644 index 00000000..65f6cf12 --- /dev/null +++ b/workers/dnstapserver_test.go @@ -0,0 +1,577 @@ +package workers + +import ( + "bufio" + "bytes" + "fmt" + "net" + "regexp" + "testing" + "time" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnstap-protobuf" + "github.com/dmachard/go-framestream" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" + "github.com/miekg/dns" + "github.com/segmentio/kafka-go/compress" + "google.golang.org/protobuf/proto" +) + +func Test_DnstapCollector(t *testing.T) { + testcases := []struct { + name string + mode string + 
address string + listenPort int + operation string + compression string + }{ + { + name: "tcp_default", + mode: netutils.SocketTCP, + address: ":6000", + listenPort: 0, + operation: "CLIENT_QUERY", + compression: "none", + }, + { + name: "tcp_custom_port", + mode: netutils.SocketTCP, + address: ":7000", + listenPort: 7000, + operation: "CLIENT_QUERY", + compression: "none", + }, + { + name: "unix_default", + mode: netutils.SocketUnix, + address: "/tmp/dnscollector.sock", + listenPort: 0, + operation: "CLIENT_QUERY", + compression: "none", + }, + { + name: "tcp_compress_gzip", + mode: netutils.SocketTCP, + address: ":7000", + listenPort: 7000, + operation: "CLIENT_QUERY", + compression: "gzip", + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + g := GetWorkerForTest(pkgconfig.DefaultBufferSize) + + config := pkgconfig.GetDefaultConfig() + if tc.listenPort > 0 { + config.Collectors.Dnstap.ListenPort = tc.listenPort + } + if tc.mode == netutils.SocketUnix { + config.Collectors.Dnstap.SockPath = tc.address + } + config.Collectors.Dnstap.Compression = tc.compression + + // start the collector + c := NewDnstapServer([]Worker{g}, config, logger.New(false), "test") + go c.StartCollect() + + // wait before to connect + time.Sleep(1 * time.Second) + conn, err := net.Dial(tc.mode, tc.address) + if err != nil { + t.Error("could not connect: ", err) + } + defer conn.Close() + + r := bufio.NewReader(conn) + w := bufio.NewWriter(conn) + fs := framestream.NewFstrm(r, w, conn, 5*time.Second, []byte("protobuf:dnstap.Dnstap"), true) + if err := fs.InitSender(); err != nil { + t.Fatalf("framestream init error: %s", err) + } else { + bulkFrame := &framestream.Frame{} + subFrame := &framestream.Frame{} + + // get fake dns question + dnsquery, err := dnsutils.GetFakeDNS() + if err != nil { + t.Fatalf("dns question pack error") + } + + // get fake dnstap message + dtQuery := GetFakeDNSTap(dnsquery) + + // serialize to bytes + data, err := 
proto.Marshal(dtQuery) + if err != nil { + t.Fatalf("dnstap proto marshal error %s", err) + } + // send query + + if config.Collectors.Dnstap.Compression == pkgconfig.CompressNone { + // send the frame + bulkFrame.Write(data) + if err := fs.SendFrame(bulkFrame); err != nil { + t.Fatalf("send frame error %s", err) + } + } else { + subFrame.Write(data) + bulkFrame.AppendData(subFrame.Data()) + } + + if config.Collectors.Dnstap.Compression != pkgconfig.CompressNone { + bulkFrame.Encode() + if err := fs.SendCompressedFrame(&compress.GzipCodec, bulkFrame); err != nil { + t.Fatalf("send compressed frame error %s", err) + } + } + } + + // waiting message in channel + msg := <-g.GetInputChannel() + if msg.DNSTap.Operation != tc.operation { + t.Errorf("want %s, got %s", tc.operation, msg.DNSTap.Operation) + } + + c.Stop() + }) + } +} + +// Testcase for https://github.com/dmachard/go-dnscollector/issues/461 +// Support Bind9 with dnstap closing. +func Test_DnstapCollector_CloseFrameStream(t *testing.T) { + // redirect stdout output to bytes buffer + logsChan := make(chan logger.LogEntry, 50) + lg := logger.New(true) + lg.SetOutputChannel((logsChan)) + + config := pkgconfig.GetDefaultConfig() + config.Collectors.Dnstap.SockPath = "/tmp/dnscollector.sock" + + // start the collector in unix mode + g := GetWorkerForTest(pkgconfig.DefaultBufferSize) + c := NewDnstapServer([]Worker{g}, config, lg, "test") + go c.StartCollect() + + // simulate dns server connection to collector + time.Sleep(1 * time.Second) + conn, err := net.Dial(netutils.SocketUnix, "/tmp/dnscollector.sock") + if err != nil { + t.Error("could not connect: ", err) + } + defer conn.Close() + + r := bufio.NewReader(conn) + w := bufio.NewWriter(conn) + fs := framestream.NewFstrm(r, w, conn, 5*time.Second, []byte("protobuf:dnstap.Dnstap"), true) + if err := fs.InitSender(); err != nil { + t.Fatalf("framestream init error: %s", err) + } + + // checking reset + errClose := fs.ResetSender() + if errClose != nil { 
+ t.Errorf("reset sender error: %s", errClose) + } + + regxp := ".*framestream reseted by sender.*" + pattern := regexp.MustCompile(regxp) + + matchMsg := false + for entry := range logsChan { + fmt.Println(entry) + if pattern.MatchString(entry.Message) { + matchMsg = true + break + } + } + if !matchMsg { + t.Errorf("reset from sender not received") + } + + // cleanup + c.Stop() +} + +func Test_DnstapProcessor(t *testing.T) { + logger := logger.New(true) + var o bytes.Buffer + logger.SetOutput(&o) + + // run the consumer with a fake logger + fl := GetWorkerForTest(pkgconfig.DefaultBufferSize) + + // init the dnstap consumer + consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetDefaultConfig(), logger, "test", 512) + consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) + + // prepare dns query + dnsmsg := new(dns.Msg) + dnsmsg.SetQuestion(pkgconfig.ExpectedQname+".", dns.TypeA) + dnsquestion, _ := dnsmsg.Pack() + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(5) + dt.Message.QueryMessage = dnsquestion + + data, _ := proto.Marshal(dt) + + // start the consumer + go consumer.StartCollect() + + // add packet to consumer + consumer.GetDataChannel() <- data + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNS.Qname != pkgconfig.ExpectedQname { + t.Errorf("invalid qname in dns message: %s", dm.DNS.Qname) + } +} + +func Test_DnstapProcessor_MalformedDnsHeader(t *testing.T) { + // run the consumer with a fake logger + fl := GetWorkerForTest(pkgconfig.DefaultBufferSize) + + // init the dnstap consumer + logger := logger.New(false) + consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetDefaultConfig(), logger, "test", 512) + consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) + + // prepare dns query + dnsmsg := new(dns.Msg) + dnsmsg.SetQuestion("www.google.fr.", dns.TypeA) + dnsquestion, _ := 
dnsmsg.Pack() + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(5) + dt.Message.QueryMessage = dnsquestion[:4] + + data, _ := proto.Marshal(dt) + + // start the consumer + go consumer.StartCollect() + + // add packet to consumer + consumer.GetDataChannel() <- data + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNS.MalformedPacket == false { + t.Errorf("malformed packet not detected") + } +} + +func Test_DnstapProcessor_MalformedDnsQuestion(t *testing.T) { + // run the consumer with a fake logger + fl := GetWorkerForTest(pkgconfig.DefaultBufferSize) + + // init the dnstap consumer + logger := logger.New(false) + consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetDefaultConfig(), logger, "test", 512) + consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) + + // prepare dns query + dnsquestion := []byte{88, 27, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 15, 100, 110, 115, 116, 97, 112, + 99, 111, 108, 108, 101, 99, 116, 111, 114, 4, 116, 101, 115, 116, 0} + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(5) + dt.Message.QueryMessage = dnsquestion + + data, _ := proto.Marshal(dt) + + // start the consumer + go consumer.StartCollect() + + // add packet to consumer + consumer.GetDataChannel() <- data + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNS.MalformedPacket == false { + t.Errorf("malformed packet not detected") + } +} + +func Test_DnstapProcessor_MalformedDnsAnswer(t *testing.T) { + // run the consumer with a fake logger + fl := GetWorkerForTest(pkgconfig.DefaultBufferSize) + + // init the dnstap consumer + logger := logger.New(false) + consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetDefaultConfig(), logger, "test", 512) + 
consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) + + // prepare dns query + dnsanswer := []byte{46, 172, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 15, 100, 110, 115, 116, 97, 112, 99, 111, 108, 108, 101, 99, 116, + 111, 114, 4, 116, 101, 115, 116, 0, 0, 1, 0, 1, 15, 100, 110, 115, 116, 97, 112, 99, 111, 108, 108, 101, 99, 116, + 111, 114, 4, 116, 101, 115, 116, 0, 0, 1, 0, 1, 0, 0, 14, 16, 0, 4, 127, 0} + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(6) + dt.Message.ResponseMessage = dnsanswer + + data, _ := proto.Marshal(dt) + + // start the consumer + go consumer.StartCollect() + + // add packet to consumer + consumer.GetDataChannel() <- data + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNS.MalformedPacket == false { + t.Errorf("malformed packet not detected") + } +} + +func Test_DnstapProcessor_EmptyDnsPayload(t *testing.T) { + // run the consumer with a fake logger + fl := GetWorkerForTest(pkgconfig.DefaultBufferSize) + + // init the dnstap consumer + logger := logger.New(false) + consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetDefaultConfig(), logger, "test", 512) + consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(5) + + data, _ := proto.Marshal(dt) + + // start the consumer + go consumer.StartCollect() + + // add packet to consumer + consumer.GetDataChannel() <- data + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNS.MalformedPacket == true { + t.Errorf("malformed packet detected, should not with empty payload") + } +} + +func Test_DnstapProcessor_DisableDNSParser(t *testing.T) { + // run the consumer with a fake logger + fl := GetWorkerForTest(pkgconfig.DefaultBufferSize) + + // init 
the dnstap consumer + cfg := pkgconfig.GetDefaultConfig() + cfg.Collectors.Dnstap.DisableDNSParser = true + + logger := logger.New(false) + consumer := NewDNSTapProcessor(0, "peertest", cfg, logger, "test", 512) + consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) + + // prepare dns query + dnsmsg := new(dns.Msg) + dnsmsg.SetQuestion("www.google.fr.", dns.TypeA) + dnsquestion, _ := dnsmsg.Pack() + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(5) + dt.Message.QueryMessage = dnsquestion + + data, _ := proto.Marshal(dt) + + // start the consumer + go consumer.StartCollect() + + // add packet to consumer + consumer.GetDataChannel() <- data + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNS.ID != 0 { + t.Errorf("DNS ID should be equal to zero: %d", dm.DNS.ID) + } +} + +// test to decode the extended part +func Test_DnstapProcessor_Extended(t *testing.T) { + // run the consumer with a fake logger + fl := GetWorkerForTest(pkgconfig.DefaultBufferSize) + + logger := logger.New(true) + var o bytes.Buffer + logger.SetOutput(&o) + + // init the dnstap consumer + cfg := pkgconfig.GetDefaultConfig() + cfg.Collectors.Dnstap.ExtendedSupport = true + + consumer := NewDNSTapProcessor(0, "peertest", cfg, logger, "test", 512) + consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) + + // prepare dns query + dnsmsg := new(dns.Msg) + dnsmsg.SetQuestion("www.google.fr.", dns.TypeA) + dnsquestion, _ := dnsmsg.Pack() + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(5) + dt.Message.QueryMessage = dnsquestion + + edt := &dnsutils.ExtendedDnstap{} + edt.Atags = &dnsutils.ExtendedATags{ + Tags: []string{"atags:value"}, + } + edt.OriginalDnstapExtra = []byte("originalextrafield") + edt.Normalize = 
&dnsutils.ExtendedNormalize{ + Tld: "org", + EtldPlusOne: "dnscollector.org", + } + edt.Filtering = &dnsutils.ExtendedFiltering{ + SampleRate: 30, + } + edtData, _ := proto.Marshal(edt) + dt.Extra = edtData + + data, _ := proto.Marshal(dt) + + // start the consumer + go consumer.StartCollect() + + // add packet to consumer + consumer.GetDataChannel() <- data + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNSTap.Extra != "originalextrafield" { + t.Errorf("invalid extra field: %s", dm.DNSTap.Extra) + } + if dm.ATags.Tags[0] != "atags:value" { + t.Errorf("invalid atags: %s", dm.ATags.Tags[0]) + } + if dm.PublicSuffix.QnameEffectiveTLDPlusOne != "dnscollector.org" { + t.Errorf("invalid etld+1: %s", dm.PublicSuffix.QnameEffectiveTLDPlusOne) + } + if dm.PublicSuffix.QnamePublicSuffix != "org" { + t.Errorf("invalid tld: %s", dm.PublicSuffix.QnamePublicSuffix) + } + if dm.Filtering.SampleRate != 30 { + t.Errorf("invalid sample rate: %d", dm.Filtering.SampleRate) + } +} + +// test for issue https://github.com/dmachard/go-dnscollector/issues/568 +func Test_DnstapProcessor_BufferLoggerIsFull(t *testing.T) { + // run the consumer with a fake logger + fl := GetWorkerForTest(pkgconfig.DefaultBufferOne) + + // redirect stdout output to bytes buffer + logsChan := make(chan logger.LogEntry, 30) + lg := logger.New(true) + lg.SetOutputChannel((logsChan)) + + // init the dnstap consumer + consumer := NewDNSTapProcessor(0, "peertest", pkgconfig.GetDefaultConfig(), lg, "test", 512) + consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) + + // prepare dns query + dnsmsg := new(dns.Msg) + dnsmsg.SetQuestion(pkgconfig.ExpectedQname+".", dns.TypeA) + dnsquestion, _ := dnsmsg.Pack() + + // prepare dnstap + dt := &dnstap.Dnstap{} + dt.Type = dnstap.Dnstap_Type.Enum(1) + + dt.Message = &dnstap.Message{} + dt.Message.Type = dnstap.Message_Type.Enum(5) + dt.Message.QueryMessage = dnsquestion + + data, _ := proto.Marshal(dt) + + // start the 
consumer + go consumer.StartCollect() + + // add packets to consumer + for i := 0; i < 512; i++ { + consumer.GetDataChannel() <- data + } + + // waiting monitor to run in consumer + time.Sleep(12 * time.Second) + + for entry := range logsChan { + fmt.Println(entry) + pattern := regexp.MustCompile(pkgconfig.ExpectedBufferMsg511) + if pattern.MatchString(entry.Message) { + break + } + } + + // read dns message from dnstap consumer + dm := <-fl.GetInputChannel() + if dm.DNS.Qname != pkgconfig.ExpectedQname { + t.Errorf("invalid qname in dns message: %s", dm.DNS.Qname) + } + + // send second shot of packets to consumer + for i := 0; i < 1024; i++ { + consumer.GetDataChannel() <- data + } + + // waiting monitor to run in consumer + time.Sleep(12 * time.Second) + + for entry := range logsChan { + fmt.Println(entry) + pattern := regexp.MustCompile(pkgconfig.ExpectedBufferMsg1023) + if pattern.MatchString(entry.Message) { + break + } + } + + // read dns message from dnstap consumer + dm2 := <-fl.GetInputChannel() + if dm2.DNS.Qname != pkgconfig.ExpectedQname { + t.Errorf("invalid qname in second dns message: %s", dm2.DNS.Qname) + } +} diff --git a/workers/elasticsearch.go b/workers/elasticsearch.go new file mode 100644 index 00000000..b74fc4ad --- /dev/null +++ b/workers/elasticsearch.go @@ -0,0 +1,257 @@ +package workers + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "fmt" + "path" + "time" + + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" + + "net/http" + "net/url" +) + +type ElasticSearchClient struct { + *GenericWorker + server, index, bulkURL string + httpClient *http.Client +} + +func NewElasticSearchClient(config *pkgconfig.Config, console *logger.Logger, name string) *ElasticSearchClient { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.ElasticSearchClient.ChannelBufferSize > 0 { + bufSize = 
config.Loggers.ElasticSearchClient.ChannelBufferSize + } + w := &ElasticSearchClient{GenericWorker: NewGenericWorker(config, console, name, "elasticsearch", bufSize, pkgconfig.DefaultMonitor)} + w.ReadConfig() + w.httpClient = &http.Client{Timeout: 5 * time.Second} + return w +} + +func (w *ElasticSearchClient) ReadConfig() { + + if w.GetConfig().Loggers.ElasticSearchClient.Compression != pkgconfig.CompressNone { + w.LogInfo(w.GetConfig().Loggers.ElasticSearchClient.Compression) + switch w.GetConfig().Loggers.ElasticSearchClient.Compression { + case pkgconfig.CompressGzip: + w.LogInfo("gzip compression is enabled") + default: + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] elasticsearch - invalid compress mode: ", w.GetConfig().Loggers.ElasticSearchClient.Compression) + } + } + + w.server = w.GetConfig().Loggers.ElasticSearchClient.Server + w.index = w.GetConfig().Loggers.ElasticSearchClient.Index + + u, err := url.Parse(w.server) + if err != nil { + w.LogError(err.Error()) + } + u.Path = path.Join(u.Path, w.index, "_bulk") + w.bulkURL = u.String() +} + +func (w *ElasticSearchClient) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare transforms + subprocessors := transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0) + + // goroutine to process transformed dns messages + go w.StartLogging() + + // loop to process incoming messages + for { + select { + case <-w.OnStop(): + w.StopLogger() + subprocessors.Reset() + return + + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.ReadConfig() + subprocessors.ReloadConfig(&cfg.OutgoingTransformers) + + case dm, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("input channel closed!") + return + } + // count global messages + 
w.CountIngressTraffic() + + // apply tranforms, init dns message with additionnals parts if necessary + transformResult, err := subprocessors.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // send to output channel + w.CountEgressTraffic() + w.GetOutputChannel() <- dm + + // send to next ? + w.SendForwardedTo(defaultRoutes, defaultNames, dm) + } + } +} + +func (w *ElasticSearchClient) StartLogging() { + w.LogInfo("logging has started") + defer w.LoggingDone() + + // create a new encoder that writes to the buffer + buffer := bytes.NewBuffer(make([]byte, 0, w.GetConfig().Loggers.ElasticSearchClient.BulkSize)) + encoder := json.NewEncoder(buffer) + + flushInterval := time.Duration(w.GetConfig().Loggers.ElasticSearchClient.FlushInterval) * time.Second + flushTimer := time.NewTimer(flushInterval) + + dataBuffer := make(chan []byte, w.GetConfig().Loggers.ElasticSearchClient.BulkChannelSize) + go func() { + for data := range dataBuffer { + var err error + if w.GetConfig().Loggers.ElasticSearchClient.Compression == pkgconfig.CompressGzip { + err = w.sendCompressedBulk(data) + } else { + err = w.sendBulk(data) + } + if err != nil { + w.LogError("error sending bulk data: %v", err) + } + } + }() + + for { + select { + case <-w.OnLoggerStopped(): + close(dataBuffer) + return + + // incoming dns message to process + case dm, opened := <-w.GetOutputChannel(): + if !opened { + w.LogInfo("output channel closed!") + return + } + + // append dns message to buffer + flat, err := dm.Flatten() + if err != nil { + w.LogError("flattening DNS message failed: %e", err) + } + buffer.WriteString("{ \"create\" : {}}\n") + encoder.Encode(flat) + + // Send data and reset buffer + if buffer.Len() >= w.GetConfig().Loggers.ElasticSearchClient.BulkSize { + bufCopy := make([]byte, buffer.Len()) + buffer.Read(bufCopy) + buffer.Reset() + + select { + case 
dataBuffer <- bufCopy: + default: + w.LogError("Send buffer is full, bulk dropped") + } + } + + // flush the buffer every ? + case <-flushTimer.C: + + // Send data and reset buffer + if buffer.Len() > 0 { + bufCopy := make([]byte, buffer.Len()) + buffer.Read(bufCopy) + buffer.Reset() + + select { + case dataBuffer <- bufCopy: + default: + w.LogError("automatic flush, send buffer is full, bulk dropped") + } + } + + // restart timer + flushTimer.Reset(flushInterval) + } + } +} + +func (w *ElasticSearchClient) sendBulk(bulk []byte) error { + // Create a new HTTP request + req, err := http.NewRequest("POST", w.bulkURL, bytes.NewReader(bulk)) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + + // Send the request using the HTTP client + resp, err := w.httpClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + // Check the response status code + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + return nil +} + +func (w *ElasticSearchClient) sendCompressedBulk(bulk []byte) error { + var compressedBulk bytes.Buffer + gzipWriter := gzip.NewWriter(&compressedBulk) + + // Write the uncompressed data to the gzip writer + _, err := gzipWriter.Write(bulk) + if err != nil { + fmt.Println("gzip", err) + return err + } + + // Close the gzip writer to flush any remaining data + err = gzipWriter.Close() + if err != nil { + return err + } + + // Create a new HTTP request + req, err := http.NewRequest("POST", w.bulkURL, &compressedBulk) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Content-Encoding", "gzip") // Set Content-Encoding header to gzip + + // Send the request using the HTTP client + resp, err := w.httpClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + // Check the response status code + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("unexpected status 
code: %d", resp.StatusCode) + } + + return nil +} diff --git a/loggers/elasticsearch_test.go b/workers/elasticsearch_test.go similarity index 70% rename from loggers/elasticsearch_test.go rename to workers/elasticsearch_test.go index a2f141c4..3bc20ed7 100644 --- a/loggers/elasticsearch_test.go +++ b/workers/elasticsearch_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" @@ -8,6 +8,7 @@ import ( "net/http" "strings" "testing" + "time" "github.com/dmachard/go-dnscollector/dnsutils" "github.com/dmachard/go-dnscollector/pkgconfig" @@ -15,7 +16,7 @@ import ( "github.com/stretchr/testify/assert" ) -func Test_ElasticSearchClient(t *testing.T) { +func Test_ElasticSearchClient_BulkSize_Exceeded(t *testing.T) { testcases := []struct { mode string @@ -24,12 +25,12 @@ func Test_ElasticSearchClient(t *testing.T) { }{ { mode: pkgconfig.ModeFlatJSON, - bulkSize: 10, - inputSize: 500, + bulkSize: 1024, + inputSize: 15, }, } - fakeRcvr, err := net.Listen("tcp", "127.0.0.1:9200") + fakeRcvr, err := net.Listen("tcp", "127.0.0.1:59200") if err != nil { t.Fatal(err) } @@ -37,12 +38,14 @@ func Test_ElasticSearchClient(t *testing.T) { for _, tc := range testcases { t.Run(tc.mode, func(t *testing.T) { - conf := pkgconfig.GetFakeConfig() + conf := pkgconfig.GetDefaultConfig() conf.Loggers.ElasticSearchClient.Index = "indexname" + conf.Loggers.ElasticSearchClient.Server = "http://127.0.0.1:59200/" conf.Loggers.ElasticSearchClient.BulkSize = tc.bulkSize + conf.Loggers.ElasticSearchClient.BulkChannelSize = 50 g := NewElasticSearchClient(conf, logger.New(false), "test") - go g.Run() + go g.StartCollect() dm := dnsutils.GetFakeDNSMessage() @@ -50,7 +53,8 @@ func Test_ElasticSearchClient(t *testing.T) { g.GetInputChannel() <- dm } - for i := 0; i < tc.inputSize/tc.bulkSize; i++ { + totalDm := 0 + for i := 0; i < tc.inputSize; i++ { // accept conn conn, err := fakeRcvr.Accept() if err != nil { @@ -60,7 +64,7 @@ func Test_ElasticSearchClient(t 
*testing.T) { // read and parse http request on server side connReader := bufio.NewReader(conn) - connReaderT := bufio.NewReaderSize(connReader, tc.bulkSize*100000) + connReaderT := bufio.NewReaderSize(connReader, tc.bulkSize*2) request, err := http.ReadRequest(connReaderT) if err != nil { t.Fatal(err) @@ -74,6 +78,7 @@ func Test_ElasticSearchClient(t *testing.T) { } scanner := bufio.NewScanner(strings.NewReader(string(payload))) + cnt := 0 for scanner.Scan() { if cnt%2 == 0 { @@ -84,18 +89,18 @@ func Test_ElasticSearchClient(t *testing.T) { var bulkDm dnsutils.DNSMessage err := json.Unmarshal(scanner.Bytes(), &bulkDm) assert.NoError(t, err) + totalDm += 1 } cnt++ } - assert.Equal(t, tc.bulkSize*2, cnt) - assert.Equal(t, "http://127.0.0.1:9200/indexname/_bulk", g.bulkURL) } + assert.Equal(t, tc.inputSize, totalDm) }) } } -func Test_ElasticSearchClientFlushINterval(t *testing.T) { +func Test_ElasticSearchClient_FlushInterval_Exceeded(t *testing.T) { testcases := []struct { mode string @@ -105,34 +110,41 @@ func Test_ElasticSearchClientFlushINterval(t *testing.T) { }{ { mode: pkgconfig.ModeFlatJSON, - bulkSize: 100, - inputSize: 99, + bulkSize: 1048576, + inputSize: 50, flushInterval: 5, }, } - fakeRcvr, err := net.Listen("tcp", "127.0.0.1:9200") + fakeRcvr, err := net.Listen("tcp", "127.0.0.1:59200") if err != nil { t.Fatal(err) } defer fakeRcvr.Close() for _, tc := range testcases { + totalDm := 0 t.Run(tc.mode, func(t *testing.T) { - conf := pkgconfig.GetFakeConfig() + conf := pkgconfig.GetDefaultConfig() conf.Loggers.ElasticSearchClient.Index = "indexname" + conf.Loggers.ElasticSearchClient.Server = "http://127.0.0.1:59200/" conf.Loggers.ElasticSearchClient.BulkSize = tc.bulkSize conf.Loggers.ElasticSearchClient.FlushInterval = tc.flushInterval - g := NewElasticSearchClient(conf, logger.New(false), "test") + g := NewElasticSearchClient(conf, logger.New(true), "test") - go g.Run() + // run logger + go g.StartCollect() + time.Sleep(1 * time.Second) + // send 
DNSmessage dm := dnsutils.GetFakeDNSMessage() - for i := 0; i < tc.inputSize; i++ { g.GetInputChannel() <- dm } + time.Sleep(6 * time.Second) + // accept the new connection from logger + // the connection should contains all packets conn, err := fakeRcvr.Accept() if err != nil { t.Fatal(err) @@ -140,7 +152,7 @@ func Test_ElasticSearchClientFlushINterval(t *testing.T) { defer conn.Close() connReader := bufio.NewReader(conn) - connReaderT := bufio.NewReaderSize(connReader, tc.bulkSize*100000) + connReaderT := bufio.NewReaderSize(connReader, tc.bulkSize*2) request, err := http.ReadRequest(connReaderT) if err != nil { t.Fatal(err) @@ -150,7 +162,7 @@ func Test_ElasticSearchClientFlushINterval(t *testing.T) { // read payload from request body payload, err := io.ReadAll(request.Body) if err != nil { - t.Fatal(err) + t.Fatal("no body in request:", err) } scanner := bufio.NewScanner(strings.NewReader(string(payload))) @@ -164,13 +176,14 @@ func Test_ElasticSearchClientFlushINterval(t *testing.T) { var bulkDm dnsutils.DNSMessage err := json.Unmarshal(scanner.Bytes(), &bulkDm) assert.NoError(t, err) + totalDm += 1 } cnt++ } - assert.Equal(t, tc.inputSize*2, cnt) - assert.Equal(t, "http://127.0.0.1:9200/indexname/_bulk", g.bulkURL) + g.Stop() }) + assert.Equal(t, tc.inputSize, totalDm) } } diff --git a/workers/falco.go b/workers/falco.go new file mode 100644 index 00000000..8f95fb95 --- /dev/null +++ b/workers/falco.go @@ -0,0 +1,118 @@ +package workers + +import ( + "bytes" + "encoding/json" + "net/http" + "time" + + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" +) + +type FalcoClient struct { + *GenericWorker +} + +func NewFalcoClient(config *pkgconfig.Config, console *logger.Logger, name string) *FalcoClient { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.FalcoClient.ChannelBufferSize > 0 { + bufSize = 
config.Loggers.FalcoClient.ChannelBufferSize + } + w := &FalcoClient{GenericWorker: NewGenericWorker(config, console, name, "falco", bufSize, pkgconfig.DefaultMonitor)} + w.ReadConfig() + return w +} + +func (w *FalcoClient) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare transforms + subprocessors := transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0) + + // goroutine to process transformed dns messages + go w.StartLogging() + + for { + select { + case <-w.OnStop(): + w.StopLogger() + subprocessors.Reset() + return + + // new config provided? + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.ReadConfig() + subprocessors.ReloadConfig(&cfg.OutgoingTransformers) + + case dm, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("input channel closed!") + return + } + // count global messages + w.CountIngressTraffic() + + // apply tranforms, init dns message with additionnals parts if necessary + transformResult, err := subprocessors.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // send to output channel + w.CountEgressTraffic() + w.GetOutputChannel() <- dm + + // send to next ? 
+ w.SendForwardedTo(defaultRoutes, defaultNames, dm) + } + } +} + +func (w *FalcoClient) StartLogging() { + w.LogInfo("logging has started") + defer w.LoggingDone() + + buffer := new(bytes.Buffer) + + for { + select { + case <-w.OnLoggerStopped(): + return + + // incoming dns message to process + case dm, opened := <-w.GetOutputChannel(): + if !opened { + w.LogInfo("output channel closed!") + return + } + + // encode + json.NewEncoder(buffer).Encode(dm) + + req, _ := http.NewRequest("POST", w.GetConfig().Loggers.FalcoClient.URL, buffer) + req.Header.Set("Content-Type", "application/json") + client := &http.Client{ + Timeout: 5 * time.Second, + } + _, err := client.Do(req) + if err != nil { + w.LogError(err.Error()) + } + + // finally reset the buffer for next iter + buffer.Reset() + } + } +} diff --git a/loggers/falco_test.go b/workers/falco_test.go similarity index 94% rename from loggers/falco_test.go rename to workers/falco_test.go index 77056812..b478f7fb 100644 --- a/loggers/falco_test.go +++ b/workers/falco_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" @@ -33,10 +33,10 @@ func Test_FalcoClient(t *testing.T) { for _, tc := range testcases { t.Run(tc.mode, func(t *testing.T) { - conf := pkgconfig.GetFakeConfig() + conf := pkgconfig.GetDefaultConfig() g := NewFalcoClient(conf, logger.New(false), "test") - go g.Run() + go g.StartCollect() dm := dnsutils.GetFakeDNSMessage() g.GetInputChannel() <- dm diff --git a/workers/file_ingestor.go b/workers/file_ingestor.go new file mode 100644 index 00000000..02195f91 --- /dev/null +++ b/workers/file_ingestor.go @@ -0,0 +1,416 @@ +package workers + +import ( + "errors" + "fmt" + "io" + "math" + "os" + "path/filepath" + "sync" + "time" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" + framestream 
"github.com/farsightsec/golang-framestream" + "github.com/fsnotify/fsnotify" + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + "github.com/google/gopacket/pcapgo" +) + +var waitFor = 10 * time.Second + +func IsValidMode(mode string) bool { + switch mode { + case + pkgconfig.ModePCAP, + pkgconfig.ModeDNSTap: + return true + } + return false +} + +type FileIngestor struct { + *GenericWorker + watcherTimers map[string]*time.Timer + dnsProcessor DNSProcessor + dnstapProcessor DNSTapProcessor + mu sync.Mutex +} + +func NewFileIngestor(next []Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *FileIngestor { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Collectors.FileIngestor.ChannelBufferSize > 0 { + bufSize = config.Collectors.FileIngestor.ChannelBufferSize + } + w := &FileIngestor{ + GenericWorker: NewGenericWorker(config, logger, name, "fileingestor", bufSize, pkgconfig.DefaultMonitor), + watcherTimers: make(map[string]*time.Timer)} + w.SetDefaultRoutes(next) + w.CheckConfig() + return w +} + +func (w *FileIngestor) CheckConfig() { + if !IsValidMode(w.GetConfig().Collectors.FileIngestor.WatchMode) { + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] - invalid mode: ", w.GetConfig().Collectors.FileIngestor.WatchMode) + } + + w.LogInfo("watching directory [%s] to find [%s] files", + w.GetConfig().Collectors.FileIngestor.WatchDir, + w.GetConfig().Collectors.FileIngestor.WatchMode) +} + +func (w *FileIngestor) ProcessFile(filePath string) { + switch w.GetConfig().Collectors.FileIngestor.WatchMode { + case pkgconfig.ModePCAP: + // process file with pcap extension only + if filepath.Ext(filePath) == ".pcap" || filepath.Ext(filePath) == ".pcap.gz" { + w.LogInfo("file ready to process %s", filePath) + go w.ProcessPcap(filePath) + } + case pkgconfig.ModeDNSTap: + // process dnstap + if filepath.Ext(filePath) == ".fstrm" { + w.LogInfo("file ready to process %s", filePath) 
+ go w.ProcessDnstap(filePath) + } + } +} + +func (w *FileIngestor) ProcessPcap(filePath string) { + // open the file + f, err := os.Open(filePath) + if err != nil { + w.LogError("unable to read file: %s", err) + return + } + defer f.Close() + + // it is a pcap file ? + pcapHandler, err := pcapgo.NewReader(f) + if err != nil { + w.LogError("unable to read pcap file: %s", err) + return + } + + fileName := filepath.Base(filePath) + w.LogInfo("processing pcap file [%s]...", fileName) + + if pcapHandler.LinkType() != layers.LinkTypeEthernet { + w.LogError("pcap file [%s] ignored: %s", filePath, pcapHandler.LinkType()) + return + } + + dnsChan := make(chan netutils.DNSPacket) + udpChan := make(chan gopacket.Packet) + tcpChan := make(chan gopacket.Packet) + fragIP4Chan := make(chan gopacket.Packet) + fragIP6Chan := make(chan gopacket.Packet) + + packetSource := gopacket.NewPacketSource(pcapHandler, pcapHandler.LinkType()) + packetSource.DecodeOptions.Lazy = true + packetSource.NoCopy = true + + // defrag ipv4 + go netutils.IPDefragger(fragIP4Chan, udpChan, tcpChan, w.GetConfig().Collectors.FileIngestor.PcapDNSPort) + // defrag ipv6 + go netutils.IPDefragger(fragIP6Chan, udpChan, tcpChan, w.GetConfig().Collectors.FileIngestor.PcapDNSPort) + // tcp assembly + go netutils.TCPAssembler(tcpChan, dnsChan, w.GetConfig().Collectors.FileIngestor.PcapDNSPort) + // udp processor + go netutils.UDPProcessor(udpChan, dnsChan, w.GetConfig().Collectors.FileIngestor.PcapDNSPort) + + go func() { + nbPackets := 0 + lastReceivedTime := time.Now() + for { + select { + case dnsPacket, noMore := <-dnsChan: + if !noMore { + goto end + } + + lastReceivedTime = time.Now() + // prepare dns message + dm := dnsutils.DNSMessage{} + dm.Init() + + dm.NetworkInfo.Family = dnsPacket.IPLayer.EndpointType().String() + dm.NetworkInfo.QueryIP = dnsPacket.IPLayer.Src().String() + dm.NetworkInfo.ResponseIP = dnsPacket.IPLayer.Dst().String() + dm.NetworkInfo.QueryPort = dnsPacket.TransportLayer.Src().String() + 
dm.NetworkInfo.ResponsePort = dnsPacket.TransportLayer.Dst().String() + dm.NetworkInfo.Protocol = dnsPacket.TransportLayer.EndpointType().String() + dm.NetworkInfo.IPDefragmented = dnsPacket.IPDefragmented + dm.NetworkInfo.TCPReassembled = dnsPacket.TCPReassembled + + dm.DNS.Payload = dnsPacket.Payload + dm.DNS.Length = len(dnsPacket.Payload) + + dm.DNSTap.Identity = w.GetConfig().GetServerIdentity() + dm.DNSTap.TimeSec = dnsPacket.Timestamp.Second() + dm.DNSTap.TimeNsec = int(dnsPacket.Timestamp.UnixNano()) + + // count it + nbPackets++ + + // send DNS message to DNS processor + w.dnsProcessor.GetInputChannel() <- dm + case <-time.After(10 * time.Second): + elapsed := time.Since(lastReceivedTime) + if elapsed >= 10*time.Second { + close(dnsChan) + } + } + } + end: + w.LogInfo("pcap file [%s]: %d DNS packet(s) detected", fileName, nbPackets) + }() + + nbPackets := 0 + for { + packet, err := packetSource.NextPacket() + + if errors.Is(err, io.EOF) { + break + } + if err != nil { + w.LogError("unable to read packet: %s", err) + break + } + + nbPackets++ + + // some security checks + if packet.NetworkLayer() == nil { + continue + } + if packet.TransportLayer() == nil { + continue + } + + // ipv4 fragmented packet ? + if packet.NetworkLayer().LayerType() == layers.LayerTypeIPv4 { + ip4 := packet.NetworkLayer().(*layers.IPv4) + if ip4.Flags&layers.IPv4MoreFragments == 1 || ip4.FragOffset > 0 { + fragIP4Chan <- packet + continue + } + } + + // ipv6 fragmented packet ? + if packet.NetworkLayer().LayerType() == layers.LayerTypeIPv6 { + v6frag := packet.Layer(layers.LayerTypeIPv6Fragment) + if v6frag != nil { + fragIP6Chan <- packet + continue + } + } + + // tcp or udp packets ? + if packet.TransportLayer().LayerType() == layers.LayerTypeUDP { + udpChan <- packet + } + if packet.TransportLayer().LayerType() == layers.LayerTypeTCP { + tcpChan <- packet + } + + } + + w.LogInfo("pcap file [%s] processing terminated, %d packet(s) read", fileName, nbPackets) + + // remove it ? 
+ if w.GetConfig().Collectors.FileIngestor.DeleteAfter { + w.LogInfo("delete file [%s]", fileName) + os.Remove(filePath) + } + + // close chan + close(fragIP4Chan) + close(fragIP6Chan) + close(udpChan) + close(tcpChan) + + // remove event timer for this file + w.RemoveEvent(filePath) +} + +func (w *FileIngestor) ProcessDnstap(filePath string) error { + // open the file + f, err := os.Open(filePath) + if err != nil { + return err + } + defer f.Close() + + dnstapDecoder, err := framestream.NewDecoder(f, &framestream.DecoderOptions{ + ContentType: []byte("protobuf:dnstap.Dnstap"), + Bidirectional: false, + }) + + if err != nil { + return fmt.Errorf("failed to create framestream Decoder: %w", err) + } + + fileName := filepath.Base(filePath) + w.LogInfo("processing dnstap file [%s]", fileName) + for { + buf, err := dnstapDecoder.Decode() + if errors.Is(err, io.EOF) { + break + } + + newbuf := make([]byte, len(buf)) + copy(newbuf, buf) + + w.dnstapProcessor.GetDataChannel() <- newbuf + } + + // remove it ? + w.LogInfo("processing of [%s] terminated", fileName) + if w.GetConfig().Collectors.FileIngestor.DeleteAfter { + w.LogInfo("delete file [%s]", fileName) + os.Remove(filePath) + } + + // remove event timer for this file + w.RemoveEvent(filePath) + + return nil +} + +func (w *FileIngestor) RegisterEvent(filePath string) { + // Get timer. + w.mu.Lock() + t, ok := w.watcherTimers[filePath] + w.mu.Unlock() + + // No timer yet, so create one. + if !ok { + t = time.AfterFunc(math.MaxInt64, func() { w.ProcessFile(filePath) }) + t.Stop() + + w.mu.Lock() + w.watcherTimers[filePath] = t + w.mu.Unlock() + } + + // Reset the timer for this path, so it will start from 100ms again. 
+ t.Reset(waitFor) +} + +func (w *FileIngestor) RemoveEvent(filePath string) { + w.mu.Lock() + delete(w.watcherTimers, filePath) + w.mu.Unlock() +} + +func (w *FileIngestor) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + bufSize := w.GetConfig().Global.Worker.ChannelBufferSize + if w.GetConfig().Collectors.FileIngestor.ChannelBufferSize > 0 { + bufSize = w.GetConfig().Collectors.FileIngestor.ChannelBufferSize + } + + dnsProcessor := NewDNSProcessor(w.GetConfig(), w.GetLogger(), w.GetName(), bufSize) + dnsProcessor.SetDefaultRoutes(w.GetDefaultRoutes()) + dnsProcessor.SetDefaultDropped(w.GetDroppedRoutes()) + go dnsProcessor.StartCollect() + + // start dnstap subprocessor + dnstapProcessor := NewDNSTapProcessor(0, "", w.GetConfig(), w.GetLogger(), w.GetName(), bufSize) + dnstapProcessor.SetDefaultRoutes(w.GetDefaultRoutes()) + dnstapProcessor.SetDefaultDropped(w.GetDroppedRoutes()) + go dnstapProcessor.StartCollect() + + w.dnstapProcessor = dnstapProcessor + w.dnsProcessor = dnsProcessor + + // read current folder content + entries, err := os.ReadDir(w.GetConfig().Collectors.FileIngestor.WatchDir) + if err != nil { + w.LogError("unable to read folder: %s", err) + } + + for _, entry := range entries { + // ignore folder + if entry.IsDir() { + continue + } + + // prepare filepath + fn := filepath.Join(w.GetConfig().Collectors.FileIngestor.WatchDir, entry.Name()) + + switch w.GetConfig().Collectors.FileIngestor.WatchMode { + case pkgconfig.ModePCAP: + // process file with pcap extension + if filepath.Ext(fn) == ".pcap" || filepath.Ext(fn) == ".pcap.gz" { + go w.ProcessPcap(fn) + } + case pkgconfig.ModeDNSTap: + // process dnstap + if filepath.Ext(fn) == ".fstrm" { + go w.ProcessDnstap(fn) + } + } + } + + // then watch for new one + watcher, err := fsnotify.NewWatcher() + if err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] new watcher: ", err) + } + // register the folder to watch + err = 
watcher.Add(w.GetConfig().Collectors.FileIngestor.WatchDir) + if err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] register folder: ", err) + } + + for { + select { + case <-w.OnStop(): + w.LogInfo("stop to listen...") + + // stop watching + watcher.Close() + + // stop processors + dnsProcessor.Stop() + dnstapProcessor.Stop() + return + + // save the new config + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.CheckConfig() + + dnsProcessor.NewConfig() <- cfg + dnstapProcessor.NewConfig() <- cfg + + case event, ok := <-watcher.Events: + if !ok { // Channel was closed (i.e. Watcher.Close() was called). + return + } + + // detect activity on file + if !event.Has(fsnotify.Create) && !event.Has(fsnotify.Write) { + continue + } + + // register the event by the name + w.RegisterEvent(event.Name) + + case err, ok := <-watcher.Errors: + if !ok { + return + } + w.LogError("error:", err) + } + } +} diff --git a/workers/file_ingestor_test.go b/workers/file_ingestor_test.go new file mode 100644 index 00000000..6ba7dcc1 --- /dev/null +++ b/workers/file_ingestor_test.go @@ -0,0 +1,54 @@ +package workers + +import ( + "testing" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" +) + +func Test_FileIngestor(t *testing.T) { + tests := []struct { + name string + watchMode string + watchDir string + }{ + { + name: "Pcap", + watchMode: "pcap", + watchDir: "./../tests/testsdata/pcap/", + }, + { + name: "Dnstap", + watchMode: "dnstap", + watchDir: "./../tests/testsdata/dnstap/", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := GetWorkerForTest(pkgconfig.DefaultBufferSize) + config := pkgconfig.GetDefaultConfig() + + // watch tests data folder + config.Collectors.FileIngestor.WatchMode = tt.watchMode + config.Collectors.FileIngestor.WatchDir = tt.watchDir + + // init collector + c := NewFileIngestor([]Worker{g}, config, 
logger.New(false), "test") + go c.StartCollect() + + // waiting message in channel + for { + // read dns message from channel + msg := <-g.GetInputChannel() + + // check qname + if msg.DNSTap.Operation == dnsutils.DNSTapClientQuery { + break + } + } + }) + } +} diff --git a/collectors/file_tail.go b/workers/file_tail.go similarity index 55% rename from collectors/file_tail.go rename to workers/file_tail.go index 48d7b722..23821d8c 100644 --- a/collectors/file_tail.go +++ b/workers/file_tail.go @@ -1,4 +1,4 @@ -package collectors +package workers import ( "fmt" @@ -9,122 +9,59 @@ import ( "time" "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" "github.com/dmachard/go-dnscollector/transformers" "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" "github.com/hpcloud/tail" "github.com/miekg/dns" ) type Tail struct { - doneRun chan bool - stopRun chan bool - tailf *tail.Tail - defaultRoutes []pkgutils.Worker - config *pkgconfig.Config - configChan chan *pkgconfig.Config - logger *logger.Logger - name string + *GenericWorker + tailf *tail.Tail } -func NewTail(loggers []pkgutils.Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *Tail { - logger.Info(pkgutils.PrefixLogCollector+"[%s] tail - enabled", name) - s := &Tail{ - doneRun: make(chan bool), - stopRun: make(chan bool), - config: config, - configChan: make(chan *pkgconfig.Config), - defaultRoutes: loggers, - logger: logger, - name: name, +func NewTail(next []Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *Tail { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Collectors.Tail.ChannelBufferSize > 0 { + bufSize = config.Collectors.Tail.ChannelBufferSize } - s.ReadConfig() - return s + w := &Tail{GenericWorker: NewGenericWorker(config, 
logger, name, "tail", bufSize, pkgconfig.DefaultMonitor)} + w.SetDefaultRoutes(next) + return w } -func (c *Tail) GetName() string { return c.name } - -func (c *Tail) AddDroppedRoute(wrk pkgutils.Worker) { - // TODO -} - -func (c *Tail) AddDefaultRoute(wrk pkgutils.Worker) { - c.defaultRoutes = append(c.defaultRoutes, wrk) -} - -func (c *Tail) SetLoggers(loggers []pkgutils.Worker) { - c.defaultRoutes = loggers -} - -func (c *Tail) Loggers() []chan dnsutils.DNSMessage { - channels := []chan dnsutils.DNSMessage{} - for _, p := range c.defaultRoutes { - channels = append(channels, p.GetInputChannel()) - } - return channels -} - -func (c *Tail) ReadConfig() {} - -func (c *Tail) ReloadConfig(config *pkgconfig.Config) { - c.LogInfo("reload configuration...") - c.configChan <- config -} - -func (c *Tail) LogInfo(msg string, v ...interface{}) { - c.logger.Info(pkgutils.PrefixLogCollector+"["+c.name+"] tail - "+msg, v...) -} - -func (c *Tail) LogError(msg string, v ...interface{}) { - c.logger.Error(pkgutils.PrefixLogCollector+"["+c.name+"] tail - "+msg, v...) 
-} - -func (c *Tail) GetInputChannel() chan dnsutils.DNSMessage { - return nil -} - -func (c *Tail) Stop() { - c.LogInfo("stopping collector...") - - // Stop to follow file - c.LogInfo("stop following file...") - c.tailf.Stop() - - // read done channel and block until run is terminated - c.stopRun <- true - <-c.doneRun -} - -func (c *Tail) Follow() error { +func (w *Tail) Follow() error { var err error location := tail.SeekInfo{Offset: 0, Whence: io.SeekEnd} config := tail.Config{Location: &location, ReOpen: true, Follow: true, Logger: tail.DiscardingLogger, Poll: true, MustExist: true} - c.tailf, err = tail.TailFile(c.config.Collectors.Tail.FilePath, config) + w.tailf, err = tail.TailFile(w.GetConfig().Collectors.Tail.FilePath, config) if err != nil { return err } return nil } -func (c *Tail) Run() { - c.LogInfo("starting collector...") - err := c.Follow() +func (w *Tail) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + err := w.Follow() if err != nil { - c.logger.Fatal("collector tail - unable to follow file: ", err) + w.LogFatal("collector tail - unable to follow file: ", err) } - // prepare enabled transformers - subprocessors := transformers.NewTransforms(&c.config.IngoingTransformers, c.logger, c.name, c.Loggers(), 0) + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + subprocessors := transformers.NewTransforms(&w.GetConfig().IngoingTransformers, w.GetLogger(), w.GetName(), defaultRoutes, 0) // init dns message dm := dnsutils.DNSMessage{} dm.Init() // init dns message with additionnals parts - subprocessors.InitDNSMessageFormat(&dm) - hostname, err := os.Hostname() if err == nil { dm.DNSTap.Identity = hostname @@ -132,39 +69,31 @@ func (c *Tail) Run() { dm.DNSTap.Identity = "undefined" } -RUN_LOOP: for { select { - // new config provided? 
- case cfg, opened := <-c.configChan: - if !opened { - return - } - c.config = cfg - c.ReadConfig() - + // save the new config + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) subprocessors.ReloadConfig(&cfg.IngoingTransformers) - case <-c.stopRun: - // cleanup transformers + case <-w.OnStop(): + w.LogInfo("stopping...") subprocessors.Reset() + return - c.doneRun <- true - break RUN_LOOP - - case line := <-c.tailf.Lines: + case line := <-w.tailf.Lines: var matches []string var re *regexp.Regexp - if len(c.config.Collectors.Tail.PatternQuery) > 0 { - re = regexp.MustCompile(c.config.Collectors.Tail.PatternQuery) + if len(w.GetConfig().Collectors.Tail.PatternQuery) > 0 { + re = regexp.MustCompile(w.GetConfig().Collectors.Tail.PatternQuery) matches = re.FindStringSubmatch(line.Text) dm.DNS.Type = dnsutils.DNSQuery dm.DNSTap.Operation = dnsutils.DNSTapOperationQuery } - if len(c.config.Collectors.Tail.PatternReply) > 0 && len(matches) == 0 { - re = regexp.MustCompile(c.config.Collectors.Tail.PatternReply) + if len(w.GetConfig().Collectors.Tail.PatternReply) > 0 && len(matches) == 0 { + re = regexp.MustCompile(w.GetConfig().Collectors.Tail.PatternReply) matches = re.FindStringSubmatch(line.Text) dm.DNS.Type = dnsutils.DNSReply dm.DNSTap.Operation = dnsutils.DNSTapOperationReply @@ -182,7 +111,7 @@ RUN_LOOP: var t time.Time timestampIndex := re.SubexpIndex("timestamp") if timestampIndex != -1 { - t, err = time.Parse(c.config.Collectors.Tail.TimeLayout, matches[timestampIndex]) + t, err = time.Parse(w.GetConfig().Collectors.Tail.TimeLayout, matches[timestampIndex]) if err != nil { continue } @@ -230,14 +159,14 @@ RUN_LOOP: if familyIndex != -1 { dm.NetworkInfo.Family = matches[familyIndex] } else { - dm.NetworkInfo.Family = netlib.ProtoIPv4 + dm.NetworkInfo.Family = netutils.ProtoIPv4 } protocolIndex := re.SubexpIndex("protocol") if protocolIndex != -1 { dm.NetworkInfo.Protocol = matches[protocolIndex] } else { - dm.NetworkInfo.Protocol = netlib.ProtoUDP + 
dm.NetworkInfo.Protocol = netutils.ProtoUDP } lengthIndex := re.SubexpIndex("length") @@ -293,18 +222,21 @@ RUN_LOOP: dm.DNS.Payload, _ = dnspkt.Pack() dm.DNS.Length = len(dm.DNS.Payload) + // count output packets + w.CountEgressTraffic() + // apply all enabled transformers - if subprocessors.ProcessMessage(&dm) == transformers.ReturnDrop { + transformResult, err := subprocessors.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) continue } - // dispatch dns message to connected loggers - chanLoggers := c.Loggers() - for i := range chanLoggers { - chanLoggers[i] <- dm - } + // send to next ? + w.SendForwardedTo(defaultRoutes, defaultNames, dm) } } - - c.LogInfo("run terminated") } diff --git a/collectors/file_tail_test.go b/workers/file_tail_test.go similarity index 86% rename from collectors/file_tail_test.go rename to workers/file_tail_test.go index 10ed7138..49f91a7e 100644 --- a/collectors/file_tail_test.go +++ b/workers/file_tail_test.go @@ -1,4 +1,4 @@ -package collectors +package workers import ( "bufio" @@ -8,7 +8,6 @@ import ( "time" "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" "github.com/dmachard/go-logger" ) @@ -21,18 +20,18 @@ func TestTailRun(t *testing.T) { defer os.Remove(tmpFile.Name()) // clean up // config - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() config.Collectors.Tail.TimeLayout = "2006-01-02T15:04:05.999999999Z07:00" config.Collectors.Tail.FilePath = tmpFile.Name() config.Collectors.Tail.PatternQuery = "^(?P[^ ]*) (?P[^ ]*) (?P.*_QUERY) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*) (?P[^ ]*)b (?P[^ ]*) (?P[^ ]*) (?P[^ ]*)$" // init collector - g := pkgutils.NewFakeLogger() - c := NewTail([]pkgutils.Worker{g}, config, logger.New(false), "test") + g := GetWorkerForTest(pkgconfig.DefaultBufferSize) + c := 
NewTail([]Worker{g}, config, logger.New(false), "test") if err := c.Follow(); err != nil { t.Errorf("collector tail following error: %e", err) } - go c.Run() + go c.StartCollect() // write fake log time.Sleep(5 * time.Second) diff --git a/workers/fluentd.go b/workers/fluentd.go new file mode 100644 index 00000000..3cf5c58e --- /dev/null +++ b/workers/fluentd.go @@ -0,0 +1,286 @@ +package workers + +import ( + "crypto/tls" + "strconv" + "time" + + "github.com/IBM/fluent-forward-go/fluent/client" + "github.com/IBM/fluent-forward-go/fluent/protocol" + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" +) + +type FluentdClient struct { + *GenericWorker + transport string + fluentConn *client.Client + transportReady, transportReconnect chan bool + writerReady bool +} + +func NewFluentdClient(config *pkgconfig.Config, logger *logger.Logger, name string) *FluentdClient { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.Fluentd.ChannelBufferSize > 0 { + bufSize = config.Loggers.Fluentd.ChannelBufferSize + } + w := &FluentdClient{GenericWorker: NewGenericWorker(config, logger, name, "fluentd", bufSize, pkgconfig.DefaultMonitor)} + w.transportReady = make(chan bool) + w.transportReconnect = make(chan bool) + w.ReadConfig() + return w +} + +func (w *FluentdClient) ReadConfig() { + w.transport = w.GetConfig().Loggers.Fluentd.Transport + + // begin backward compatibility + if w.GetConfig().Loggers.Fluentd.TLSSupport { + w.transport = netutils.SocketTLS + } + if len(w.GetConfig().Loggers.Fluentd.SockPath) > 0 { + w.transport = netutils.SocketUnix + } +} + +func (w *FluentdClient) Disconnect() { + if w.fluentConn != nil { + w.LogInfo("closing fluentd connection") + w.fluentConn.Disconnect() + } +} + +func (w *FluentdClient) 
ConnectToRemote() { + for { + if w.fluentConn != nil { + w.fluentConn.Disconnect() + w.fluentConn = nil + } + + address := w.GetConfig().Loggers.Fluentd.RemoteAddress + ":" + strconv.Itoa(w.GetConfig().Loggers.Fluentd.RemotePort) + connTimeout := time.Duration(w.GetConfig().Loggers.Fluentd.ConnectTimeout) * time.Second + + // make the connection + var c *client.Client + var err error + + switch w.transport { + case netutils.SocketUnix: + address = w.GetConfig().Loggers.Fluentd.RemoteAddress + if len(w.GetConfig().Loggers.Fluentd.SockPath) > 0 { + address = w.GetConfig().Loggers.Fluentd.SockPath + } + w.LogInfo("connecting to %s://%s", w.transport, address) + c = client.New(client.ConnectionOptions{ + Factory: &client.ConnFactory{ + Network: "unix", + Address: address, + }, + ConnectionTimeout: connTimeout, + }) + + case netutils.SocketTCP: + w.LogInfo("connecting to %s://%s", w.transport, address) + c = client.New(client.ConnectionOptions{ + Factory: &client.ConnFactory{ + Network: "tcp", + Address: address, + }, + ConnectionTimeout: connTimeout, + }) + + case netutils.SocketTLS: + w.LogInfo("connecting to %s://%s", w.transport, address) + + var tlsConfig *tls.Config + + tlsOptions := netutils.TLSOptions{ + InsecureSkipVerify: w.GetConfig().Loggers.Fluentd.TLSInsecure, + MinVersion: w.GetConfig().Loggers.Fluentd.TLSMinVersion, + CAFile: w.GetConfig().Loggers.Fluentd.CAFile, + CertFile: w.GetConfig().Loggers.Fluentd.CertFile, + KeyFile: w.GetConfig().Loggers.Fluentd.KeyFile, + } + tlsConfig, _ = netutils.TLSClientConfig(tlsOptions) + + c = client.New(client.ConnectionOptions{ + Factory: &client.ConnFactory{ + Network: "tcp+tls", + Address: address, + TLSConfig: tlsConfig, + }, + ConnectionTimeout: connTimeout, + }) + + default: + w.LogFatal("logger=fluent - invalid transport:", w.transport) + } + + // something is wrong during connection ? 
+ err = c.Connect() + if err != nil { + w.LogError("connect error: %s", err) + w.LogInfo("retry to connect in %d seconds", w.GetConfig().Loggers.Fluentd.RetryInterval) + time.Sleep(time.Duration(w.GetConfig().Loggers.Fluentd.RetryInterval) * time.Second) + continue + } + + // save current connection + w.fluentConn = c + + // block until framestream is ready + w.transportReady <- true + + // block until an error occurred, need to reconnect + w.transportReconnect <- true + } +} + +func (w *FluentdClient) FlushBuffer(buf *[]dnsutils.DNSMessage) { + + entries := []protocol.EntryExt{} + + for _, dm := range *buf { + // Convert DNSMessage to map[] + flatDm, _ := dm.Flatten() + + // get timestamp from DNSMessage + timestamp, _ := time.Parse(time.RFC3339, dm.DNSTap.TimestampRFC3339) + + // append DNSMessage to the list + entries = append(entries, protocol.EntryExt{ + Timestamp: protocol.EventTime{Time: timestamp}, + Record: flatDm, + }) + } + + // send all entries with tag, check error on write ? + err := w.fluentConn.SendForward(w.GetConfig().Loggers.Fluentd.Tag, entries) + if err != nil { + w.LogError("forward fluent error", err.Error()) + w.writerReady = false + <-w.transportReconnect + } + + // reset buffer + *buf = nil +} + +func (w *FluentdClient) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare transforms + subprocessors := transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0) + + // goroutine to process transformed dns messages + go w.StartLogging() + + // init remote conn + go w.ConnectToRemote() + + // loop to process incoming messages + for { + select { + case <-w.OnStop(): + w.StopLogger() + subprocessors.Reset() + return + + // new config provided? 
+ case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.ReadConfig() + subprocessors.ReloadConfig(&cfg.OutgoingTransformers) + + case dm, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("input channel closed!") + return + } + // count global messages + w.CountIngressTraffic() + + // apply tranforms, init dns message with additionnals parts if necessary + transformResult, err := subprocessors.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // send to output channel + w.CountEgressTraffic() + w.GetOutputChannel() <- dm + + // send to next ? + w.SendForwardedTo(defaultRoutes, defaultNames, dm) + } + } +} + +func (w *FluentdClient) StartLogging() { + w.LogInfo("logging has started") + defer w.LoggingDone() + + // init buffer + bufferDm := []dnsutils.DNSMessage{} + + // init flust timer for buffer + flushInterval := time.Duration(w.GetConfig().Loggers.Fluentd.FlushInterval) * time.Second + flushTimer := time.NewTimer(flushInterval) + + for { + select { + case <-w.OnLoggerStopped(): + return + + case <-w.transportReady: + w.LogInfo("connected with remote side") + w.writerReady = true + + // incoming dns message to process + case dm, opened := <-w.GetOutputChannel(): + if !opened { + w.LogInfo("output channel closed!") + return + } + + // drop dns message if the connection is not ready to avoid memory leak or + // to block the channel + if !w.writerReady { + continue + } + + // append dns message to buffer + bufferDm = append(bufferDm, dm) + + // buffer is full ? 
+ if len(bufferDm) >= w.GetConfig().Loggers.Fluentd.BufferSize { + w.FlushBuffer(&bufferDm) + } + + // flush the buffer + case <-flushTimer.C: + if !w.writerReady { + bufferDm = nil + } + + if len(bufferDm) > 0 { + w.FlushBuffer(&bufferDm) + } + + // restart timer + flushTimer.Reset(flushInterval) + } + } +} diff --git a/workers/fluentd_test.go b/workers/fluentd_test.go new file mode 100644 index 00000000..06faa3ea --- /dev/null +++ b/workers/fluentd_test.go @@ -0,0 +1,131 @@ +package workers + +import ( + "bytes" + "net" + "testing" + "time" + + "github.com/IBM/fluent-forward-go/fluent/protocol" + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" + "github.com/tinylib/msgp/msgp" +) + +func Test_FluentdClient(t *testing.T) { + testcases := []struct { + name string + transport string + address string + bufferSize int + flushInterval int + }{ + { + name: "with_buffer", + transport: netutils.SocketTCP, + address: ":24224", + bufferSize: 100, + flushInterval: 1, + }, + } + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + // init logger + cfg := pkgconfig.GetDefaultConfig() + cfg.Loggers.Fluentd.FlushInterval = tc.flushInterval + cfg.Loggers.Fluentd.BufferSize = tc.bufferSize + g := NewFluentdClient(cfg, logger.New(false), "test") + + // fake msgpack receiver + fakeRcvr, err := net.Listen(tc.transport, tc.address) + if err != nil { + t.Fatal(err) + } + defer fakeRcvr.Close() + + // start the logger + go g.StartCollect() + + // accept conn from logger + conn, err := fakeRcvr.Accept() + if err != nil { + return + } + defer conn.Close() + time.Sleep(time.Second) + + // send fake dns message to logger + dm := dnsutils.GetFakeDNSMessage() + maxDm := 256 + for i := 0; i < maxDm; i++ { + g.GetInputChannel() <- dm + } + time.Sleep(time.Second) + + // read data on fake server side + nb := 0 + 
bytesSize := 0 + conn.SetReadDeadline(time.Now().Add(5 * time.Second)) + fullBuffer := make([]byte, 0) + for { + buf := make([]byte, 4096) + n, _ := conn.Read(buf) + if n == 0 { + break + } + bytesSize += n + fullBuffer = append(fullBuffer, buf[:n]...) + } + + // code msgpack + msgpr := msgp.NewReader(bytes.NewReader(fullBuffer[:bytesSize])) + for { + sz, err := msgpr.ReadArrayHeader() + if err != nil { + t.Errorf("decode Array Header failed: %v", err) + break + } + if sz != 3 { + t.Errorf("decode expect 3 elements: %d", sz) + break + } + tag, err := msgpr.ReadString() + if err != nil { + t.Errorf("Decode tag: %v", err) + break + } + if tag != pkgconfig.ProgQname { + t.Errorf("invalid tag: %s", tag) + break + } + + entries := protocol.EntryList{} + if err = entries.DecodeMsg(msgpr); err != nil { + t.Errorf("decode Entries: %v", err) + break + } + nb += len(entries) + + options := &protocol.MessageOptions{} + if err = options.DecodeMsg(msgpr); err != nil { + t.Errorf("decode options: %v", err) + break + } + + if msgpr.Buffered() == 0 { + break + } + } + + if nb != maxDm { + t.Errorf("invalid numer of msgpack: expected=%d received=%d", maxDm, nb) + } + + // stop all + fakeRcvr.Close() + g.Stop() + }) + } +} diff --git a/workers/influxdb.go b/workers/influxdb.go new file mode 100644 index 00000000..6438f003 --- /dev/null +++ b/workers/influxdb.go @@ -0,0 +1,156 @@ +package workers + +import ( + "time" + + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" + + influxdb2 "github.com/influxdata/influxdb-client-go" + "github.com/influxdata/influxdb-client-go/api" +) + +type InfluxDBClient struct { + *GenericWorker + influxdbConn influxdb2.Client + writeAPI api.WriteAPI +} + +func NewInfluxDBClient(config *pkgconfig.Config, logger *logger.Logger, name string) *InfluxDBClient { + bufSize := 
config.Global.Worker.ChannelBufferSize + if config.Loggers.InfluxDB.ChannelBufferSize > 0 { + bufSize = config.Loggers.InfluxDB.ChannelBufferSize + } + w := &InfluxDBClient{GenericWorker: NewGenericWorker(config, logger, name, "influxdb", bufSize, pkgconfig.DefaultMonitor)} + w.ReadConfig() + return w +} + +func (w *InfluxDBClient) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare transforms + subprocessors := transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0) + + // goroutine to process transformed dns messages + go w.StartLogging() + + // loop to process incoming messages + for { + select { + case <-w.OnStop(): + w.StopLogger() + subprocessors.Reset() + return + + // new config provided? + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.ReadConfig() + subprocessors.ReloadConfig(&cfg.OutgoingTransformers) + + case dm, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("input channel closed!") + return + } + // count global messages + w.CountIngressTraffic() + + // apply tranforms, init dns message with additionnals parts if necessary + transformResult, err := subprocessors.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // send to output channel + w.CountEgressTraffic() + w.GetOutputChannel() <- dm + + // send to next ? 
+ w.SendForwardedTo(defaultRoutes, defaultNames, dm) + } + } +} + +func (w *InfluxDBClient) StartLogging() { + w.LogInfo("logging has started") + defer w.LoggingDone() + + // prepare options for influxdb + opts := influxdb2.DefaultOptions() + opts.SetUseGZip(true) + if w.GetConfig().Loggers.InfluxDB.TLSSupport { + tlsOptions := netutils.TLSOptions{ + InsecureSkipVerify: w.GetConfig().Loggers.InfluxDB.TLSInsecure, + MinVersion: w.GetConfig().Loggers.InfluxDB.TLSMinVersion, + CAFile: w.GetConfig().Loggers.InfluxDB.CAFile, + CertFile: w.GetConfig().Loggers.InfluxDB.CertFile, + KeyFile: w.GetConfig().Loggers.InfluxDB.KeyFile, + } + + tlsConfig, err := netutils.TLSClientConfig(tlsOptions) + if err != nil { + w.LogFatal("logger=influxdb - tls config failed:", err) + } + + opts.SetTLSConfig(tlsConfig) + } + // init the client + influxClient := influxdb2.NewClientWithOptions( + w.GetConfig().Loggers.InfluxDB.ServerURL, + w.GetConfig().Loggers.InfluxDB.AuthToken, + opts, + ) + + writeAPI := influxClient.WriteAPI( + w.GetConfig().Loggers.InfluxDB.Organization, + w.GetConfig().Loggers.InfluxDB.Bucket, + ) + + w.influxdbConn = influxClient + w.writeAPI = writeAPI + + for { + select { + case <-w.OnLoggerStopped(): + // Force all unwritten data to be sent + w.writeAPI.Flush() + // Ensures background processes finishes + w.influxdbConn.Close() + return + + // incoming dns message to process + case dm, opened := <-w.GetOutputChannel(): + if !opened { + w.LogInfo("output channel closed!") + return + } + + p := influxdb2.NewPointWithMeasurement("dns"). + AddTag("Identity", dm.DNSTap.Identity). + AddTag("QueryIP", dm.NetworkInfo.QueryIP). + AddTag("Qname", dm.DNS.Qname). + AddField("Operation", dm.DNSTap.Operation). + AddField("Family", dm.NetworkInfo.Family). + AddField("Protocol", dm.NetworkInfo.Protocol). + AddField("Qtype", dm.DNS.Qtype). + AddField("Rcode", dm.DNS.Rcode). 
+ SetTime(time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec))) + + // write asynchronously + w.writeAPI.WritePoint(p) + } + } +} diff --git a/loggers/influxdb_test.go b/workers/influxdb_test.go similarity index 80% rename from loggers/influxdb_test.go rename to workers/influxdb_test.go index 78233cbf..5efd2b1d 100644 --- a/loggers/influxdb_test.go +++ b/workers/influxdb_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" @@ -8,24 +8,24 @@ import ( "testing" "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" ) func Test_InfluxDB(t *testing.T) { // init logger - g := NewInfluxDBClient(pkgconfig.GetFakeConfig(), logger.New(false), "test") + g := NewInfluxDBClient(pkgconfig.GetDefaultConfig(), logger.New(false), "test") // fake msgpack receiver - fakeRcvr, err := net.Listen(netlib.SocketTCP, "127.0.0.1:8086") + fakeRcvr, err := net.Listen(netutils.SocketTCP, "127.0.0.1:8086") if err != nil { t.Fatal(err) } defer fakeRcvr.Close() // start the logger - go g.Run() + go g.StartCollect() // send fake dns message to logger dm := dnsutils.GetFakeDNSMessage() diff --git a/workers/kafkaproducer.go b/workers/kafkaproducer.go new file mode 100644 index 00000000..a149a524 --- /dev/null +++ b/workers/kafkaproducer.go @@ -0,0 +1,328 @@ +package workers + +import ( + "bytes" + "context" + "encoding/json" + "strconv" + "strings" + "time" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" + "github.com/segmentio/kafka-go" + "github.com/segmentio/kafka-go/compress" + "github.com/segmentio/kafka-go/sasl/plain" + 
"github.com/segmentio/kafka-go/sasl/scram" +) + +type KafkaProducer struct { + *GenericWorker + textFormat []string + kafkaConn *kafka.Conn + kafkaReady, kafkaReconnect chan bool + kafkaConnected bool + compressCodec compress.Codec +} + +func NewKafkaProducer(config *pkgconfig.Config, logger *logger.Logger, name string) *KafkaProducer { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.KafkaProducer.ChannelBufferSize > 0 { + bufSize = config.Loggers.KafkaProducer.ChannelBufferSize + } + w := &KafkaProducer{GenericWorker: NewGenericWorker(config, logger, name, "kafka", bufSize, pkgconfig.DefaultMonitor)} + w.kafkaReady = make(chan bool) + w.kafkaReconnect = make(chan bool) + w.ReadConfig() + return w +} + +func (w *KafkaProducer) ReadConfig() { + if len(w.GetConfig().Loggers.KafkaProducer.TextFormat) > 0 { + w.textFormat = strings.Fields(w.GetConfig().Loggers.KafkaProducer.TextFormat) + } else { + w.textFormat = strings.Fields(w.GetConfig().Global.TextFormat) + } + + if w.GetConfig().Loggers.KafkaProducer.Compression != pkgconfig.CompressNone { + switch w.GetConfig().Loggers.KafkaProducer.Compression { + case pkgconfig.CompressGzip: + w.compressCodec = &compress.GzipCodec + case pkgconfig.CompressLz4: + w.compressCodec = &compress.Lz4Codec + case pkgconfig.CompressSnappy: + w.compressCodec = &compress.SnappyCodec + case pkgconfig.CompressZstd: + w.compressCodec = &compress.ZstdCodec + case pkgconfig.CompressNone: + w.compressCodec = nil + default: + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] kafka - invalid compress mode: ", w.GetConfig().Loggers.KafkaProducer.Compression) + } + } +} + +func (w *KafkaProducer) Disconnect() { + if w.kafkaConn != nil { + w.LogInfo("closing connection") + w.kafkaConn.Close() + } +} + +func (w *KafkaProducer) ConnectToKafka(ctx context.Context, readyTimer *time.Timer) { + for { + readyTimer.Reset(time.Duration(10) * time.Second) + + if w.kafkaConn != nil { + w.kafkaConn.Close() + w.kafkaConn = 
nil + } + + topic := w.GetConfig().Loggers.KafkaProducer.Topic + partition := w.GetConfig().Loggers.KafkaProducer.Partition + address := w.GetConfig().Loggers.KafkaProducer.RemoteAddress + ":" + strconv.Itoa(w.GetConfig().Loggers.KafkaProducer.RemotePort) + + w.LogInfo("connecting to kafka=%s partition=%d topic=%s", address, partition, topic) + + dialer := &kafka.Dialer{ + Timeout: time.Duration(w.GetConfig().Loggers.KafkaProducer.ConnectTimeout) * time.Second, + Deadline: time.Now().Add(5 * time.Second), + DualStack: true, + } + + // enable TLS + if w.GetConfig().Loggers.KafkaProducer.TLSSupport { + tlsOptions := netutils.TLSOptions{ + InsecureSkipVerify: w.GetConfig().Loggers.KafkaProducer.TLSInsecure, + MinVersion: w.GetConfig().Loggers.KafkaProducer.TLSMinVersion, + CAFile: w.GetConfig().Loggers.KafkaProducer.CAFile, + CertFile: w.GetConfig().Loggers.KafkaProducer.CertFile, + KeyFile: w.GetConfig().Loggers.KafkaProducer.KeyFile, + } + + tlsConfig, err := netutils.TLSClientConfig(tlsOptions) + if err != nil { + w.LogFatal("logger=kafka - tls config failed:", err) + } + dialer.TLS = tlsConfig + } + + // SASL Support + if w.GetConfig().Loggers.KafkaProducer.SaslSupport { + switch w.GetConfig().Loggers.KafkaProducer.SaslMechanism { + case pkgconfig.SASLMechanismPlain: + mechanism := plain.Mechanism{ + Username: w.GetConfig().Loggers.KafkaProducer.SaslUsername, + Password: w.GetConfig().Loggers.KafkaProducer.SaslPassword, + } + dialer.SASLMechanism = mechanism + case pkgconfig.SASLMechanismScram: + mechanism, err := scram.Mechanism( + scram.SHA512, + w.GetConfig().Loggers.KafkaProducer.SaslUsername, + w.GetConfig().Loggers.KafkaProducer.SaslPassword, + ) + if err != nil { + panic(err) + } + dialer.SASLMechanism = mechanism + } + + } + + // connect + conn, err := dialer.DialLeader(ctx, "tcp", address, topic, partition) + if err != nil { + w.LogError("%s", err) + w.LogInfo("retry to connect in %d seconds", w.GetConfig().Loggers.KafkaProducer.RetryInterval) + 
time.Sleep(time.Duration(w.GetConfig().Loggers.KafkaProducer.RetryInterval) * time.Second) + continue + } + + w.kafkaConn = conn + + // block until is ready + w.kafkaReady <- true + w.kafkaReconnect <- true + } +} + +func (w *KafkaProducer) FlushBuffer(buf *[]dnsutils.DNSMessage) { + msgs := []kafka.Message{} + buffer := new(bytes.Buffer) + strDm := "" + + for _, dm := range *buf { + switch w.GetConfig().Loggers.KafkaProducer.Mode { + case pkgconfig.ModeText: + strDm = dm.String(w.textFormat, w.GetConfig().Global.TextFormatDelimiter, w.GetConfig().Global.TextFormatBoundary) + case pkgconfig.ModeJSON: + json.NewEncoder(buffer).Encode(dm) + strDm = buffer.String() + buffer.Reset() + case pkgconfig.ModeFlatJSON: + flat, err := dm.Flatten() + if err != nil { + w.LogError("flattening DNS message failed: %e", err) + } + json.NewEncoder(buffer).Encode(flat) + strDm = buffer.String() + buffer.Reset() + } + + msg := kafka.Message{ + Key: []byte(dm.DNSTap.Identity), + Value: []byte(strDm), + } + msgs = append(msgs, msg) + + } + + // add support for msg compression + var err error + if w.GetConfig().Loggers.KafkaProducer.Compression == pkgconfig.CompressNone { + _, err = w.kafkaConn.WriteMessages(msgs...) + } else { + _, err = w.kafkaConn.WriteCompressedMessages(w.compressCodec, msgs...) 
+	}
+
+	if err != nil {
+		w.LogError("unable to write message: %s", err.Error()) // format verb was missing; err was printed as %!(EXTRA ...)
+		w.kafkaConnected = false
+		<-w.kafkaReconnect
+	}
+
+	// reset buffer
+	*buf = nil
+}
+
+func (w *KafkaProducer) StartCollect() {
+	w.LogInfo("starting data collection")
+	defer w.CollectDone()
+
+	// prepare next channels
+	defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes())
+	droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes())
+
+	// prepare transforms
+	subprocessors := transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0)
+
+	// goroutine to process transformed dns messages
+	go w.StartLogging()
+
+	// loop to process incoming messages
+	for {
+		select {
+		case <-w.OnStop():
+			w.StopLogger()
+			subprocessors.Reset()
+			return
+
+		// new config provided?
+		case cfg := <-w.NewConfig():
+			w.SetConfig(cfg)
+			w.ReadConfig()
+			subprocessors.ReloadConfig(&cfg.OutgoingTransformers)
+
+		case dm, opened := <-w.GetInputChannel():
+			if !opened {
+				w.LogInfo("input channel closed!")
+				return
+			}
+			// count global messages
+			w.CountIngressTraffic()
+
+			// apply transforms, init dns message with additional parts if necessary
+			transformResult, err := subprocessors.ProcessMessage(&dm)
+			if err != nil {
+				w.LogError(err.Error())
+			}
+			if transformResult == transformers.ReturnDrop {
+				w.SendDroppedTo(droppedRoutes, droppedNames, dm)
+				continue
+			}
+
+			// send to output channel
+			w.CountEgressTraffic()
+			w.GetOutputChannel() <- dm
+
+			// send to next ?
+			w.SendForwardedTo(defaultRoutes, defaultNames, dm)
+		}
+	}
+}
+
+func (w *KafkaProducer) StartLogging() {
+	w.LogInfo("logging has started")
+	defer w.LoggingDone()
+
+	ctx, cancelKafka := context.WithCancel(context.Background())
+	defer cancelKafka() // release resources tied to the context
+
+	// init buffer
+	bufferDm := []dnsutils.DNSMessage{}
+
+	// init ready timer: fires if the connection is not established in time
+	readyTimer := time.NewTimer(time.Duration(10) * time.Second)
+
+	// init flush timer for buffer
+	flushInterval := time.Duration(w.GetConfig().Loggers.KafkaProducer.FlushInterval) * time.Second
+	flushTimer := time.NewTimer(flushInterval)
+
+	go w.ConnectToKafka(ctx, readyTimer)
+
+	for {
+		select {
+		case <-w.OnLoggerStopped():
+			// close the kafka connection if one exists
+			w.Disconnect()
+			return
+
+		case <-readyTimer.C:
+			w.LogError("failed to established connection")
+			cancelKafka()
+
+		case <-w.kafkaReady:
+			w.LogInfo("connected with success")
+			readyTimer.Stop()
+			w.kafkaConnected = true
+
+		// incoming dns message to process
+		case dm, opened := <-w.GetOutputChannel():
+			if !opened {
+				w.LogInfo("output channel closed!")
+				return
+			}
+
+			// drop dns message if the connection is not ready to avoid memory leak or
+			// to block the channel
+			if !w.kafkaConnected {
+				continue
+			}
+
+			// append dns message to buffer
+			bufferDm = append(bufferDm, dm)
+
+			// buffer is full ?
+ if len(bufferDm) >= w.GetConfig().Loggers.KafkaProducer.BufferSize { + w.FlushBuffer(&bufferDm) + } + + // flush the buffer + case <-flushTimer.C: + if !w.kafkaConnected { + bufferDm = nil + } + + if len(bufferDm) > 0 { + w.FlushBuffer(&bufferDm) + } + + // restart timer + flushTimer.Reset(flushInterval) + } + } +} diff --git a/loggers/kafkaproducer_test.go b/workers/kafkaproducer_test.go similarity index 95% rename from loggers/kafkaproducer_test.go rename to workers/kafkaproducer_test.go index f333afd4..6ebd49e3 100644 --- a/loggers/kafkaproducer_test.go +++ b/workers/kafkaproducer_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "log" @@ -10,7 +10,7 @@ import ( "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-logger" - sarama "github.com/Shopify/sarama" + sarama "github.com/IBM/sarama" ) func Test_KafkaProducer(t *testing.T) { @@ -48,7 +48,7 @@ func Test_KafkaProducer(t *testing.T) { defer mockListener.Close() // init logger - cfg := pkgconfig.GetFakeConfig() + cfg := pkgconfig.GetDefaultConfig() cfg.Loggers.KafkaProducer.BufferSize = 0 cfg.Loggers.KafkaProducer.RemotePort = 9092 cfg.Loggers.KafkaProducer.Topic = tc.topic @@ -83,7 +83,7 @@ func Test_KafkaProducer(t *testing.T) { // start the logger g := NewKafkaProducer(cfg, logger.New(false), "test") - go g.Run() + go g.StartCollect() // wait connection time.Sleep(1 * time.Second) diff --git a/workers/logfile.go b/workers/logfile.go new file mode 100644 index 00000000..c2e8b849 --- /dev/null +++ b/workers/logfile.go @@ -0,0 +1,561 @@ +package workers + +import ( + "bufio" + "bytes" + "compress/gzip" + "encoding/json" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" + 
"github.com/google/gopacket" + "github.com/google/gopacket/layers" + "github.com/google/gopacket/pcapgo" + + framestream "github.com/farsightsec/golang-framestream" +) + +const ( + compressSuffix = ".gz" +) + +func IsValid(mode string) bool { + switch mode { + case + pkgconfig.ModeText, + pkgconfig.ModeJSON, + pkgconfig.ModeFlatJSON, + pkgconfig.ModePCAP, + pkgconfig.ModeDNSTap: + return true + } + return false +} + +type LogFile struct { + *GenericWorker + writerPlain *bufio.Writer + writerPcap *pcapgo.Writer + writerDnstap *framestream.Encoder + fileFd *os.File + fileSize int64 + fileDir, fileName, fileExt, filePrefix string + commpressTimer *time.Timer + textFormat []string +} + +func NewLogFile(config *pkgconfig.Config, logger *logger.Logger, name string) *LogFile { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.LogFile.ChannelBufferSize > 0 { + bufSize = config.Loggers.LogFile.ChannelBufferSize + } + w := &LogFile{GenericWorker: NewGenericWorker(config, logger, name, "file", bufSize, pkgconfig.DefaultMonitor)} + w.ReadConfig() + if err := w.OpenFile(); err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker+"["+name+"] file - unable to open output file:", err) + } + return w +} + +func (w *LogFile) ReadConfig() { + if !IsValid(w.GetConfig().Loggers.LogFile.Mode) { + w.LogFatal("["+w.GetName()+"] logger=file - invalid mode: ", w.GetConfig().Loggers.LogFile.Mode) + } + w.fileDir = filepath.Dir(w.GetConfig().Loggers.LogFile.FilePath) + w.fileName = filepath.Base(w.GetConfig().Loggers.LogFile.FilePath) + w.fileExt = filepath.Ext(w.fileName) + w.filePrefix = strings.TrimSuffix(w.fileName, w.fileExt) + + if len(w.GetConfig().Loggers.LogFile.TextFormat) > 0 { + w.textFormat = strings.Fields(w.GetConfig().Loggers.LogFile.TextFormat) + } else { + w.textFormat = strings.Fields(w.GetConfig().Global.TextFormat) + } + + w.LogInfo("running in mode: %s", w.GetConfig().Loggers.LogFile.Mode) +} + +func (w *LogFile) Cleanup() 
error { + if w.GetConfig().Loggers.LogFile.MaxFiles == 0 { + return nil + } + + // remove old files ? keep only max files number + entries, err := os.ReadDir(w.fileDir) + if err != nil { + return err + } + + logFiles := []int{} + for _, entry := range entries { + if entry.IsDir() { + continue + } + + // extract timestamp from filename + re := regexp.MustCompile(`^` + w.filePrefix + `-(?P\d+)` + w.fileExt) + matches := re.FindStringSubmatch(entry.Name()) + + if len(matches) == 0 { + continue + } + + // convert timestamp to int + tsIndex := re.SubexpIndex("ts") + i, err := strconv.Atoi(matches[tsIndex]) + if err != nil { + continue + } + logFiles = append(logFiles, i) + } + sort.Ints(logFiles) + + // too much log files ? + diffNB := len(logFiles) - w.GetConfig().Loggers.LogFile.MaxFiles + if diffNB > 0 { + for i := 0; i < diffNB; i++ { + filename := fmt.Sprintf("%s-%d%s", w.filePrefix, logFiles[i], w.fileExt) + f := filepath.Join(w.fileDir, filename) + if _, err := os.Stat(f); os.IsNotExist(err) { + f = filepath.Join(w.fileDir, filename+compressSuffix) + } + + // ignore errors on deletion + os.Remove(f) + } + } + + return nil +} + +func (w *LogFile) OpenFile() error { + + fd, err := os.OpenFile(w.GetConfig().Loggers.LogFile.FilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644) + if err != nil { + return err + } + w.fileFd = fd + + fileinfo, err := os.Stat(w.GetConfig().Loggers.LogFile.FilePath) + if err != nil { + return err + } + + w.fileSize = fileinfo.Size() + + switch w.GetConfig().Loggers.LogFile.Mode { + case pkgconfig.ModeText, pkgconfig.ModeJSON, pkgconfig.ModeFlatJSON: + bufferSize := 4096 + w.writerPlain = bufio.NewWriterSize(fd, bufferSize) + + case pkgconfig.ModePCAP: + w.writerPcap = pcapgo.NewWriter(fd) + if w.fileSize == 0 { + if err := w.writerPcap.WriteFileHeader(65536, layers.LinkTypeEthernet); err != nil { + return err + } + } + + case pkgconfig.ModeDNSTap: + fsOptions := &framestream.EncoderOptions{ContentType: []byte("protobuf:dnstap.Dnstap"), 
Bidirectional: false} + w.writerDnstap, err = framestream.NewEncoder(fd, fsOptions) + if err != nil { + return err + } + + } + + w.LogInfo("file opened with success: %s", w.GetConfig().Loggers.LogFile.FilePath) + return nil +} + +func (w *LogFile) GetMaxSize() int64 { + return int64(1024*1024) * int64(w.GetConfig().Loggers.LogFile.MaxSize) +} + +func (w *LogFile) CompressFile() { + entries, err := os.ReadDir(w.fileDir) + if err != nil { + w.LogError("unable to list all files: %s", err) + return + } + + for _, entry := range entries { + // ignore folder + if entry.IsDir() { + continue + } + + matched, _ := regexp.MatchString(`^`+w.filePrefix+`-\d+`+w.fileExt+`$`, entry.Name()) + if matched { + src := filepath.Join(w.fileDir, entry.Name()) + dst := filepath.Join(w.fileDir, entry.Name()+compressSuffix) + + fd, err := os.Open(src) + if err != nil { + w.LogError("compress - failed to open file: ", err) + continue + } + defer fd.Close() + + fi, err := os.Stat(src) + if err != nil { + w.LogError("compress - failed to stat file: ", err) + continue + } + + gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode()) + if err != nil { + w.LogError("compress - failed to open compressed file: ", err) + continue + } + defer gzf.Close() + + gz := gzip.NewWriter(gzf) + + if _, err := io.Copy(gz, fd); err != nil { + w.LogError("compress - failed to compress file: ", err) + os.Remove(dst) + continue + } + if err := gz.Close(); err != nil { + w.LogError("compress - failed to close gz writer: ", err) + os.Remove(dst) + continue + } + if err := gzf.Close(); err != nil { + w.LogError("compress - failed to close gz file: ", err) + os.Remove(dst) + continue + } + + if err := fd.Close(); err != nil { + w.LogError("compress - failed to close log file: ", err) + os.Remove(dst) + continue + } + if err := os.Remove(src); err != nil { + w.LogError("compress - failed to remove log file: ", err) + os.Remove(dst) + continue + } + + // post rotate command? 
+ w.CompressPostRotateCommand(dst) + } + } + + w.commpressTimer.Reset(time.Duration(w.GetConfig().Loggers.LogFile.CompressInterval) * time.Second) +} + +func (w *LogFile) PostRotateCommand(filename string) { + if len(w.GetConfig().Loggers.LogFile.PostRotateCommand) > 0 { + w.LogInfo("execute postrotate command: %s", filename) + _, err := exec.Command(w.GetConfig().Loggers.LogFile.PostRotateCommand, filename).Output() + if err != nil { + w.LogError("postrotate command error: %s", err) + } else if w.GetConfig().Loggers.LogFile.PostRotateDelete { + os.Remove(filename) + } + } +} + +func (w *LogFile) CompressPostRotateCommand(filename string) { + if len(w.GetConfig().Loggers.LogFile.CompressPostCommand) > 0 { + + w.LogInfo("execute compress postrotate command: %s", filename) + _, err := exec.Command(w.GetConfig().Loggers.LogFile.CompressPostCommand, filename).Output() + if err != nil { + w.LogError("compress - postcommand error: %s", err) + } + } +} + +func (w *LogFile) FlushWriters() { + switch w.GetConfig().Loggers.LogFile.Mode { + case pkgconfig.ModeText, pkgconfig.ModeJSON, pkgconfig.ModeFlatJSON: + w.writerPlain.Flush() + case pkgconfig.ModeDNSTap: + w.writerDnstap.Flush() + } +} + +func (w *LogFile) RotateFile() error { + // close writer and existing file + w.FlushWriters() + + if w.GetConfig().Loggers.LogFile.Mode == pkgconfig.ModeDNSTap { + w.writerDnstap.Close() + } + + if err := w.fileFd.Close(); err != nil { + return err + } + + // Rename current log file + bfpath := filepath.Join(w.fileDir, fmt.Sprintf("%s-%d%s", w.filePrefix, time.Now().UnixNano(), w.fileExt)) + err := os.Rename(w.GetConfig().Loggers.LogFile.FilePath, bfpath) + if err != nil { + return err + } + + // post rotate command? 
+ w.PostRotateCommand(bfpath) + + // keep only max files + err = w.Cleanup() + if err != nil { + w.LogError("unable to cleanup log files: %s", err) + return err + } + + // re-create new one + if err := w.OpenFile(); err != nil { + w.LogError("unable to re-create file: %s", err) + return err + } + + return nil +} + +func (w *LogFile) WriteToPcap(dm dnsutils.DNSMessage, pkt []gopacket.SerializableLayer) { + // create the packet with the layers + buf := gopacket.NewSerializeBuffer() + opts := gopacket.SerializeOptions{ + FixLengths: true, + ComputeChecksums: true, + } + for _, layer := range pkt { + layer.SerializeTo(buf, opts) + } + + // rotate pcap file ? + bufSize := len(buf.Bytes()) + + if (w.fileSize + int64(bufSize)) > w.GetMaxSize() { + if err := w.RotateFile(); err != nil { + w.LogError("failed to rotate file: %s", err) + return + } + } + + ci := gopacket.CaptureInfo{ + Timestamp: time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec)), + CaptureLength: bufSize, + Length: bufSize, + } + + w.writerPcap.WritePacket(ci, buf.Bytes()) + + // increase size file + w.fileSize += int64(bufSize) +} + +func (w *LogFile) WriteToPlain(data []byte) { + dataSize := int64(len(data)) + + // rotate file ? + if (w.fileSize + dataSize) > w.GetMaxSize() { + if err := w.RotateFile(); err != nil { + w.LogError("failed to rotate file: %s", err) + return + } + } + + // write log to file + n, _ := w.writerPlain.Write(data) + + // increase size file + w.fileSize += int64(n) +} + +func (w *LogFile) WriteToDnstap(data []byte) { + dataSize := int64(len(data)) + + // rotate file ? 
+ if (w.fileSize + dataSize) > w.GetMaxSize() { + if err := w.RotateFile(); err != nil { + w.LogError("failed to rotate file: %s", err) + return + } + } + + // write log to file + n, _ := w.writerDnstap.Write(data) + + // increase size file + w.fileSize += int64(n) +} + +func (w *LogFile) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare transforms + subprocessors := transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0) + + // goroutine to process transformed dns messages + go w.StartLogging() + + // loop to process incoming messages + for { + select { + case <-w.OnStop(): + w.StopLogger() + subprocessors.Reset() + return + + // new config provided? + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.ReadConfig() + subprocessors.ReloadConfig(&cfg.OutgoingTransformers) + + case dm, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("input channel closed!") + return + } + // count global messages + w.CountIngressTraffic() + + // apply tranforms, init dns message with additionnals parts if necessary + transformResult, err := subprocessors.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // send to output channel + w.CountEgressTraffic() + w.GetOutputChannel() <- dm + + // send to next ? 
+ w.SendForwardedTo(defaultRoutes, defaultNames, dm) + } + } +} + +func (w *LogFile) StartLogging() { + w.LogInfo("logging has started") + defer w.LoggingDone() + + // prepare some timers + flushInterval := time.Duration(w.GetConfig().Loggers.LogFile.FlushInterval) * time.Second + flushTimer := time.NewTimer(flushInterval) + w.commpressTimer = time.NewTimer(time.Duration(w.GetConfig().Loggers.LogFile.CompressInterval) * time.Second) + + buffer := new(bytes.Buffer) + var data []byte + var err error + + for { + select { + case <-w.OnLoggerStopped(): + // stop timer + flushTimer.Stop() + w.commpressTimer.Stop() + + // flush writer + w.FlushWriters() + + // closing file + w.LogInfo("closing log file") + if w.GetConfig().Loggers.LogFile.Mode == pkgconfig.ModeDNSTap { + w.writerDnstap.Close() + } + w.fileFd.Close() + + return + + case dm, opened := <-w.GetOutputChannel(): + if !opened { + w.LogInfo("output channel closed!") + return + } + + // write to file + switch w.GetConfig().Loggers.LogFile.Mode { + + // with basic text mode + case pkgconfig.ModeText: + w.WriteToPlain(dm.Bytes(w.textFormat, + w.GetConfig().Global.TextFormatDelimiter, + w.GetConfig().Global.TextFormatBoundary)) + + var delimiter bytes.Buffer + delimiter.WriteString("\n") + w.WriteToPlain(delimiter.Bytes()) + + // with json mode + case pkgconfig.ModeFlatJSON: + flat, err := dm.Flatten() + if err != nil { + w.LogError("flattening DNS message failed: %e", err) + } + json.NewEncoder(buffer).Encode(flat) + w.WriteToPlain(buffer.Bytes()) + buffer.Reset() + + // with json mode + case pkgconfig.ModeJSON: + json.NewEncoder(buffer).Encode(dm) + w.WriteToPlain(buffer.Bytes()) + buffer.Reset() + + // with dnstap mode + case pkgconfig.ModeDNSTap: + data, err = dm.ToDNSTap(w.GetConfig().Loggers.LogFile.ExtendedSupport) + if err != nil { + w.LogError("failed to encode to DNStap protobuf: %s", err) + continue + } + w.WriteToDnstap(data) + + // with pcap mode + case pkgconfig.ModePCAP: + pkt, err := 
dm.ToPacketLayer() + if err != nil { + w.LogError("failed to encode to packet layer: %s", err) + continue + } + + // write the packet + w.WriteToPcap(dm, pkt) + } + + case <-flushTimer.C: + // flush writer + w.FlushWriters() + + // reset flush timer and buffer + buffer.Reset() + flushTimer.Reset(flushInterval) + + case <-w.commpressTimer.C: + if w.GetConfig().Loggers.LogFile.Compress { + w.CompressFile() + } + + } + } +} diff --git a/loggers/logfile_test.go b/workers/logfile_test.go similarity index 96% rename from loggers/logfile_test.go rename to workers/logfile_test.go index e1bb9660..f30f726b 100644 --- a/loggers/logfile_test.go +++ b/workers/logfile_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "fmt" @@ -46,7 +46,7 @@ func Test_LogFileText(t *testing.T) { defer os.Remove(f.Name()) // clean up // config - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() config.Loggers.LogFile.FilePath = f.Name() config.Loggers.LogFile.Mode = tc.mode config.Loggers.LogFile.FlushInterval = 0 @@ -55,7 +55,7 @@ func Test_LogFileText(t *testing.T) { g := NewLogFile(config, logger.New(false), "test") // start the logger - go g.Run() + go g.StartCollect() // send fake dns message to logger dm := dnsutils.GetFakeDNSMessage() @@ -89,7 +89,7 @@ func Test_LogFileWrite_PcapMode(t *testing.T) { defer os.Remove(f.Name()) // clean up // config - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() config.Loggers.LogFile.FilePath = f.Name() config.Loggers.LogFile.Mode = pkgconfig.ModePCAP diff --git a/workers/lokiclient.go b/workers/lokiclient.go new file mode 100644 index 00000000..10168737 --- /dev/null +++ b/workers/lokiclient.go @@ -0,0 +1,401 @@ +package workers + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + 
"github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" + "github.com/gogo/protobuf/proto" + "github.com/grafana/dskit/backoff" + "github.com/klauspost/compress/snappy" + + // go get github.com/grafana/loki/v3/pkg/logproto + "github.com/grafana/loki/v3/pkg/logproto" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/model/relabel" +) + +type LokiStream struct { + labels labels.Labels + config *pkgconfig.Config + logger *logger.Logger + stream *logproto.Stream + pushrequest *logproto.PushRequest + sizeentries int +} + +func (w *LokiStream) Init() { + // prepare stream with label name + w.stream = &logproto.Stream{} + w.stream.Labels = w.labels.String() + + // creates push request + w.pushrequest = &logproto.PushRequest{ + Streams: make([]logproto.Stream, 0, 1), + } +} + +func (w *LokiStream) ResetEntries() { + w.stream.Entries = nil + w.sizeentries = 0 + w.pushrequest.Reset() +} + +func (w *LokiStream) Encode2Proto() ([]byte, error) { + w.pushrequest.Streams = append(w.pushrequest.Streams, *w.stream) + + buf, err := proto.Marshal(w.pushrequest) + if err != nil { + fmt.Println(err) + } + buf = snappy.Encode(nil, buf) + return buf, nil +} + +type LokiClient struct { + *GenericWorker + httpclient *http.Client + textFormat []string + streams map[string]*LokiStream +} + +func NewLokiClient(config *pkgconfig.Config, logger *logger.Logger, name string) *LokiClient { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.LokiClient.ChannelBufferSize > 0 { + bufSize = config.Loggers.LokiClient.ChannelBufferSize + } + w := &LokiClient{GenericWorker: NewGenericWorker(config, logger, name, "loki", bufSize, pkgconfig.DefaultMonitor)} + w.streams = make(map[string]*LokiStream) + w.ReadConfig() + return w +} + +func (w *LokiClient) ReadConfig() { + if len(w.GetConfig().Loggers.LokiClient.TextFormat) > 0 { + w.textFormat = 
strings.Fields(w.GetConfig().Loggers.LokiClient.TextFormat) + } else { + w.textFormat = strings.Fields(w.GetConfig().Global.TextFormat) + } + + // tls client config + tlsOptions := netutils.TLSOptions{ + InsecureSkipVerify: w.GetConfig().Loggers.LokiClient.TLSInsecure, + MinVersion: w.GetConfig().Loggers.LokiClient.TLSMinVersion, + CAFile: w.GetConfig().Loggers.LokiClient.CAFile, + CertFile: w.GetConfig().Loggers.LokiClient.CertFile, + KeyFile: w.GetConfig().Loggers.LokiClient.KeyFile, + } + + tlsConfig, err := netutils.TLSClientConfig(tlsOptions) + if err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] loki - tls config failed:", err) + } + + // prepare http client + tr := &http.Transport{ + MaxIdleConns: 10, + IdleConnTimeout: 30 * time.Second, + DisableCompression: false, + TLSClientConfig: tlsConfig, + } + + // use proxy + if len(w.GetConfig().Loggers.LokiClient.ProxyURL) > 0 { + proxyURL, err := url.Parse(w.GetConfig().Loggers.LokiClient.ProxyURL) + if err != nil { + w.LogFatal("logger=loki - unable to parse proxy url: ", err) + } + tr.Proxy = http.ProxyURL(proxyURL) + } + + w.httpclient = &http.Client{Transport: tr} + + if w.GetConfig().Loggers.LokiClient.BasicAuthPwdFile != "" { + content, err := os.ReadFile(w.GetConfig().Loggers.LokiClient.BasicAuthPwdFile) + if err != nil { + w.LogFatal("logger=loki - unable to load password from file: ", err) + } + w.GetConfig().Loggers.LokiClient.BasicAuthPwd = string(content) + } +} + +func (w *LokiClient) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare transforms + subprocessors := transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0) + + // goroutine to process transformed dns messages + go w.StartLogging() + + // loop to 
process incoming messages + for { + select { + case <-w.OnStop(): + w.StopLogger() + subprocessors.Reset() + return + + // new config provided? + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.ReadConfig() + subprocessors.ReloadConfig(&cfg.OutgoingTransformers) + + case dm, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("input channel closed!") + return + } + // count global messages + w.CountIngressTraffic() + + // apply tranforms, init dns message with additionnals parts if necessary + transformResult, err := subprocessors.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // send to output channel + w.CountEgressTraffic() + w.GetOutputChannel() <- dm + + // send to next ? + w.SendForwardedTo(defaultRoutes, defaultNames, dm) + } + } +} + +func (w *LokiClient) StartLogging() { + w.LogInfo("logging has started") + defer w.LoggingDone() + + // prepare buffer + buffer := new(bytes.Buffer) + var byteBuffer []byte + + // prepare timers + tflushInterval := time.Duration(w.GetConfig().Loggers.LokiClient.FlushInterval) * time.Second + tflush := time.NewTimer(tflushInterval) + + for { + select { + case <-w.OnLoggerStopped(): + return + + // incoming dns message to process + case dm, opened := <-w.GetOutputChannel(): + if !opened { + w.LogInfo("output channel closed!") + return + } + + lbls := labels.Labels{ + labels.Label{Name: "identity", Value: dm.DNSTap.Identity}, + labels.Label{Name: "job", Value: w.GetConfig().Loggers.LokiClient.JobName}, + } + var err error + var flat map[string]interface{} + if len(w.GetConfig().Loggers.LokiClient.RelabelConfigs) > 0 { + // Save flattened JSON in case it's used when populating the message of the log entry. + // There is more room for improvement for reusing data though. 
Flatten() internally + // does a JSON encode of the DnsMessage, but it's not saved to use when the mode + // is JSON. + flat, err = dm.Flatten() + if err != nil { + w.LogError("flattening DNS message failed: %e", err) + } + sb := labels.NewScratchBuilder(len(lbls) + len(flat)) + sb.Assign(lbls) + for k, v := range flat { + sb.Add(fmt.Sprintf("__%s", strings.ReplaceAll(k, ".", "_")), fmt.Sprint(v)) + } + sb.Sort() + lbls, _ = relabel.Process(sb.Labels(), w.GetConfig().Loggers.LokiClient.RelabelConfigs...) + + // Drop all labels starting with __ from the map if a relabel config is used. + // These labels are just exposed to relabel for the user and should not be + // shipped to loki by default. + lb := labels.NewBuilder(lbls) + lbls.Range(func(l labels.Label) { + if l.Name[0:2] == "__" { + lb.Del(l.Name) + } + }) + lbls = lb.Labels() + + if len(lbls) == 0 { + w.LogInfo("dropping %v since it has no labels", dm) + continue + } + } + + // prepare entry + entry := logproto.Entry{} + entry.Timestamp = time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec)) + + switch w.GetConfig().Loggers.LokiClient.Mode { + case pkgconfig.ModeText: + entry.Line = string(dm.Bytes(w.textFormat, + w.GetConfig().Global.TextFormatDelimiter, + w.GetConfig().Global.TextFormatBoundary)) + case pkgconfig.ModeJSON: + json.NewEncoder(buffer).Encode(dm) + entry.Line = buffer.String() + buffer.Reset() + case pkgconfig.ModeFlatJSON: + if len(flat) == 0 { + flat, err = dm.Flatten() + if err != nil { + w.LogError("flattening DNS message failed: %e", err) + } + } + json.NewEncoder(buffer).Encode(flat) + entry.Line = buffer.String() + buffer.Reset() + } + key := string(lbls.Bytes(byteBuffer)) + ls, ok := w.streams[key] + if !ok { + ls = &LokiStream{config: w.GetConfig(), logger: w.GetLogger(), labels: lbls} + ls.Init() + w.streams[key] = ls + } + ls.sizeentries += len(entry.Line) + + // append entry to the stream + ls.stream.Entries = append(ls.stream.Entries, entry) + + // flush ? 
+ if ls.sizeentries >= w.GetConfig().Loggers.LokiClient.BatchSize { + // encode log entries + buf, err := ls.Encode2Proto() + if err != nil { + w.LogError("error encoding log entries - %v", err) + // reset push request and entries + ls.ResetEntries() + return + } + + // send all entries + w.SendEntries(buf) + + // reset entries and push request + ls.ResetEntries() + } + + case <-tflush.C: + for _, s := range w.streams { + if len(s.stream.Entries) > 0 { + // timeout + // encode log entries + buf, err := s.Encode2Proto() + if err != nil { + w.LogError("error encoding log entries - %v", err) + // reset push request and entries + s.ResetEntries() + // restart timer + tflush.Reset(tflushInterval) + return + } + + // send all entries + w.SendEntries(buf) + + // reset entries and push request + s.ResetEntries() + } + } + + // restart timer + tflush.Reset(tflushInterval) + } + } +} + +func (w *LokiClient) SendEntries(buf []byte) { + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + MinBackoff := 500 * time.Millisecond + MaxBackoff := 5 * time.Minute + MaxRetries := 10 + + backoff := backoff.New(ctx, backoff.Config{ + MaxBackoff: MaxBackoff, + MaxRetries: MaxRetries, + MinBackoff: MinBackoff, + }) + + for { + // send post http + post, err := http.NewRequest("POST", w.GetConfig().Loggers.LokiClient.ServerURL, bytes.NewReader(buf)) + if err != nil { + w.LogError("new http error: %s", err) + return + } + post = post.WithContext(ctx) + post.Header.Set("Content-Type", "application/x-protobuf") + post.Header.Set("User-Agent", w.GetConfig().GetServerIdentity()) + if len(w.GetConfig().Loggers.LokiClient.TenantID) > 0 { + post.Header.Set("X-Scope-OrgID", w.GetConfig().Loggers.LokiClient.TenantID) + } + + post.SetBasicAuth( + w.GetConfig().Loggers.LokiClient.BasicAuthLogin, + w.GetConfig().Loggers.LokiClient.BasicAuthPwd, + ) + + // send post and read response + resp, err := w.httpclient.Do(post) + if err != nil { + w.LogError("do http error: %s", err) + 
return + } + + // success ? + if resp.StatusCode > 0 && resp.StatusCode != 429 && resp.StatusCode/100 != 5 { + break + } + + // something is wrong, retry ? + if resp.StatusCode/100 != 2 { + scanner := bufio.NewScanner(io.LimitReader(resp.Body, 1024)) + line := "" + if scanner.Scan() { + line = scanner.Text() + } + w.LogError("server returned HTTP status %s (%d): %s", resp.Status, resp.StatusCode, line) + } + + // wait before retry + backoff.Wait() + + // Make sure it sends at least once before checking for retry. + if !backoff.Ongoing() { + break + } + } +} diff --git a/loggers/lokiclient_test.go b/workers/lokiclient_test.go similarity index 97% rename from loggers/lokiclient_test.go rename to workers/lokiclient_test.go index 0203e809..30055c88 100644 --- a/loggers/lokiclient_test.go +++ b/workers/lokiclient_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" @@ -46,13 +46,13 @@ func Test_LokiClientRun(t *testing.T) { for _, tc := range testcases { t.Run(tc.mode, func(t *testing.T) { // init logger - cfg := pkgconfig.GetFakeConfig() + cfg := pkgconfig.GetDefaultConfig() cfg.Loggers.LokiClient.Mode = tc.mode cfg.Loggers.LokiClient.BatchSize = 0 g := NewLokiClient(cfg, logger.New(false), "test") // start the logger - go g.Run() + go g.StartCollect() // send fake dns message to logger dm := dnsutils.GetFakeDNSMessage() @@ -148,14 +148,14 @@ func Test_LokiClientRelabel(t *testing.T) { for _, m := range []string{pkgconfig.ModeText, pkgconfig.ModeJSON, pkgconfig.ModeFlatJSON} { t.Run(fmt.Sprint(m, tc.relabelConfig, tc.labelsPattern), func(t *testing.T) { // init logger - cfg := pkgconfig.GetFakeConfig() + cfg := pkgconfig.GetDefaultConfig() cfg.Loggers.LokiClient.Mode = m cfg.Loggers.LokiClient.BatchSize = 0 cfg.Loggers.LokiClient.RelabelConfigs = tc.relabelConfig g := NewLokiClient(cfg, logger.New(false), "test") // start the logger - go g.Run() + go g.StartCollect() // send fake dns message to logger dm := dnsutils.GetFakeDNSMessage() diff --git 
a/processors/powerdns.go b/workers/powerdns.go similarity index 51% rename from processors/powerdns.go rename to workers/powerdns.go index 402ca175..b533a41f 100644 --- a/processors/powerdns.go +++ b/workers/powerdns.go @@ -1,143 +1,259 @@ -package processors +package workers import ( + "bufio" + "errors" "fmt" + "io" "net" "strconv" "strings" + "sync" + "sync/atomic" "time" "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" "github.com/dmachard/go-dnscollector/transformers" "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" powerdns_protobuf "github.com/dmachard/go-powerdns-protobuf" "github.com/miekg/dns" "google.golang.org/protobuf/proto" ) -var ( - ProtobufPowerDNSToDNSTap = map[string]string{ - "DNSQueryType": "CLIENT_QUERY", - "DNSResponseType": "CLIENT_RESPONSE", - "DNSOutgoingQueryType": "RESOLVER_QUERY", - "DNSIncomingResponseType": "RESOLVER_RESPONSE", - } -) - -type PdnsProcessor struct { - ConnID int - doneRun chan bool - stopRun chan bool - doneMonitor chan bool - stopMonitor chan bool - recvFrom chan []byte - logger *logger.Logger - config *pkgconfig.Config - ConfigChan chan *pkgconfig.Config - name string - chanSize int - RoutingHandler pkgutils.RoutingHandler - dropped chan string - droppedCount map[string]int +type PdnsServer struct { + *GenericWorker + connCounter uint64 } -func NewPdnsProcessor(connID int, config *pkgconfig.Config, logger *logger.Logger, name string, size int) PdnsProcessor { - logger.Info(pkgutils.PrefixLogProcessor+"[%s] powerdns - conn #%d - initialization...", name, connID) - d := PdnsProcessor{ - ConnID: connID, - doneMonitor: make(chan bool), - doneRun: make(chan bool), - stopMonitor: make(chan bool), - stopRun: make(chan bool), - recvFrom: make(chan []byte, size), - chanSize: size, - logger: 
logger, - config: config, - ConfigChan: make(chan *pkgconfig.Config), - name: name, - RoutingHandler: pkgutils.NewRoutingHandler(config, logger, name), - dropped: make(chan string), - droppedCount: map[string]int{}, +func NewPdnsServer(next []Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *PdnsServer { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Collectors.PowerDNS.ChannelBufferSize > 0 { + bufSize = config.Collectors.PowerDNS.ChannelBufferSize } - return d + w := &PdnsServer{GenericWorker: NewGenericWorker(config, logger, name, "powerdns", bufSize, pkgconfig.DefaultMonitor)} + w.SetDefaultRoutes(next) + w.CheckConfig() + return w } -func (p *PdnsProcessor) LogInfo(msg string, v ...interface{}) { - var log string - if p.ConnID == 0 { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] powerdns - ", p.name) - } else { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] powerdns - conn #%d - ", p.name, p.ConnID) +func (w *PdnsServer) CheckConfig() { + if !netutils.IsValidTLS(w.GetConfig().Collectors.PowerDNS.TLSMinVersion) { + w.LogFatal(pkgconfig.PrefixLogWorker + "[" + w.GetName() + "] invalid tls min version") } - p.logger.Info(log+msg, v...) 
} -func (p *PdnsProcessor) LogError(msg string, v ...interface{}) { - var log string - if p.ConnID == 0 { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] powerdns - ", p.name) - } else { - log = fmt.Sprintf(pkgutils.PrefixLogProcessor+"[%s] powerdns - conn #%d - ", p.name, p.ConnID) +func (w *PdnsServer) HandleConn(conn net.Conn, connID uint64, forceClose chan bool, wg *sync.WaitGroup) { + // close connection on function exit + defer func() { + w.LogInfo("conn #%d - connection handler terminated", connID) + netutils.Close(conn, w.GetConfig().Collectors.Dnstap.ResetConn) + wg.Done() + }() + + // get peer address + peer := conn.RemoteAddr().String() + peerName := netutils.GetPeerName(peer) + w.LogInfo("new connection #%d from %s (%s)", connID, peer, peerName) + + // start protobuf subprocessor + bufSize := w.GetConfig().Global.Worker.ChannelBufferSize + if w.GetConfig().Collectors.PowerDNS.ChannelBufferSize > 0 { + bufSize = w.GetConfig().Collectors.PowerDNS.ChannelBufferSize + } + pdnsProcessor := NewPdnsProcessor(int(connID), peerName, w.GetConfig(), w.GetLogger(), w.GetName(), bufSize) + pdnsProcessor.SetDefaultRoutes(w.GetDefaultRoutes()) + pdnsProcessor.SetDefaultDropped(w.GetDroppedRoutes()) + go pdnsProcessor.StartCollect() + + r := bufio.NewReader(conn) + pbs := powerdns_protobuf.NewProtobufStream(r, conn, 5*time.Second) + + var err error + var payload *powerdns_protobuf.ProtoPayload + cleanup := make(chan struct{}) + + // goroutine to close the connection properly + go func() { + defer func() { + pdnsProcessor.Stop() + w.LogInfo("conn #%d - cleanup connection handler terminated", connID) + }() + + for { + select { + case <-forceClose: + w.LogInfo("conn #%d - force to cleanup the connection handler", connID) + netutils.Close(conn, w.GetConfig().Collectors.Dnstap.ResetConn) + return + case <-cleanup: + w.LogInfo("conn #%d - cleanup the connection handler", connID) + return + } + } + }() + + for { + payload, err = pbs.RecvPayload(false) + if err != nil { 
+ connClosed := false + + var opErr *net.OpError + if errors.As(err, &opErr) { + if errors.Is(opErr, net.ErrClosed) { + connClosed = true + } + } + if errors.Is(err, io.EOF) { + connClosed = true + } + + if connClosed { + w.LogInfo("conn #%d - connection closed with peer %s", connID, peer) + } else { + w.LogError("conn #%d - powerdns reader error: %s", connID, err) + } + + // exit goroutine + close(cleanup) + break + } + + // send payload to the channel + select { + case pdnsProcessor.GetDataChannel() <- payload.Data(): // Successful send + default: + w.WorkerIsBusy("dnstap-processor") + } } - p.logger.Error(log+msg, v...) } -func (p *PdnsProcessor) GetChannel() chan []byte { - return p.recvFrom +func (w *PdnsServer) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + var connWG sync.WaitGroup + connCleanup := make(chan bool) + cfg := w.GetConfig().Collectors.PowerDNS + + // start to listen + listener, err := netutils.StartToListen( + cfg.ListenIP, cfg.ListenPort, "", + cfg.TLSSupport, netutils.TLSVersion[cfg.TLSMinVersion], + cfg.CertFile, cfg.KeyFile) + if err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] listening failed: ", err) + } + w.LogInfo("listening on %s", listener.Addr()) + + // goroutine to Accept() blocks waiting for new connection. 
+ acceptChan := make(chan net.Conn) + netutils.AcceptConnections(listener, acceptChan) + + // main loop + for { + select { + case <-w.OnStop(): + w.LogInfo("stop to listen...") + listener.Close() + + w.LogInfo("closing connected peers...") + close(connCleanup) + connWG.Wait() + return + + // save the new config + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.CheckConfig() + + case conn, opened := <-acceptChan: + if !opened { + return + } + + if w.GetConfig().Collectors.Dnstap.RcvBufSize > 0 { + before, actual, err := netutils.SetSockRCVBUF(conn, cfg.RcvBufSize, cfg.TLSSupport) + if err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] unable to set SO_RCVBUF: ", err) + } + w.LogInfo("set SO_RCVBUF option, value before: %d, desired: %d, actual: %d", before, cfg.RcvBufSize, actual) + } + + // handle the connection + connWG.Add(1) + connID := atomic.AddUint64(&w.connCounter, 1) + go w.HandleConn(conn, connID, connCleanup, &connWG) + + } + } } -func (p *PdnsProcessor) Stop() { - p.LogInfo("stopping processor...") - p.RoutingHandler.Stop() +var ( + ProtobufPowerDNSToDNSTap = map[string]string{ + "DNSQueryType": "CLIENT_QUERY", + "DNSResponseType": "CLIENT_RESPONSE", + "DNSOutgoingQueryType": "RESOLVER_QUERY", + "DNSIncomingResponseType": "RESOLVER_RESPONSE", + } +) - p.LogInfo("stopping to process...") - p.stopRun <- true - <-p.doneRun +type PdnsProcessor struct { + *GenericWorker + ConnID int + PeerName string + dataChannel chan []byte +} + +func NewPdnsProcessor(connID int, peerName string, config *pkgconfig.Config, logger *logger.Logger, name string, size int) PdnsProcessor { + w := PdnsProcessor{GenericWorker: NewGenericWorker(config, logger, name, "powerdns processor #"+strconv.Itoa(connID), size, pkgconfig.DefaultMonitor)} + w.ConnID = connID + w.PeerName = peerName + w.dataChannel = make(chan []byte, size) + return w +} - p.LogInfo("stopping to monitor loggers...") - p.stopMonitor <- true - <-p.doneMonitor +func (w *PdnsProcessor) 
GetDataChannel() chan []byte { + return w.dataChannel } -func (p *PdnsProcessor) Run(defaultWorkers []pkgutils.Worker, droppedworkers []pkgutils.Worker) { +func (w *PdnsProcessor) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + pbdm := &powerdns_protobuf.PBDNSMessage{} // prepare next channels - defaultRoutes, defaultNames := pkgutils.GetRoutes(defaultWorkers) - droppedRoutes, droppedNames := pkgutils.GetRoutes(droppedworkers) + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) // prepare enabled transformers - transforms := transformers.NewTransforms(&p.config.IngoingTransformers, p.logger, p.name, defaultRoutes, p.ConnID) - - // start goroutine to count dropped messsages - go p.MonitorLoggers() + transforms := transformers.NewTransforms(&w.GetConfig().IngoingTransformers, w.GetLogger(), w.GetName(), defaultRoutes, w.ConnID) // read incoming dns message - p.LogInfo("waiting dns message to process...") -RUN_LOOP: for { select { - case cfg := <-p.ConfigChan: - p.config = cfg + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) transforms.ReloadConfig(&cfg.IngoingTransformers) - case <-p.stopRun: + case <-w.OnStop(): transforms.Reset() - p.doneRun <- true - break RUN_LOOP + close(w.GetDataChannel()) + return - case data, opened := <-p.recvFrom: + case data, opened := <-w.GetDataChannel(): if !opened { - p.LogInfo("channel closed, exit") + w.LogInfo("channel closed, exit") return } + // count global messages + w.CountIngressTraffic() err := proto.Unmarshal(data, pbdm) if err != nil { - p.LogError("pbdm decoding, %s", err) + w.LogError("pbdm decoding, %s", err) continue } @@ -145,9 +261,6 @@ RUN_LOOP: dm := dnsutils.DNSMessage{} dm.Init() - // init dns message with additionnals parts - transforms.InitDNSMessageFormat(&dm) - // init powerdns with default values dm.PowerDNS = &dnsutils.PowerDNS{ Tags: []string{}, @@ -159,7 +272,7 @@ RUN_LOOP: dm.DNSTap.Identity = 
string(pbdm.GetServerIdentity()) dm.DNSTap.Operation = ProtobufPowerDNSToDNSTap[pbdm.GetType().String()] - if ipVersion, valid := netlib.IPVersion[pbdm.GetSocketFamily().String()]; valid { + if ipVersion, valid := netutils.IPVersion[pbdm.GetSocketFamily().String()]; valid { dm.NetworkInfo.Family = ipVersion } else { dm.NetworkInfo.Family = pkgconfig.StrUnknown @@ -241,6 +354,11 @@ RUN_LOOP: } pdns.Metadata = metas + // get http protocol version + if pbdm.GetSocketProtocol().String() == "DOH" { + pdns.HTTPVersion = pbdm.GetHttpVersion().String() + } + // finally set pdns to dns message dm.PowerDNS = &pdns @@ -263,7 +381,7 @@ RUN_LOOP: rr := dnsutils.DNSAnswer{ Name: RRs[j].GetName(), Rdatatype: dnsutils.RdatatypeToString(int(RRs[j].GetType())), - Class: int(RRs[j].GetClass()), + Class: dnsutils.ClassToString(int(RRs[j].GetClass())), TTL: int(RRs[j].GetTtl()), Rdata: rdata, } @@ -271,7 +389,7 @@ RUN_LOOP: } dm.DNS.DNSRRs.Answers = answers - if p.config.Collectors.PowerDNS.AddDNSPayload { + if w.GetConfig().Collectors.PowerDNS.AddDNSPayload { qname := dns.Fqdn(pbdm.Question.GetQName()) newDNS := new(dns.Msg) @@ -332,61 +450,21 @@ RUN_LOOP: } } + // count output packets + w.CountEgressTraffic() + // apply all enabled transformers - if transforms.ProcessMessage(&dm) == transformers.ReturnDrop { - for i := range droppedRoutes { - select { - case droppedRoutes[i] <- dm: // Successful send to logger channel - default: - p.dropped <- droppedNames[i] - } - } + transformResult, err := transforms.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) continue } // dispatch dns messages to connected loggers - for i := range defaultRoutes { - select { - case defaultRoutes[i] <- dm: // Successful send to logger channel - default: - p.dropped <- defaultNames[i] - } - } - } - } - p.LogInfo("processing terminated") -} - -func (p *PdnsProcessor) MonitorLoggers() { - 
watchInterval := 10 * time.Second - bufferFull := time.NewTimer(watchInterval) -FOLLOW_LOOP: - for { - select { - case <-p.stopMonitor: - close(p.dropped) - bufferFull.Stop() - p.doneMonitor <- true - break FOLLOW_LOOP - - case loggerName := <-p.dropped: - if _, ok := p.droppedCount[loggerName]; !ok { - p.droppedCount[loggerName] = 1 - } else { - p.droppedCount[loggerName]++ - } - - case <-bufferFull.C: - - for v, k := range p.droppedCount { - if k > 0 { - p.LogError("logger[%s] buffer is full, %d packet(s) dropped", v, k) - p.droppedCount[v] = 0 - } - } - bufferFull.Reset(watchInterval) - + w.SendForwardedTo(defaultRoutes, defaultNames, dm) } } - p.LogInfo("monitor terminated") } diff --git a/processors/powerdns_test.go b/workers/powerdns_test.go similarity index 71% rename from processors/powerdns_test.go rename to workers/powerdns_test.go index 8b72dc6d..2d8cce7b 100644 --- a/processors/powerdns_test.go +++ b/workers/powerdns_test.go @@ -1,29 +1,50 @@ -package processors +package workers import ( "fmt" + "net" "regexp" "testing" "time" "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" powerdns_protobuf "github.com/dmachard/go-powerdns-protobuf" "github.com/miekg/dns" "google.golang.org/protobuf/proto" ) +func TestPowerDNS_Run(t *testing.T) { + g := GetWorkerForTest(pkgconfig.DefaultBufferSize) + + c := NewPdnsServer([]Worker{g}, pkgconfig.GetDefaultConfig(), logger.New(false), "test") + go c.StartCollect() + + // wait before to connect + time.Sleep(1 * time.Second) + conn, err := net.Dial(netutils.SocketTCP, ":6001") + if err != nil { + t.Error("could not connect to TCP server: ", err) + } + defer conn.Close() +} + func Test_PowerDNSProcessor(t *testing.T) { + + fl := GetWorkerForTest(pkgconfig.DefaultBufferSize) + // init the dnstap consumer - consumer := NewPdnsProcessor(0, pkgconfig.GetFakeConfig(), 
logger.New(false), "test", 512) + consumer := NewPdnsProcessor(0, "peername", pkgconfig.GetDefaultConfig(), logger.New(false), "test", 512) + consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) // init the powerdns processor dnsQname := pkgconfig.ValidDomain dnsQuestion := powerdns_protobuf.PBDNSMessage_DNSQuestion{QName: &dnsQname} dm := &powerdns_protobuf.PBDNSMessage{} - dm.ServerIdentity = []byte(ExpectedIdentity) + dm.ServerIdentity = []byte(pkgconfig.ExpectedIdentity) dm.Type = powerdns_protobuf.PBDNSMessage_DNSQueryType.Enum() dm.SocketProtocol = powerdns_protobuf.PBDNSMessage_DNSCryptUDP.Enum() dm.SocketFamily = powerdns_protobuf.PBDNSMessage_INET.Enum() @@ -32,32 +53,36 @@ func Test_PowerDNSProcessor(t *testing.T) { data, _ := proto.Marshal(dm) // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + go consumer.StartCollect() // add packet to consumer - consumer.GetChannel() <- data + consumer.GetDataChannel() <- data // read dns message from dnstap consumer msg := <-fl.GetInputChannel() - if msg.DNSTap.Identity != ExpectedIdentity { + if msg.DNSTap.Identity != pkgconfig.ExpectedIdentity { t.Errorf("invalid identity in dns message: %s", msg.DNSTap.Identity) } } func Test_PowerDNSProcessor_AddDNSPayload_Valid(t *testing.T) { - cfg := pkgconfig.GetFakeConfig() + // run the consumer with a fake logger + fl := GetWorkerForTest(pkgconfig.DefaultBufferSize) + + cfg := pkgconfig.GetDefaultConfig() cfg.Collectors.PowerDNS.AddDNSPayload = true // init the powerdns processor - consumer := NewPdnsProcessor(0, cfg, logger.New(false), "test", 512) + consumer := NewPdnsProcessor(0, "peername", cfg, logger.New(false), "test", 512) + consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) // prepare powerdns message dnsQname := pkgconfig.ValidDomain dnsQuestion := powerdns_protobuf.PBDNSMessage_DNSQuestion{QName: &dnsQname} dm := &powerdns_protobuf.PBDNSMessage{} - 
dm.ServerIdentity = []byte(ExpectedIdentity) + dm.ServerIdentity = []byte(pkgconfig.ExpectedIdentity) dm.Id = proto.Uint32(2000) dm.Type = powerdns_protobuf.PBDNSMessage_DNSQueryType.Enum() dm.SocketProtocol = powerdns_protobuf.PBDNSMessage_DNSCryptUDP.Enum() @@ -67,11 +92,9 @@ func Test_PowerDNSProcessor_AddDNSPayload_Valid(t *testing.T) { data, _ := proto.Marshal(dm) // start the consumer and add packet - // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + go consumer.StartCollect() - consumer.GetChannel() <- data + consumer.GetDataChannel() <- data // read dns message msg := <-fl.GetInputChannel() @@ -96,11 +119,16 @@ func Test_PowerDNSProcessor_AddDNSPayload_Valid(t *testing.T) { } func Test_PowerDNSProcessor_AddDNSPayload_InvalidLabelLength(t *testing.T) { - cfg := pkgconfig.GetFakeConfig() + + fl := GetWorkerForTest(pkgconfig.DefaultBufferSize) + + cfg := pkgconfig.GetDefaultConfig() cfg.Collectors.PowerDNS.AddDNSPayload = true // init the dnstap consumer - consumer := NewPdnsProcessor(0, cfg, logger.New(false), "test", 512) + consumer := NewPdnsProcessor(0, "peername", cfg, logger.New(false), "test", 512) + consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) // prepare dnstap dnsQname := pkgconfig.BadDomainLabel @@ -117,11 +145,10 @@ func Test_PowerDNSProcessor_AddDNSPayload_InvalidLabelLength(t *testing.T) { data, _ := proto.Marshal(dm) // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + go consumer.StartCollect() // add packet to consumer - consumer.GetChannel() <- data + consumer.GetDataChannel() <- data // read dns message from dnstap consumer msg := <-fl.GetInputChannel() @@ -131,11 +158,16 @@ func Test_PowerDNSProcessor_AddDNSPayload_InvalidLabelLength(t *testing.T) { } func Test_PowerDNSProcessor_AddDNSPayload_QnameTooLongDomain(t *testing.T) { - cfg := 
pkgconfig.GetFakeConfig() + + fl := GetWorkerForTest(pkgconfig.DefaultBufferSize) + + cfg := pkgconfig.GetDefaultConfig() cfg.Collectors.PowerDNS.AddDNSPayload = true // init the dnstap consumer - consumer := NewPdnsProcessor(0, cfg, logger.New(false), "test", 512) + consumer := NewPdnsProcessor(0, "peername", cfg, logger.New(false), "test", 512) + consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) // prepare dnstap dnsQname := pkgconfig.BadVeryLongDomain @@ -151,11 +183,10 @@ func Test_PowerDNSProcessor_AddDNSPayload_QnameTooLongDomain(t *testing.T) { data, _ := proto.Marshal(dm) // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + go consumer.StartCollect() // add packet to consumer - consumer.GetChannel() <- data + consumer.GetDataChannel() <- data // read dns message from dnstap consumer msg := <-fl.GetInputChannel() @@ -165,11 +196,16 @@ func Test_PowerDNSProcessor_AddDNSPayload_QnameTooLongDomain(t *testing.T) { } func Test_PowerDNSProcessor_AddDNSPayload_AnswersTooLongDomain(t *testing.T) { - cfg := pkgconfig.GetFakeConfig() + + fl := GetWorkerForTest(pkgconfig.DefaultBufferSize) + + cfg := pkgconfig.GetDefaultConfig() cfg.Collectors.PowerDNS.AddDNSPayload = true // init the dnstap consumer - consumer := NewPdnsProcessor(0, cfg, logger.New(false), "test", 512) + consumer := NewPdnsProcessor(0, "peername", cfg, logger.New(false), "test", 512) + consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) // prepare dnstap dnsQname := pkgconfig.ValidDomain @@ -196,11 +232,10 @@ func Test_PowerDNSProcessor_AddDNSPayload_AnswersTooLongDomain(t *testing.T) { data, _ := proto.Marshal(dm) // run the consumer with a fake logger - fl := pkgutils.NewFakeLogger() - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + go consumer.StartCollect() // add packet to consumer - consumer.GetChannel() <- data + consumer.GetDataChannel() <- data // read dns message from dnstap 
consumer msg := <-fl.GetInputChannel() @@ -213,21 +248,26 @@ func Test_PowerDNSProcessor_AddDNSPayload_AnswersTooLongDomain(t *testing.T) { // test for issue https://github.com/dmachard/go-dnscollector/issues/568 func Test_PowerDNSProcessor_BufferLoggerIsFull(t *testing.T) { + + fl := GetWorkerForTest(pkgconfig.DefaultBufferOne) + // redirect stdout output to bytes buffer logsChan := make(chan logger.LogEntry, 10) lg := logger.New(true) lg.SetOutputChannel((logsChan)) // init the dnstap consumer - cfg := pkgconfig.GetFakeConfig() - consumer := NewPdnsProcessor(0, cfg, lg, "test", 512) + cfg := pkgconfig.GetDefaultConfig() + consumer := NewPdnsProcessor(0, "peername", cfg, lg, "test", 512) + consumer.AddDefaultRoute(fl) + consumer.AddDroppedRoute(fl) // init the powerdns processor dnsQname := pkgconfig.ValidDomain dnsQuestion := powerdns_protobuf.PBDNSMessage_DNSQuestion{QName: &dnsQname} dm := &powerdns_protobuf.PBDNSMessage{} - dm.ServerIdentity = []byte(ExpectedIdentity) + dm.ServerIdentity = []byte(pkgconfig.ExpectedIdentity) dm.Type = powerdns_protobuf.PBDNSMessage_DNSQueryType.Enum() dm.SocketProtocol = powerdns_protobuf.PBDNSMessage_DNSCryptUDP.Enum() dm.SocketFamily = powerdns_protobuf.PBDNSMessage_INET.Enum() @@ -236,12 +276,11 @@ func Test_PowerDNSProcessor_BufferLoggerIsFull(t *testing.T) { data, _ := proto.Marshal(dm) // run the consumer with a fake logger - fl := pkgutils.NewFakeLoggerWithBufferSize(1) - go consumer.Run([]pkgutils.Worker{fl}, []pkgutils.Worker{fl}) + go consumer.StartCollect() // add packets to consumer for i := 0; i < 512; i++ { - consumer.GetChannel() <- data + consumer.GetDataChannel() <- data } // waiting monitor to run in consumer @@ -249,7 +288,7 @@ func Test_PowerDNSProcessor_BufferLoggerIsFull(t *testing.T) { for entry := range logsChan { fmt.Println(entry) - pattern := regexp.MustCompile(ExpectedBufferMsg511) + pattern := regexp.MustCompile(pkgconfig.ExpectedBufferMsg511) if pattern.MatchString(entry.Message) { break } 
@@ -257,20 +296,20 @@ func Test_PowerDNSProcessor_BufferLoggerIsFull(t *testing.T) { // read dns message from dnstap consumer msg := <-fl.GetInputChannel() - if msg.DNSTap.Identity != ExpectedIdentity { + if msg.DNSTap.Identity != pkgconfig.ExpectedIdentity { t.Errorf("invalid identity in dns message: %s", msg.DNSTap.Identity) } // send second shot of packets to consumer for i := 0; i < 1024; i++ { - consumer.GetChannel() <- data + consumer.GetDataChannel() <- data } // waiting monitor to run in consumer time.Sleep(12 * time.Second) for entry := range logsChan { fmt.Println(entry) - pattern := regexp.MustCompile(ExpectedBufferMsg1023) + pattern := regexp.MustCompile(pkgconfig.ExpectedBufferMsg1023) if pattern.MatchString(entry.Message) { break } @@ -278,7 +317,7 @@ func Test_PowerDNSProcessor_BufferLoggerIsFull(t *testing.T) { // read just one dns message from dnstap consumer msg2 := <-fl.GetInputChannel() - if msg2.DNSTap.Identity != ExpectedIdentity { + if msg2.DNSTap.Identity != pkgconfig.ExpectedIdentity { t.Errorf("invalid identity in second dns message: %s", msg2.DNSTap.Identity) } } diff --git a/workers/prometheus.go b/workers/prometheus.go new file mode 100644 index 00000000..a0c94b12 --- /dev/null +++ b/workers/prometheus.go @@ -0,0 +1,1234 @@ +package workers + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/http" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/telemetry" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" + "github.com/dmachard/go-topmap" + "github.com/hashicorp/golang-lru/v2/expirable" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + 
"github.com/prometheus/client_golang/prometheus/collectors/version" + "github.com/prometheus/client_golang/prometheus/promhttp" + // _ "net/http/pprof" +) + +/* +This is the list of available label values selectors. +Configuration may specify a list of lables to use for metrics. +Any label in this catalogueSelectors can be specidied in config (prometheus-labels stanza) +*/ +var catalogueSelectors map[string]func(*dnsutils.DNSMessage) string = map[string]func(*dnsutils.DNSMessage) string{ + "stream_id": GetStreamID, + "resolver": GetResolverIP, + "stream_global": GetStreamGlobal, +} + +/* +EpsCounters (Events Per Second) - is a set of metrics we calculate on per-second basis. +For others we rely on averaging by collector +*/ +type EpsCounters struct { + Eps, EpsMax uint64 + TotalEvents, TotalEventsPrev uint64 + + TotalRcodes, TotalQtypes map[string]float64 + TotalIPVersion, TotalIPProtocol map[string]float64 + TotalDNSMessages float64 + TotalQueries, TotalReplies int + TotalBytes, TotalBytesSent, TotalBytesReceived int + + TotalTC, TotalAA, TotalRA, TotalAD float64 + TotalMalformed, TotalFragmented, TotalReasembled float64 +} + +type PrometheusCountersCatalogue interface { + // Prometheus logger encapsulates stats counters (PrometheusCounterSet) inside + // PromCounterCatalogueContainer's. For each label the logger creates a nested level + // of containers. + // Containers and CounterSets must implemnent PrometheusCountersCatalogue interface + // to allow fetching a CounterSet by the list of metric/values by fetching values from + // the DNS message it logs. + // There is a schematic sample layout when there are 2 labels considered at the end of this file + GetCountersSet(*dnsutils.DNSMessage) PrometheusCountersCatalogue +} + +// This type represents a set of counters for a unique set of label name=value pairs. 
+// By default, we create a set per setream_id for backward compatibility +// However, we can allow slicing and dicing data using more dimensions. +// Each CounterSet is registered with Prometheus collection independently (wrapping label values) +type PrometheusCountersSet struct { + prom *Prometheus + + // LRU cache counters per domains and IP + requesters, allDomains *expirable.LRU[string, int] // Requests number made by a specific requestor and to find out about a specific domain + validDomains, nxDomains, sfDomains *expirable.LRU[string, int] // Requests number ended up in NOERROR, NXDOMAIN and in SERVFAIL + tlds, etldplusone *expirable.LRU[string, int] // Requests number for a specific TLD and eTLD+1 + suspicious, evicted *expirable.LRU[string, int] // Requests number for a specific name that looked suspicious and for a specific name that timed out + + epsCounters EpsCounters + + topRequesters, topAllDomains, topEvicted *topmap.TopMap + topValidDomains, topSfDomains, topNxDomains *topmap.TopMap + topTlds, topETLDPlusOne *topmap.TopMap + topSuspicious *topmap.TopMap + + labels prometheus.Labels // Do we really need to keep that map outside of registration? + sync.Mutex // Each PrometheusCountersSet locks independently +} + +// PromCounterCatalogueContainer is the implementation of PrometheusCountersCatalogue interface +// That maps a single label into other Containers or CounterSet +// The 'chain' of nested Containers keep track of labelNames requested by the config +// to figure out whether nested Container should be created, or, if all labels but the last one +// already considered at the upper levels, it is time to create individual CounterSet +type PromCounterCatalogueContainer struct { + prom *Prometheus + + // labelNames - is a list of label *names* for PromCounterCatalogueContainer's in stats + // map to use to get proper selectors. 
+ // The topmost instance of PromCounterCatalogueContainer has the full list of all names to + // consider (the one provided by the config). Whenver it needs to create a new item in + // it's stats map, it suplies labelNames[1:] to the constructor for the lower level + // container to get the selector for the next level + labelNames []string // This is list of label names for nested containers + + // This is the unique set of label-value pairs for this catalogue element. + // The topmost Catalog has it empty, when it creates a new entry it provides the pair of + // labelNames[0]->selector(message) to the constructor. Lower levels get these pair + // collected. Ultimately, when all label names in labelNames is exausted, Catalogue creates + // an instance of newPrometheusCounterSet and provides it with labels map to properly wrap + // in Prometheus registry. + // The goal is to separate label/values pairs construction and individual counters collection + labels map[string]string // This is the set of label=value pairs we collected to this level + stats map[string]PrometheusCountersCatalogue + + // selector is a function that obtains a value for a label considering DNS Message data + // in most cases - just a field of that message + selector func(*dnsutils.DNSMessage) string + + sync.RWMutex +} + +/* +Selectors +*/ +func GetStreamGlobal(dm *dnsutils.DNSMessage) string { + return "enabled" +} + +func GetStreamID(dm *dnsutils.DNSMessage) string { + return dm.DNSTap.Identity +} + +func GetResolverIP(dm *dnsutils.DNSMessage) string { + return dm.NetworkInfo.ResponseIP +} + +type Prometheus struct { + *GenericWorker + doneAPI chan bool + httpServer *http.Server + netListener net.Listener + promRegistry *prometheus.Registry + + sync.Mutex + catalogueLabels []string + counters *PromCounterCatalogueContainer + + // All metrics use these descriptions when regestering + gaugeTopDomains, gaugeTopRequesters *prometheus.Desc + gaugeTopNoerrDomains, gaugeTopNxDomains, 
gaugeTopSfDomains *prometheus.Desc + gaugeTopTlds, gaugeTopETldsPlusOne *prometheus.Desc + gaugeTopSuspicious, gaugeTopEvicted *prometheus.Desc + + gaugeDomainsAll, gaugeRequesters *prometheus.Desc + gaugeDomainsValid, gaugeDomainsNx, gaugeDomainsSf *prometheus.Desc + gaugeTlds, gaugeETldPlusOne *prometheus.Desc + gaugeSuspicious, gaugeEvicted *prometheus.Desc + + gaugeEps, gaugeEpsMax *prometheus.Desc + + counterQtypes, counterRcodes *prometheus.Desc + counterIPProtocol, counterIPVersion *prometheus.Desc + counterDNSMessages, counterDNSQueries, counterDNSReplies *prometheus.Desc + + counterFlagsTC, counterFlagsAA *prometheus.Desc + counterFlagsRA, counterFlagsAD *prometheus.Desc + counterFlagsMalformed, counterFlagsFragmented, counterFlagsReassembled *prometheus.Desc + + totalBytes, totalReceivedBytes, totalSentBytes *prometheus.Desc + + // Histograms are heavy and expensive, turned off + // by default in configuration + histogramQueriesLength, histogramRepliesLength *prometheus.HistogramVec + histogramQnamesLength, histogramLatencies *prometheus.HistogramVec +} + +func newPrometheusCounterSet(w *Prometheus, labels prometheus.Labels) *PrometheusCountersSet { + pcs := &PrometheusCountersSet{ + prom: w, + labels: labels, + requesters: expirable.NewLRU[string, int](w.GetConfig().Loggers.Prometheus.RequestersCacheSize, nil, time.Second*time.Duration(w.GetConfig().Loggers.Prometheus.RequestersCacheTTL)), + allDomains: expirable.NewLRU[string, int](w.GetConfig().Loggers.Prometheus.DomainsCacheSize, nil, time.Second*time.Duration(w.GetConfig().Loggers.Prometheus.DomainsCacheTTL)), + validDomains: expirable.NewLRU[string, int](w.GetConfig().Loggers.Prometheus.NoErrorDomainsCacheSize, nil, time.Second*time.Duration(w.GetConfig().Loggers.Prometheus.NoErrorDomainsCacheTTL)), + nxDomains: expirable.NewLRU[string, int](w.GetConfig().Loggers.Prometheus.NXDomainsCacheSize, nil, time.Second*time.Duration(w.GetConfig().Loggers.Prometheus.NXDomainsCacheTTL)), + sfDomains: 
expirable.NewLRU[string, int](w.GetConfig().Loggers.Prometheus.ServfailDomainsCacheSize, nil, time.Second*time.Duration(w.GetConfig().Loggers.Prometheus.ServfailDomainsCacheTTL)), + tlds: expirable.NewLRU[string, int](w.GetConfig().Loggers.Prometheus.DefaultDomainsCacheSize, nil, time.Second*time.Duration(w.GetConfig().Loggers.Prometheus.DefaultDomainsCacheTTL)), + etldplusone: expirable.NewLRU[string, int](w.GetConfig().Loggers.Prometheus.DefaultDomainsCacheSize, nil, time.Second*time.Duration(w.GetConfig().Loggers.Prometheus.DefaultDomainsCacheTTL)), + suspicious: expirable.NewLRU[string, int](w.GetConfig().Loggers.Prometheus.DefaultDomainsCacheSize, nil, time.Second*time.Duration(w.GetConfig().Loggers.Prometheus.DefaultDomainsCacheTTL)), + evicted: expirable.NewLRU[string, int](w.GetConfig().Loggers.Prometheus.DefaultDomainsCacheSize, nil, time.Second*time.Duration(w.GetConfig().Loggers.Prometheus.DefaultDomainsCacheTTL)), + + epsCounters: EpsCounters{ + TotalRcodes: make(map[string]float64), TotalQtypes: make(map[string]float64), + TotalIPVersion: make(map[string]float64), TotalIPProtocol: make(map[string]float64), + }, + + topRequesters: topmap.NewTopMap(w.GetConfig().Loggers.Prometheus.TopN), + topEvicted: topmap.NewTopMap(w.GetConfig().Loggers.Prometheus.TopN), + topAllDomains: topmap.NewTopMap(w.GetConfig().Loggers.Prometheus.TopN), + topValidDomains: topmap.NewTopMap(w.GetConfig().Loggers.Prometheus.TopN), + topSfDomains: topmap.NewTopMap(w.GetConfig().Loggers.Prometheus.TopN), + topNxDomains: topmap.NewTopMap(w.GetConfig().Loggers.Prometheus.TopN), + topTlds: topmap.NewTopMap(w.GetConfig().Loggers.Prometheus.TopN), + topETLDPlusOne: topmap.NewTopMap(w.GetConfig().Loggers.Prometheus.TopN), + topSuspicious: topmap.NewTopMap(w.GetConfig().Loggers.Prometheus.TopN), + } + prometheus.WrapRegistererWith(labels, w.promRegistry).MustRegister(pcs) + return pcs +} + +func (w *PrometheusCountersSet) GetCountersSet(dm *dnsutils.DNSMessage) PrometheusCountersCatalogue 
{ + return w +} + +// each CounterSet has the same list of timeseries descriptors, +// so it uses descriptros from the Prometheus instance the set belongs to. +func (w *PrometheusCountersSet) Describe(ch chan<- *prometheus.Desc) { + // Gauge metrcis + w.Lock() + defer w.Unlock() + ch <- w.prom.gaugeTopDomains + ch <- w.prom.gaugeTopNoerrDomains + ch <- w.prom.gaugeTopNxDomains + ch <- w.prom.gaugeTopSfDomains + ch <- w.prom.gaugeTopRequesters + ch <- w.prom.gaugeTopTlds + ch <- w.prom.gaugeTopETldsPlusOne + ch <- w.prom.gaugeTopSuspicious + ch <- w.prom.gaugeTopEvicted + + // Counter metrics + ch <- w.prom.gaugeDomainsAll + ch <- w.prom.gaugeDomainsValid + ch <- w.prom.gaugeDomainsNx + ch <- w.prom.gaugeDomainsSf + ch <- w.prom.gaugeRequesters + ch <- w.prom.gaugeTlds + ch <- w.prom.gaugeETldPlusOne + ch <- w.prom.gaugeSuspicious + ch <- w.prom.gaugeEvicted + + ch <- w.prom.gaugeEps + ch <- w.prom.gaugeEpsMax + + ch <- w.prom.counterQtypes + ch <- w.prom.counterRcodes + ch <- w.prom.counterIPProtocol + ch <- w.prom.counterIPVersion + ch <- w.prom.counterDNSMessages + ch <- w.prom.counterDNSQueries + ch <- w.prom.counterDNSReplies + + ch <- w.prom.counterFlagsTC + ch <- w.prom.counterFlagsAA + ch <- w.prom.counterFlagsRA + ch <- w.prom.counterFlagsAD + ch <- w.prom.counterFlagsMalformed + ch <- w.prom.counterFlagsFragmented + ch <- w.prom.counterFlagsReassembled + + ch <- w.prom.totalBytes + ch <- w.prom.totalReceivedBytes + ch <- w.prom.totalSentBytes +} + +// Updates all counters for a specific set of labelName=labelValue +func (w *PrometheusCountersSet) Record(dm dnsutils.DNSMessage) { + w.Lock() + defer w.Unlock() + + // count all uniq requesters if enabled + if w.prom.GetConfig().Loggers.Prometheus.RequestersMetricsEnabled { + count, _ := w.requesters.Get(dm.NetworkInfo.QueryIP) + w.requesters.Add(dm.NetworkInfo.QueryIP, count+1) + w.topRequesters.Record(dm.NetworkInfo.QueryIP, count+1) + } + + // count all uniq domains if enabled + if 
w.prom.GetConfig().Loggers.Prometheus.DomainsMetricsEnabled { + count, _ := w.allDomains.Get(dm.DNS.Qname) + w.allDomains.Add(dm.DNS.Qname, count+1) + w.topAllDomains.Record(dm.DNS.Qname, count+1) + } + + // top domains + switch { + case dm.DNS.Rcode == dnsutils.DNSRcodeTimeout && w.prom.GetConfig().Loggers.Prometheus.TimeoutMetricsEnabled: + count, _ := w.evicted.Get(dm.DNS.Qname) + w.evicted.Add(dm.DNS.Qname, count+1) + w.topEvicted.Record(dm.DNS.Qname, count+1) + + case dm.DNS.Rcode == dnsutils.DNSRcodeServFail && w.prom.GetConfig().Loggers.Prometheus.ServfailMetricsEnabled: + count, _ := w.sfDomains.Get(dm.DNS.Qname) + w.sfDomains.Add(dm.DNS.Qname, count+1) + w.topSfDomains.Record(dm.DNS.Qname, count+1) + + case dm.DNS.Rcode == dnsutils.DNSRcodeNXDomain && w.prom.GetConfig().Loggers.Prometheus.NonExistentMetricsEnabled: + count, _ := w.nxDomains.Get(dm.DNS.Qname) + w.nxDomains.Add(dm.DNS.Qname, count+1) + w.topNxDomains.Record(dm.DNS.Qname, count+1) + + case dm.DNS.Rcode == dnsutils.DNSRcodeNoError && w.prom.GetConfig().Loggers.Prometheus.NoErrorMetricsEnabled: + count, _ := w.validDomains.Get(dm.DNS.Qname) + w.validDomains.Add(dm.DNS.Qname, count+1) + w.topValidDomains.Record(dm.DNS.Qname, count+1) + } + + // count and top tld + if dm.PublicSuffix != nil && dm.PublicSuffix.QnamePublicSuffix != "-" { + count, _ := w.tlds.Get(dm.PublicSuffix.QnamePublicSuffix) + w.tlds.Add(dm.PublicSuffix.QnamePublicSuffix, count+1) + w.topTlds.Record(dm.PublicSuffix.QnamePublicSuffix, count+1) + } + + // count TLD+1 if it is set + if dm.PublicSuffix != nil && dm.PublicSuffix.QnameEffectiveTLDPlusOne != "-" { + count, _ := w.etldplusone.Get(dm.PublicSuffix.QnameEffectiveTLDPlusOne) + w.etldplusone.Add(dm.PublicSuffix.QnameEffectiveTLDPlusOne, count+1) + w.topETLDPlusOne.Record(dm.PublicSuffix.QnameEffectiveTLDPlusOne, count+1) + } + + // suspicious domains + if dm.Suspicious != nil && dm.Suspicious.Score > 0.0 { + count, _ := w.suspicious.Get(dm.DNS.Qname) + 
w.suspicious.Add(dm.DNS.Qname, count+1) + w.topSuspicious.Record(dm.DNS.Qname, count+1) + } + + // compute histograms, no more enabled by default to avoid to hurt performance. + if w.prom.GetConfig().Loggers.Prometheus.HistogramMetricsEnabled { + w.prom.histogramQnamesLength.With(w.labels).Observe(float64(len(dm.DNS.Qname))) + + if dm.DNSTap.Latency > 0.0 { + w.prom.histogramLatencies.With(w.labels).Observe(dm.DNSTap.Latency) + } + + if dm.DNS.Type == dnsutils.DNSQuery { + w.prom.histogramQueriesLength.With(w.labels).Observe(float64(dm.DNS.Length)) + } else { + w.prom.histogramRepliesLength.With(w.labels).Observe(float64(dm.DNS.Length)) + } + } + + // Record EPS related data + w.epsCounters.TotalEvents++ + w.epsCounters.TotalBytes += dm.DNS.Length + w.epsCounters.TotalDNSMessages++ + + if _, exists := w.epsCounters.TotalIPVersion[dm.NetworkInfo.Family]; !exists { + w.epsCounters.TotalIPVersion[dm.NetworkInfo.Family] = 1 + } else { + w.epsCounters.TotalIPVersion[dm.NetworkInfo.Family]++ + } + + if _, exists := w.epsCounters.TotalIPProtocol[dm.NetworkInfo.Protocol]; !exists { + w.epsCounters.TotalIPProtocol[dm.NetworkInfo.Protocol] = 1 + } else { + w.epsCounters.TotalIPProtocol[dm.NetworkInfo.Protocol]++ + } + + if _, exists := w.epsCounters.TotalQtypes[dm.DNS.Qtype]; !exists { + w.epsCounters.TotalQtypes[dm.DNS.Qtype] = 1 + } else { + w.epsCounters.TotalQtypes[dm.DNS.Qtype]++ + } + + if _, exists := w.epsCounters.TotalRcodes[dm.DNS.Rcode]; !exists { + w.epsCounters.TotalRcodes[dm.DNS.Rcode] = 1 + } else { + w.epsCounters.TotalRcodes[dm.DNS.Rcode]++ + } + + if dm.DNS.Type == dnsutils.DNSQuery { + w.epsCounters.TotalBytesReceived += dm.DNS.Length + w.epsCounters.TotalQueries++ + } + if dm.DNS.Type == dnsutils.DNSReply { + w.epsCounters.TotalBytesSent += dm.DNS.Length + w.epsCounters.TotalReplies++ + } + + // flags + if dm.DNS.Flags.TC { + w.epsCounters.TotalTC++ + } + if dm.DNS.Flags.AA { + w.epsCounters.TotalAA++ + } + if dm.DNS.Flags.RA { + w.epsCounters.TotalRA++ + 
} + if dm.DNS.Flags.AD { + w.epsCounters.TotalAD++ + } + if dm.DNS.MalformedPacket { + w.epsCounters.TotalMalformed++ + } + if dm.NetworkInfo.IPDefragmented { + w.epsCounters.TotalFragmented++ + } + if dm.NetworkInfo.TCPReassembled { + w.epsCounters.TotalReasembled++ + } + +} + +func (w *PrometheusCountersSet) Collect(ch chan<- prometheus.Metric) { + w.Lock() + defer w.Unlock() + // Update number of all domains + ch <- prometheus.MustNewConstMetric(w.prom.gaugeDomainsAll, prometheus.GaugeValue, + float64(w.allDomains.Len()), + ) + // Update number of valid domains (noerror) + ch <- prometheus.MustNewConstMetric(w.prom.gaugeDomainsValid, prometheus.GaugeValue, + float64(w.validDomains.Len()), + ) + // Count NX domains + ch <- prometheus.MustNewConstMetric(w.prom.gaugeDomainsNx, prometheus.GaugeValue, + float64(w.nxDomains.Len()), + ) + // Count SERVFAIL domains + ch <- prometheus.MustNewConstMetric(w.prom.gaugeDomainsSf, prometheus.GaugeValue, + float64(w.sfDomains.Len()), + ) + // Requesters counter + ch <- prometheus.MustNewConstMetric(w.prom.gaugeRequesters, prometheus.GaugeValue, + float64(w.requesters.Len()), + ) + + // Count number of unique TLDs + ch <- prometheus.MustNewConstMetric(w.prom.gaugeTlds, prometheus.GaugeValue, + float64(w.tlds.Len()), + ) + + ch <- prometheus.MustNewConstMetric(w.prom.gaugeETldPlusOne, prometheus.GaugeValue, + float64(w.etldplusone.Len()), + ) + + // Count number of unique suspicious names + ch <- prometheus.MustNewConstMetric(w.prom.gaugeSuspicious, prometheus.GaugeValue, + float64(w.suspicious.Len()), + ) + + // Count number of unique unanswered (timedout) names + ch <- prometheus.MustNewConstMetric(w.prom.gaugeEvicted, prometheus.GaugeValue, + float64(w.evicted.Len()), + ) + + // Count for all top domains + for _, r := range w.topAllDomains.Get() { + ch <- prometheus.MustNewConstMetric(w.prom.gaugeTopDomains, prometheus.GaugeValue, + float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) + } + + for _, r := range 
w.topValidDomains.Get() { + ch <- prometheus.MustNewConstMetric(w.prom.gaugeTopNoerrDomains, prometheus.GaugeValue, + float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) + } + + for _, r := range w.topNxDomains.Get() { + ch <- prometheus.MustNewConstMetric(w.prom.gaugeTopNxDomains, prometheus.GaugeValue, + float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) + } + + for _, r := range w.topSfDomains.Get() { + ch <- prometheus.MustNewConstMetric(w.prom.gaugeTopSfDomains, prometheus.GaugeValue, + float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) + } + + for _, r := range w.topRequesters.Get() { + ch <- prometheus.MustNewConstMetric(w.prom.gaugeTopRequesters, prometheus.GaugeValue, + float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) + } + + for _, r := range w.topTlds.Get() { + ch <- prometheus.MustNewConstMetric(w.prom.gaugeTopTlds, prometheus.GaugeValue, + float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) + } + + for _, r := range w.topETLDPlusOne.Get() { + ch <- prometheus.MustNewConstMetric(w.prom.gaugeTopETldsPlusOne, prometheus.GaugeValue, + float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) + } + + for _, r := range w.topSuspicious.Get() { + ch <- prometheus.MustNewConstMetric(w.prom.gaugeTopSuspicious, prometheus.GaugeValue, + float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) + } + + for _, r := range w.topEvicted.Get() { + ch <- prometheus.MustNewConstMetric(w.prom.gaugeTopEvicted, prometheus.GaugeValue, + float64(r.Hit), strings.ToValidUTF8(r.Name, "�")) + } + + ch <- prometheus.MustNewConstMetric(w.prom.gaugeEps, prometheus.GaugeValue, + float64(w.epsCounters.Eps), + ) + ch <- prometheus.MustNewConstMetric(w.prom.gaugeEpsMax, prometheus.GaugeValue, + float64(w.epsCounters.EpsMax), + ) + + // Update qtypes counter + for k, v := range w.epsCounters.TotalQtypes { + ch <- prometheus.MustNewConstMetric(w.prom.counterQtypes, prometheus.CounterValue, + v, k, + ) + } + + // Update Return Codes counter + for k, v := range w.epsCounters.TotalRcodes { + ch <- 
prometheus.MustNewConstMetric(w.prom.counterRcodes, prometheus.CounterValue, + v, k, + ) + } + + // Update IP protocol counter + for k, v := range w.epsCounters.TotalIPProtocol { + ch <- prometheus.MustNewConstMetric(w.prom.counterIPProtocol, prometheus.CounterValue, + v, k, + ) + } + + // Update IP version counter + for k, v := range w.epsCounters.TotalIPVersion { + ch <- prometheus.MustNewConstMetric(w.prom.counterIPVersion, prometheus.CounterValue, + v, k, + ) + } + + // Update global number of dns messages + ch <- prometheus.MustNewConstMetric(w.prom.counterDNSMessages, prometheus.CounterValue, + w.epsCounters.TotalDNSMessages) + + // Update number of dns queries + ch <- prometheus.MustNewConstMetric(w.prom.counterDNSQueries, prometheus.CounterValue, + float64(w.epsCounters.TotalQueries)) + + // Update number of dns replies + ch <- prometheus.MustNewConstMetric(w.prom.counterDNSReplies, prometheus.CounterValue, + float64(w.epsCounters.TotalReplies)) + + // Update flags + ch <- prometheus.MustNewConstMetric(w.prom.counterFlagsTC, prometheus.CounterValue, + w.epsCounters.TotalTC) + ch <- prometheus.MustNewConstMetric(w.prom.counterFlagsAA, prometheus.CounterValue, + w.epsCounters.TotalAA) + ch <- prometheus.MustNewConstMetric(w.prom.counterFlagsRA, prometheus.CounterValue, + w.epsCounters.TotalRA) + ch <- prometheus.MustNewConstMetric(w.prom.counterFlagsAD, prometheus.CounterValue, + w.epsCounters.TotalAD) + ch <- prometheus.MustNewConstMetric(w.prom.counterFlagsMalformed, prometheus.CounterValue, + w.epsCounters.TotalMalformed) + ch <- prometheus.MustNewConstMetric(w.prom.counterFlagsFragmented, prometheus.CounterValue, + w.epsCounters.TotalFragmented) + ch <- prometheus.MustNewConstMetric(w.prom.counterFlagsReassembled, prometheus.CounterValue, + w.epsCounters.TotalReasembled) + + ch <- prometheus.MustNewConstMetric(w.prom.totalBytes, + prometheus.CounterValue, float64(w.epsCounters.TotalBytes), + ) + ch <- 
prometheus.MustNewConstMetric(w.prom.totalReceivedBytes, prometheus.CounterValue, + float64(w.epsCounters.TotalBytesReceived), + ) + ch <- prometheus.MustNewConstMetric(w.prom.totalSentBytes, prometheus.CounterValue, + float64(w.epsCounters.TotalBytesSent)) + +} + +func (w *PrometheusCountersSet) ComputeEventsPerSecond() { + w.Lock() + defer w.Unlock() + if w.epsCounters.TotalEvents > 0 && w.epsCounters.TotalEventsPrev > 0 { + w.epsCounters.Eps = w.epsCounters.TotalEvents - w.epsCounters.TotalEventsPrev + } + w.epsCounters.TotalEventsPrev = w.epsCounters.TotalEvents + if w.epsCounters.Eps > w.epsCounters.EpsMax { + w.epsCounters.EpsMax = w.epsCounters.Eps + } +} + +func NewPromCounterCatalogueContainer(w *Prometheus, selLabels []string, l map[string]string) *PromCounterCatalogueContainer { + if len(selLabels) == 0 { + w.LogFatal("Cannot create a new PromCounterCatalogueContainer with empty list of selLabels") + } + sel, ok := catalogueSelectors[selLabels[0]] + if !ok { + w.LogFatal(fmt.Sprintf("No selector for %v label", selLabels[0])) + } + + // copy all the data over, to make sure this container does not share memory with other containers + r := &PromCounterCatalogueContainer{ + prom: w, + stats: make(map[string]PrometheusCountersCatalogue), + selector: sel, + labelNames: make([]string, len(selLabels)), + labels: make(map[string]string), + } + for k, v := range l { + r.labels[k] = v + } + copy(r.labelNames, selLabels) + return r +} + +// Returns a slice of all PrometheusCountersSet in a Container +func (w *PromCounterCatalogueContainer) GetAllCounterSets() []*PrometheusCountersSet { + ret := []*PrometheusCountersSet{} + w.RLock() + for _, v := range w.stats { + switch elem := v.(type) { + case *PrometheusCountersSet: + ret = append(ret, elem) + case *PromCounterCatalogueContainer: + ret = append(ret, elem.GetAllCounterSets()...) 
+ default: + panic(fmt.Sprintf("Unexpected element in PromCounterCatalogueContainer of %T: %v", v, v)) + } + } + w.RUnlock() + return ret +} + +// Searches for an existing element for a label value, creating one if not found +func (w *PromCounterCatalogueContainer) GetCountersSet(dm *dnsutils.DNSMessage) PrometheusCountersCatalogue { + if w.selector == nil { + panic(fmt.Sprintf("%v: nil selector", w)) + } + + // w.selector fetches the value for the label *this* Catalogue Element considers. + // Check if we alreday have item for it, and return it if we do (it is either catalogue or counter set) + lbl := w.selector(dm) + w.Lock() + defer w.Unlock() + if r, ok := w.stats[lbl]; ok { + return r.GetCountersSet(dm) + } + + // there is no existing element in the catalogue. We need to create a new entry. + // Entry may be a new Catalogue, or PrometheusCounterSet. + // If selector_labels consists of single element, we need to create a PrometheusCounterSet. + // Otherwise, there is another layer of labels. + var newElem PrometheusCountersCatalogue + // Prepare labels for the new element (needed for ether CatalogueContainer and CounterSet) + newLables := map[string]string{ + w.labelNames[0]: lbl, + } + for k, v := range w.labels { + newLables[k] = v + } + if len(w.labelNames) > 1 { + newElem = NewPromCounterCatalogueContainer( + w.prom, + w.labelNames[1:], + newLables, // Here we'll do an extra map copy... + ) + } else { + newElem = newPrometheusCounterSet( + w.prom, + prometheus.Labels(newLables), + ) + + } + w.stats[lbl] = newElem + + // GetCountersSet of the newly created element may take some time, and we will be holding the lock + // of the current Container until it is done. This may be improved if we separate w.stats[lbl] + // update and calling GetCountersSet on the new element. 
+ return w.stats[lbl].GetCountersSet(dm) +} + +// This function checks the configuration, to determine which label dimensions were requested +// by configuration, and returns correct implementation of Catalogue. +func CreateSystemCatalogue(w *Prometheus) ([]string, *PromCounterCatalogueContainer) { + lbls := w.GetConfig().Loggers.Prometheus.LabelsList + + // Default configuration is label with stream_id, to keep us backward compatible + if len(lbls) == 0 { + lbls = []string{"stream_id"} + } + return lbls, NewPromCounterCatalogueContainer(w, lbls, make(map[string]string)) +} + +func NewPrometheus(config *pkgconfig.Config, logger *logger.Logger, name string) *Prometheus { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.Prometheus.ChannelBufferSize > 0 { + bufSize = config.Loggers.Prometheus.ChannelBufferSize + } + w := &Prometheus{GenericWorker: NewGenericWorker(config, logger, name, "prometheus", bufSize, pkgconfig.DefaultMonitor)} + w.doneAPI = make(chan bool) + w.promRegistry = prometheus.NewPedanticRegistry() + + // This will create a catalogue of counters indexed by fileds requested by config + w.catalogueLabels, w.counters = CreateSystemCatalogue(w) + + // init prometheus + w.InitProm() + + // midleware to add basic authentication + authMiddleware := func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(httpWriter http.ResponseWriter, r *http.Request) { + username, password, ok := r.BasicAuth() + if !ok || username != w.GetConfig().Loggers.Prometheus.BasicAuthLogin || password != w.GetConfig().Loggers.Prometheus.BasicAuthPwd { + httpWriter.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`) + httpWriter.WriteHeader(http.StatusUnauthorized) + fmt.Fprintf(httpWriter, "Unauthorized\n") + return + } + + handler.ServeHTTP(httpWriter, r) + }) + } + + mux := http.NewServeMux() + mux.Handle("/metrics", promhttp.HandlerFor(w.promRegistry, promhttp.HandlerOpts{})) + + handler := authMiddleware(mux) + + w.httpServer = 
&http.Server{} + if w.GetConfig().Loggers.Prometheus.BasicAuthEnabled { + w.httpServer.Handler = handler + } else { + w.httpServer.Handler = mux + } + + w.httpServer.ErrorLog = logger.ErrorLogger() + return w +} + +func (w *Prometheus) InitProm() { + + promPrefix := telemetry.SanitizeMetricName(w.GetConfig().Loggers.Prometheus.PromPrefix) + + // register metric about current version information. + w.promRegistry.MustRegister(version.NewCollector(promPrefix)) + + // export Go runtime metrics + w.promRegistry.MustRegister( + collectors.NewGoCollector(collectors.WithGoCollectorMemStatsMetricsDisabled()), + collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), + ) + // also try collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}), + + // Metric description created in Prometheus object, but used in Describe method of PrometheusCounterSet + // Prometheus class itself reports signle metric - BuildInfo. + w.gaugeTopDomains = prometheus.NewDesc( + fmt.Sprintf("%s_top_domains", promPrefix), + "Number of hit per domain topN, partitioned by qname", + []string{"domain"}, nil, + ) + + w.gaugeTopNoerrDomains = prometheus.NewDesc( + fmt.Sprintf("%s_top_noerror_domains", promPrefix), + "Number of hit per domain topN, partitioned by qname", + []string{"domain"}, nil, + ) + + w.gaugeTopNxDomains = prometheus.NewDesc( + fmt.Sprintf("%s_top_nonexistent_domains", promPrefix), + "Number of hit per nx domain topN, partitioned by qname", + []string{"domain"}, nil, + ) + + w.gaugeTopSfDomains = prometheus.NewDesc( + fmt.Sprintf("%s_top_servfail_domains", promPrefix), + "Number of hit per servfail domain topN, partitioned by stream and qname", + []string{"domain"}, nil, + ) + + w.gaugeTopRequesters = prometheus.NewDesc( + fmt.Sprintf("%s_top_requesters", promPrefix), + "Number of hit per requester topN, partitioned by client IP", + []string{"ip"}, nil, + ) + + w.gaugeTopTlds = prometheus.NewDesc( + fmt.Sprintf("%s_top_tlds", promPrefix), + "Number of hit per tld - 
topN", + []string{"suffix"}, nil, + ) + // etldplusone_top_total + w.gaugeTopETldsPlusOne = prometheus.NewDesc( + fmt.Sprintf("%s_top_etlds_plusone", promPrefix), + "Number of hit per eTLD+1 - topN", + []string{"suffix"}, nil, + ) + + w.gaugeTopSuspicious = prometheus.NewDesc( + fmt.Sprintf("%s_top_suspicious", promPrefix), + "Number of hit per suspicious domain - topN", + []string{"domain"}, nil, + ) + + w.gaugeTopEvicted = prometheus.NewDesc( + fmt.Sprintf("%s_top_unanswered", promPrefix), + "Number of hit per unanswered domain - topN", + []string{"domain"}, nil, + ) + + w.gaugeEps = prometheus.NewDesc( + fmt.Sprintf("%s_throughput_ops", promPrefix), + "Number of ops per second received, partitioned by stream", + nil, nil, + ) + + w.gaugeEpsMax = prometheus.NewDesc( + fmt.Sprintf("%s_throughput_ops_max", promPrefix), + "Max number of ops per second observed, partitioned by stream", + nil, nil, + ) + + // Counter metrics + w.gaugeDomainsAll = prometheus.NewDesc( + fmt.Sprintf("%s_total_domains_lru", promPrefix), + "Total number of uniq domains most recently observed per stream identity ", + nil, nil, + ) + + w.gaugeDomainsValid = prometheus.NewDesc( + fmt.Sprintf("%s_total_noerror_domains_lru", promPrefix), + "Total number of NOERROR domains most recently observed per stream identity ", + nil, nil, + ) + + w.gaugeDomainsNx = prometheus.NewDesc( + fmt.Sprintf("%s_total_nonexistent_domains_lru", promPrefix), + "Total number of NX domains most recently observed per stream identity", + nil, nil, + ) + + w.gaugeDomainsSf = prometheus.NewDesc( + fmt.Sprintf("%s_total_servfail_domains_lru", promPrefix), + "Total number of SERVFAIL domains most recently observed per stream identity", + nil, nil, + ) + + w.gaugeRequesters = prometheus.NewDesc( + fmt.Sprintf("%s_total_requesters_lru", promPrefix), + "Total number of DNS clients most recently observed per stream identity.", + nil, nil, + ) + + w.gaugeTlds = prometheus.NewDesc( + fmt.Sprintf("%s_total_tlds_lru", promPrefix), 
+ "Total number of tld most recently observed per stream identity", + nil, nil, + ) + + w.gaugeETldPlusOne = prometheus.NewDesc( + fmt.Sprintf("%s_total_etlds_plusone_lru", promPrefix), + "Total number of etld+one most recently observed per stream identity", + nil, nil, + ) + + w.gaugeSuspicious = prometheus.NewDesc( + fmt.Sprintf("%s_total_suspicious_lru", promPrefix), + "Total number of suspicious domains most recently observed per stream identity", + nil, nil, + ) + + w.gaugeEvicted = prometheus.NewDesc( + fmt.Sprintf("%s_total_unanswered_lru", promPrefix), + "Total number of unanswered domains most recently observed per stream identity", + nil, nil, + ) + + w.counterQtypes = prometheus.NewDesc( + fmt.Sprintf("%s_qtypes_total", promPrefix), + "Counter of queries per qtypes", + []string{"query_type"}, nil, + ) + + w.counterRcodes = prometheus.NewDesc( + fmt.Sprintf("%s_rcodes_total", promPrefix), + "Counter of replies per return codes", + []string{"return_code"}, nil, + ) + + w.counterIPProtocol = prometheus.NewDesc( + fmt.Sprintf("%s_ipprotocol_total", promPrefix), + "Counter of packets per IP protocol", + []string{"net_transport"}, nil, + ) + + w.counterIPVersion = prometheus.NewDesc( + fmt.Sprintf("%s_ipversion_total", promPrefix), + "Counter of packets per IP version", + []string{"net_family"}, nil, + ) + + w.counterDNSMessages = prometheus.NewDesc( + fmt.Sprintf("%s_dnsmessages_total", promPrefix), + "Counter of DNS messages per stream", + nil, nil, + ) + + w.counterDNSQueries = prometheus.NewDesc( + fmt.Sprintf("%s_queries_total", promPrefix), + "Counter of DNS queries per stream", + nil, nil, + ) + + w.counterDNSReplies = prometheus.NewDesc( + fmt.Sprintf("%s_replies_total", promPrefix), + "Counter of DNS replies per stream", + nil, nil, + ) + + w.counterFlagsTC = prometheus.NewDesc( + fmt.Sprintf("%s_flag_tc_total", promPrefix), + "Number of packet with flag TC", + nil, nil, + ) + + w.counterFlagsAA = prometheus.NewDesc( + fmt.Sprintf("%s_flag_aa_total", 
promPrefix), + "Number of packet with flag AA", + nil, nil, + ) + + w.counterFlagsRA = prometheus.NewDesc( + fmt.Sprintf("%s_flag_ra_total", promPrefix), + "Number of packet with flag RA", + nil, nil, + ) + + w.counterFlagsAD = prometheus.NewDesc( + fmt.Sprintf("%s_flag_ad_total", promPrefix), + "Number of packet with flag AD", + nil, nil, + ) + + w.counterFlagsMalformed = prometheus.NewDesc( + fmt.Sprintf("%s_malformed_total", promPrefix), + "Number of malformed packets", + nil, nil, + ) + + w.counterFlagsFragmented = prometheus.NewDesc( + fmt.Sprintf("%s_fragmented_total", promPrefix), + "Number of IP fragmented packets", + nil, nil, + ) + + w.counterFlagsReassembled = prometheus.NewDesc( + fmt.Sprintf("%s_reassembled_total", promPrefix), + "Number of TCP reassembled packets", + nil, nil, + ) + + w.totalBytes = prometheus.NewDesc( + fmt.Sprintf("%s_bytes_total", promPrefix), + "The total bytes received and sent", + nil, nil, + ) + + w.totalReceivedBytes = prometheus.NewDesc( + fmt.Sprintf("%s_received_bytes_total", promPrefix), + "The total bytes received", + nil, nil, + ) + + w.totalSentBytes = prometheus.NewDesc( + fmt.Sprintf("%s_sent_bytes_total", promPrefix), + "The total bytes sent", + nil, nil, + ) + + w.histogramQueriesLength = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: fmt.Sprintf("%s_queries_size_bytes", promPrefix), + Help: "Size of the queries in bytes.", + Buckets: []float64{50, 100, 250, 500}, + }, + w.catalogueLabels, + ) + w.promRegistry.MustRegister(w.histogramQueriesLength) + + w.histogramRepliesLength = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: fmt.Sprintf("%s_replies_size_bytes", promPrefix), + Help: "Size of the replies in bytes.", + Buckets: []float64{50, 100, 250, 500}, + }, + w.catalogueLabels, + ) + w.promRegistry.MustRegister(w.histogramRepliesLength) + + w.histogramQnamesLength = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: fmt.Sprintf("%s_qnames_size_bytes", promPrefix), + 
Help: "Size of the qname in bytes.", + Buckets: []float64{10, 20, 40, 60, 100}, + }, + w.catalogueLabels, + ) + w.promRegistry.MustRegister(w.histogramQnamesLength) + + w.histogramLatencies = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: fmt.Sprintf("%s_latencies", promPrefix), + Help: "Latency between query and reply", + Buckets: []float64{0.001, 0.010, 0.050, 0.100, 0.5, 1.0}, + }, + w.catalogueLabels, + ) + w.promRegistry.MustRegister(w.histogramLatencies) +} + +func (w *Prometheus) ReadConfig() { + if !netutils.IsValidTLS(w.GetConfig().Loggers.Prometheus.TLSMinVersion) { + w.LogFatal(pkgconfig.PrefixLogWorker + "[" + w.GetName() + "] prometheus - invalid tls min version") + } +} + +func (w *Prometheus) Record(dm dnsutils.DNSMessage) { + // record stream identity + w.Lock() + + // count number of dns messages per network family (ipv4 or v6) + v := w.counters.GetCountersSet(&dm) + counterSet, ok := v.(*PrometheusCountersSet) + w.Unlock() + if !ok { + w.LogError(fmt.Sprintf("GetCountersSet returned an invalid value of %T, expected *PrometheusCountersSet", v)) + } else { + counterSet.Record(dm) + } + +} + +func (w *Prometheus) ComputeEventsPerSecond() { + // for each stream compute the number of events per second + w.Lock() + defer w.Unlock() + for _, cntrSet := range w.counters.GetAllCounterSets() { + cntrSet.ComputeEventsPerSecond() + } +} + +func (w *Prometheus) ListenAndServe() { + w.LogInfo("starting http server...") + + var err error + var listener net.Listener + addrlisten := w.GetConfig().Loggers.Prometheus.ListenIP + ":" + strconv.Itoa(w.GetConfig().Loggers.Prometheus.ListenPort) + // listening with tls enabled ? 
+ if w.GetConfig().Loggers.Prometheus.TLSSupport { + w.LogInfo("tls support enabled") + var cer tls.Certificate + cer, err = tls.LoadX509KeyPair(w.GetConfig().Loggers.Prometheus.CertFile, w.GetConfig().Loggers.Prometheus.KeyFile) + if err != nil { + w.LogFatal("loading certificate failed:", err) + } + + // prepare tls configuration + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cer}, + MinVersion: tls.VersionTLS12, + } + + // update tls min version according to the user config + tlsConfig.MinVersion = netutils.TLSVersion[w.GetConfig().Loggers.Prometheus.TLSMinVersion] + + if w.GetConfig().Loggers.Prometheus.TLSMutual { + + // Create a CA certificate pool and add cert.pem to it + var caCert []byte + caCert, err = os.ReadFile(w.GetConfig().Loggers.Prometheus.CertFile) + if err != nil { + w.LogFatal(err) + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(caCert) + + tlsConfig.ClientCAs = caCertPool + tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert + } + + listener, err = tls.Listen(netutils.SocketTCP, addrlisten, tlsConfig) + + } else { + // basic listening + listener, err = net.Listen(netutils.SocketTCP, addrlisten) + } + + // something wrong ? 
+ if err != nil {
+ w.LogFatal("http server listening failed:", err)
+ }
+
+ w.netListener = listener
+ w.LogInfo("is listening on %s", listener.Addr())
+
+ w.httpServer.Serve(w.netListener)
+
+ w.LogInfo("http server terminated")
+ w.doneAPI <- true
+}
+
+func (w *Prometheus) StartCollect() {
+ w.LogInfo("starting data collection")
+ defer w.CollectDone()
+
+ // prepare next channels
+ defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes())
+ droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes())
+
+ // prepare transforms
+ subprocessors := transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0)
+
+ // start http server
+ go w.ListenAndServe()
+
+ // goroutine to process transformed dns messages
+ go w.StartLogging()
+
+ // loop to process incoming messages
+ for {
+ select {
+ case <-w.OnStop():
+ w.StopLogger()
+ subprocessors.Reset()
+ w.LogInfo("stopping http server...")
+ w.netListener.Close()
+ <-w.doneAPI
+ return
+
+ // new config provided?
+ case cfg := <-w.NewConfig():
+ w.SetConfig(cfg)
+ w.ReadConfig()
+ subprocessors.ReloadConfig(&cfg.OutgoingTransformers)
+
+ case dm, opened := <-w.GetInputChannel():
+ if !opened {
+ w.LogInfo("input channel closed!")
+ return
+ }
+ // count global messages
+ w.CountIngressTraffic()
+
+ // apply transforms, init dns message with additional parts if necessary
+ transformResult, err := subprocessors.ProcessMessage(&dm)
+ if err != nil {
+ w.LogError(err.Error())
+ }
+ if transformResult == transformers.ReturnDrop {
+ w.SendDroppedTo(droppedRoutes, droppedNames, dm)
+ continue
+ }
+
+ // send to output channel
+ w.CountEgressTraffic()
+ w.GetOutputChannel() <- dm
+
+ // send to next ? 
+ w.SendForwardedTo(defaultRoutes, defaultNames, dm) + } + } +} + +func (w *Prometheus) StartLogging() { + w.LogInfo("logging has started") + defer w.LoggingDone() + + // init timer to compute qps + t1Interval := 1 * time.Second + t1 := time.NewTimer(t1Interval) + + for { + select { + case <-w.OnLoggerStopped(): + return + + case dm, opened := <-w.GetOutputChannel(): + if !opened { + w.LogInfo("output channel closed!") + return + } + + // record the dnstap message + w.Record(dm) + + case <-t1.C: + // compute eps each second + w.ComputeEventsPerSecond() + + // reset the timer + t1.Reset(t1Interval) + } + } +} + +/* +This is an implementation of variadic dimensions map of label values. +Having nested structure offers the fastest operations, compared to super-flexibile approach that prom client +uses with arbitrary set of labels. + +Label values are obtained by the means of 'selectors' - functions that fetch a specific field of a DNS Message +offering fast operations. + +Example of conterSet/Container for 2 labels + ++----------------------------------------------------------------------------------------------------------+ +| Container for label1 | +| Container maps different values of label1 to other containers | +| until the chain for all required label names is built. | +| | +| Label1 values: | +| value11 value12 | +| +---------------------------------------------------------------------------+ +-------------------------+| +| | Container for label2 | | Container for label2 || +| | in this container ALL elements | | all elemenens share || +| | have the same value for label1 | | the same value of label1|| +| | | | || +| | Label2 values: | | +----------++----------+|| +| | value21 value22 | | | .... 
|| ,,,,,, ||| +| | +-----------------------------------++-----------------------------------+| | | || ||| +| | | CounterSet || CounterSet || | | || ||| +| | | In this set all metrics share the || In this set all metrics share the || | +----------++----------+|| +| | | same values for both labels, so || same values for both labels, so || | || +| | | no need to keep label values here || no need to keep label values here || | || +| | | || || | || +| | | metric1 || metric1 || | || +| | | metric2 || metric2 || | || +| | +-----------------------------------++-----------------------------------+| | || +| +---------------------------------------------------------------------------+ +-------------------------+| + +*/ diff --git a/loggers/prometheus_test.go b/workers/prometheus_test.go similarity index 96% rename from loggers/prometheus_test.go rename to workers/prometheus_test.go index db61794c..f7c35160 100644 --- a/loggers/prometheus_test.go +++ b/workers/prometheus_test.go @@ -1,6 +1,7 @@ -package loggers +package workers import ( + "fmt" "net/http" "net/http/httptest" "strings" @@ -21,7 +22,7 @@ const ( func TestPrometheus_BadAuth(t *testing.T) { // init the logger - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() g := NewPrometheus(config, logger.New(false), "test") tt := []struct { @@ -60,7 +61,7 @@ func TestPrometheus_BadAuth(t *testing.T) { func TestPrometheus_GetMetrics(t *testing.T) { // init the logger - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() config.Loggers.Prometheus.HistogramMetricsEnabled = true // By default, prometheus uses 'stream_id' as the label @@ -164,7 +165,7 @@ func getMetricsTestCase(config *pkgconfig.Config, labels map[string]string) func // Test that EPS (Events Per Second) Counters increment correctly func TestPrometheus_EPS_Counters(t *testing.T) { - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() g := NewPrometheus(config, logger.New(false), "test") 
// record one dns message to simulate some incoming data @@ -196,12 +197,13 @@ func TestPrometheus_EPS_Counters(t *testing.T) { } func TestPrometheus_BuildInfo(t *testing.T) { - config := pkgconfig.GetFakeConfig() - // config.Loggers.Prometheus.HistogramMetricsEnabled = true + config := pkgconfig.GetDefaultConfig() + g := NewPrometheus(config, logger.New(false), "test") mf := getMetrics(g, t) + fmt.Println(mf) if !ensureMetricValue(t, mf, "dnscollector_build_info", map[string]string{}, 1) { t.Errorf("Cannot validate build info!") } @@ -209,7 +211,7 @@ func TestPrometheus_BuildInfo(t *testing.T) { } func TestPrometheus_ConfirmDifferentResolvers(t *testing.T) { - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() config.Loggers.Prometheus.LabelsList = []string{"resolver"} g := NewPrometheus(config, logger.New(false), "test") noErrorRecord := dnsutils.GetFakeDNSMessage() @@ -226,7 +228,7 @@ func TestPrometheus_ConfirmDifferentResolvers(t *testing.T) { } func TestPrometheus_Etldplusone(t *testing.T) { - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() config.Loggers.Prometheus.LabelsList = []string{"stream_id"} g := NewPrometheus(config, logger.New(false), "test") @@ -301,7 +303,7 @@ func ensureMetricValue(t *testing.T, mf map[string]*dto.MetricFamily, name strin func getMetrics(prom *Prometheus, t *testing.T) map[string]*dto.MetricFamily { request := httptest.NewRequest(http.MethodGet, "/metrics", strings.NewReader("")) - request.SetBasicAuth(prom.config.Loggers.Prometheus.BasicAuthLogin, prom.config.Loggers.Prometheus.BasicAuthPwd) + request.SetBasicAuth(prom.GetConfig().Loggers.Prometheus.BasicAuthLogin, prom.GetConfig().Loggers.Prometheus.BasicAuthPwd) responseRecorder := httptest.NewRecorder() // call handler @@ -321,7 +323,7 @@ func getMetrics(prom *Prometheus, t *testing.T) map[string]*dto.MetricFamily { } func TestPrometheus_QnameInvalidChars(t *testing.T) { - config := pkgconfig.GetFakeConfig() + config := 
pkgconfig.GetDefaultConfig() // config.Loggers.Prometheus.HistogramMetricsEnabled = true g := NewPrometheus(config, logger.New(false), "test") diff --git a/workers/redispub.go b/workers/redispub.go new file mode 100644 index 00000000..b47f93d7 --- /dev/null +++ b/workers/redispub.go @@ -0,0 +1,337 @@ +package workers + +import ( + "bufio" + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "io" + "net" + "strconv" + "strings" + "time" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" +) + +type RedisPub struct { + *GenericWorker + stopRead, doneRead chan bool + textFormat []string + transport string + transportWriter *bufio.Writer + transportConn net.Conn + transportReady, transportReconnect chan bool + writerReady bool +} + +func NewRedisPub(config *pkgconfig.Config, logger *logger.Logger, name string) *RedisPub { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.RedisPub.ChannelBufferSize > 0 { + bufSize = config.Loggers.RedisPub.ChannelBufferSize + } + w := &RedisPub{GenericWorker: NewGenericWorker(config, logger, name, "redispub", bufSize, pkgconfig.DefaultMonitor)} + w.stopRead = make(chan bool) + w.doneRead = make(chan bool) + w.transportReady = make(chan bool) + w.transportReconnect = make(chan bool) + w.ReadConfig() + return w +} + +func (w *RedisPub) ReadConfig() { + + w.transport = w.GetConfig().Loggers.RedisPub.Transport + + // begin backward compatibility + if w.GetConfig().Loggers.RedisPub.TLSSupport { + w.transport = netutils.SocketTLS + } + if len(w.GetConfig().Loggers.RedisPub.SockPath) > 0 { + w.transport = netutils.SocketUnix + } + // end + + if len(w.GetConfig().Loggers.RedisPub.TextFormat) > 0 { + w.textFormat = strings.Fields(w.GetConfig().Loggers.RedisPub.TextFormat) + } else { + w.textFormat = 
strings.Fields(w.GetConfig().Global.TextFormat) + } +} + +func (w *RedisPub) Disconnect() { + if w.transportConn != nil { + w.LogInfo("closing redispub connection") + w.transportConn.Close() + } +} + +func (w *RedisPub) ReadFromConnection() { + buffer := make([]byte, 4096) + + go func() { + for { + _, err := w.transportConn.Read(buffer) + if err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, net.ErrClosed) { + w.LogInfo("read from connection terminated") + break + } + w.LogError("Error on reading: %s", err.Error()) + } + // We just discard the data + } + }() + + // block goroutine until receive true event in stopRead channel + <-w.stopRead + w.doneRead <- true + + w.LogInfo("read goroutine terminated") +} + +func (w *RedisPub) ConnectToRemote() { + for { + if w.transportConn != nil { + w.transportConn.Close() + w.transportConn = nil + } + + address := w.GetConfig().Loggers.RedisPub.RemoteAddress + ":" + strconv.Itoa(w.GetConfig().Loggers.RedisPub.RemotePort) + connTimeout := time.Duration(w.GetConfig().Loggers.RedisPub.ConnectTimeout) * time.Second + + var conn net.Conn + var err error + + switch w.transport { + case netutils.SocketUnix: + address = w.GetConfig().Loggers.RedisPub.RemoteAddress + if len(w.GetConfig().Loggers.RedisPub.SockPath) > 0 { + address = w.GetConfig().Loggers.RedisPub.SockPath + } + w.LogInfo("connecting to %s://%s", w.transport, address) + conn, err = net.DialTimeout(w.transport, address, connTimeout) + + case netutils.SocketTCP: + w.LogInfo("connecting to %s://%s", w.transport, address) + conn, err = net.DialTimeout(w.transport, address, connTimeout) + + case netutils.SocketTLS: + w.LogInfo("connecting to %s://%s", w.transport, address) + + var tlsConfig *tls.Config + + tlsOptions := netutils.TLSOptions{ + InsecureSkipVerify: w.GetConfig().Loggers.RedisPub.TLSInsecure, + MinVersion: w.GetConfig().Loggers.RedisPub.TLSMinVersion, + CAFile: w.GetConfig().Loggers.RedisPub.CAFile, + CertFile: w.GetConfig().Loggers.RedisPub.CertFile, + 
KeyFile: w.GetConfig().Loggers.RedisPub.KeyFile, + } + + tlsConfig, err = netutils.TLSClientConfig(tlsOptions) + if err == nil { + dialer := &net.Dialer{Timeout: connTimeout} + conn, err = tls.DialWithDialer(dialer, netutils.SocketTCP, address, tlsConfig) + } + + default: + w.LogFatal("logger=redispub - invalid transport:", w.transport) + } + + // something is wrong during connection ? + if err != nil { + w.LogError("%s", err) + w.LogInfo("retry to connect in %d seconds", w.GetConfig().Loggers.RedisPub.RetryInterval) + time.Sleep(time.Duration(w.GetConfig().Loggers.RedisPub.RetryInterval) * time.Second) + continue + } + + w.transportConn = conn + + // block until framestream is ready + w.transportReady <- true + + // block until an error occurred, need to reconnect + w.transportReconnect <- true + } +} + +func (w *RedisPub) FlushBuffer(buf *[]dnsutils.DNSMessage) { + // create escaping buffer + escapeBuffer := new(bytes.Buffer) + // create a new encoder that writes to the buffer + encoder := json.NewEncoder(escapeBuffer) + + for _, dm := range *buf { + escapeBuffer.Reset() + + cmd := "PUBLISH " + strconv.Quote(w.GetConfig().Loggers.RedisPub.RedisChannel) + " " + w.transportWriter.WriteString(cmd) + + if w.GetConfig().Loggers.RedisPub.Mode == pkgconfig.ModeText { + w.transportWriter.WriteString(strconv.Quote(dm.String(w.textFormat, w.GetConfig().Global.TextFormatDelimiter, w.GetConfig().Global.TextFormatBoundary))) + w.transportWriter.WriteString(w.GetConfig().Loggers.RedisPub.PayloadDelimiter) + } + + if w.GetConfig().Loggers.RedisPub.Mode == pkgconfig.ModeJSON { + encoder.Encode(dm) + w.transportWriter.WriteString(strconv.Quote(escapeBuffer.String())) + w.transportWriter.WriteString(w.GetConfig().Loggers.RedisPub.PayloadDelimiter) + } + + if w.GetConfig().Loggers.RedisPub.Mode == pkgconfig.ModeFlatJSON { + flat, err := dm.Flatten() + if err != nil { + w.LogError("flattening DNS message failed: %e", err) + continue + } + encoder.Encode(flat) + 
w.transportWriter.WriteString(strconv.Quote(escapeBuffer.String())) + w.transportWriter.WriteString(w.GetConfig().Loggers.RedisPub.PayloadDelimiter) + } + + // flush the transport buffer + err := w.transportWriter.Flush() + if err != nil { + w.LogError("send frame error", err.Error()) + w.writerReady = false + <-w.transportReconnect + break + } + } + + // reset buffer + *buf = nil +} + +func (w *RedisPub) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare transforms + subprocessors := transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0) + + // goroutine to process transformed dns messages + go w.StartLogging() + + // loop to process incoming messages + for { + select { + case <-w.OnStop(): + w.StopLogger() + subprocessors.Reset() + + w.stopRead <- true + <-w.doneRead + + return + + // new config provided? + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.ReadConfig() + subprocessors.ReloadConfig(&cfg.OutgoingTransformers) + + case dm, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("input channel closed!") + return + } + // count global messages + w.CountIngressTraffic() + + // apply tranforms, init dns message with additionnals parts if necessary + transformResult, err := subprocessors.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // send to output channel + w.CountEgressTraffic() + w.GetOutputChannel() <- dm + + // send to next ? 
+ w.SendForwardedTo(defaultRoutes, defaultNames, dm)
+ }
+ }
+}
+
+func (w *RedisPub) StartLogging() {
+ w.LogInfo("logging has started")
+ defer w.LoggingDone()
+
+ // init buffer
+ bufferDm := []dnsutils.DNSMessage{}
+
+ // init flush timer for buffer
+ flushInterval := time.Duration(w.GetConfig().Loggers.RedisPub.FlushInterval) * time.Second
+ flushTimer := time.NewTimer(flushInterval)
+
+ // init remote conn
+ go w.ConnectToRemote()
+
+ for {
+ select {
+ case <-w.OnLoggerStopped():
+ // closing remote connection if it exists
+ w.Disconnect()
+ return
+
+ case <-w.transportReady:
+ w.LogInfo("transport connected with success")
+ w.transportWriter = bufio.NewWriter(w.transportConn)
+ w.writerReady = true
+ // read from the connection until we stop
+ go w.ReadFromConnection()
+
+ // incoming dns message to process
+ case dm, opened := <-w.GetOutputChannel():
+ if !opened {
+ w.LogInfo("output channel closed!")
+ return
+ }
+
+ // drop dns message if the connection is not ready to avoid memory leak or
+ // to block the channel
+ if !w.writerReady {
+ continue
+ }
+
+ // append dns message to buffer
+ bufferDm = append(bufferDm, dm)
+
+ // buffer is full ? 
+ if len(bufferDm) >= w.GetConfig().Loggers.RedisPub.BufferSize { + w.FlushBuffer(&bufferDm) + } + + // flush the buffer + case <-flushTimer.C: + if !w.writerReady { + bufferDm = nil + } + + if len(bufferDm) > 0 { + w.FlushBuffer(&bufferDm) + } + + // restart timer + flushTimer.Reset(flushInterval) + + } + } +} diff --git a/loggers/redispub_test.go b/workers/redispub_test.go similarity index 91% rename from loggers/redispub_test.go rename to workers/redispub_test.go index f832a524..396ddc3f 100644 --- a/loggers/redispub_test.go +++ b/workers/redispub_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" @@ -8,9 +8,9 @@ import ( "time" "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" ) func Test_RedisPubRun(t *testing.T) { @@ -34,7 +34,7 @@ func Test_RedisPubRun(t *testing.T) { for _, tc := range testcases { t.Run(tc.mode, func(t *testing.T) { // init logger - cfg := pkgconfig.GetFakeConfig() + cfg := pkgconfig.GetDefaultConfig() cfg.Loggers.RedisPub.FlushInterval = 1 cfg.Loggers.RedisPub.BufferSize = 0 cfg.Loggers.RedisPub.Mode = tc.mode @@ -43,14 +43,14 @@ func Test_RedisPubRun(t *testing.T) { g := NewRedisPub(cfg, logger.New(false), "test") // fake json receiver - fakeRcvr, err := net.Listen(netlib.SocketTCP, ":6379") + fakeRcvr, err := net.Listen(netutils.SocketTCP, ":6379") if err != nil { t.Fatal(err) } defer fakeRcvr.Close() // start the logger - go g.Run() + go g.StartCollect() // accept conn from logger conn, err := fakeRcvr.Accept() diff --git a/workers/restapi.go b/workers/restapi.go new file mode 100644 index 00000000..66d84c54 --- /dev/null +++ b/workers/restapi.go @@ -0,0 +1,724 @@ +package workers + +import ( + "crypto/tls" + "encoding/json" + "net" + "net/http" + "strconv" + "sync" + + 
"github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" + "github.com/dmachard/go-topmap" +) + +type HitsRecord struct { + TotalHits int `json:"total-hits"` + Hits map[string]int `json:"hits"` +} + +type SearchBy struct { + Clients map[string]*HitsRecord + Domains map[string]*HitsRecord +} + +type HitsStream struct { + Streams map[string]SearchBy +} + +type HitsUniq struct { + Clients map[string]int + Domains map[string]int + NxDomains map[string]int + SfDomains map[string]int + PublicSuffixes map[string]int + Suspicious map[string]*dnsutils.TransformSuspicious +} + +type KeyHit struct { + Key string `json:"key"` + Hit int `json:"hit"` +} + +type RestAPI struct { + *GenericWorker + doneAPI chan bool + httpserver net.Listener + httpmux *http.ServeMux + + HitsStream HitsStream + HitsUniq HitsUniq + + Streams map[string]int `json:"streams"` + + TopQnames *topmap.TopMap + TopClients *topmap.TopMap + TopTLDs *topmap.TopMap + TopNonExistent *topmap.TopMap + TopServFail *topmap.TopMap + + sync.RWMutex +} + +func NewRestAPI(config *pkgconfig.Config, logger *logger.Logger, name string) *RestAPI { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.RestAPI.ChannelBufferSize > 0 { + bufSize = config.Loggers.RestAPI.ChannelBufferSize + } + w := &RestAPI{GenericWorker: NewGenericWorker(config, logger, name, "restapi", bufSize, pkgconfig.DefaultMonitor)} + w.HitsStream = HitsStream{ + Streams: make(map[string]SearchBy), + } + w.HitsUniq = HitsUniq{ + Clients: make(map[string]int), + Domains: make(map[string]int), + NxDomains: make(map[string]int), + SfDomains: make(map[string]int), + PublicSuffixes: make(map[string]int), + Suspicious: make(map[string]*dnsutils.TransformSuspicious), + } + w.Streams = make(map[string]int) + w.TopQnames = 
topmap.NewTopMap(config.Loggers.RestAPI.TopN) + w.TopClients = topmap.NewTopMap(config.Loggers.RestAPI.TopN) + w.TopTLDs = topmap.NewTopMap(config.Loggers.RestAPI.TopN) + w.TopNonExistent = topmap.NewTopMap(config.Loggers.RestAPI.TopN) + w.TopServFail = topmap.NewTopMap(config.Loggers.RestAPI.TopN) + return w +} + +func (w *RestAPI) ReadConfig() { + if !netutils.IsValidTLS(w.GetConfig().Loggers.RestAPI.TLSMinVersion) { + w.LogFatal(pkgconfig.PrefixLogWorker + "[" + w.GetName() + "]restapi - invalid tls min version") + } +} + +func (w *RestAPI) BasicAuth(httpWriter http.ResponseWriter, r *http.Request) bool { + login, password, authOK := r.BasicAuth() + if !authOK { + return false + } + + return (login == w.GetConfig().Loggers.RestAPI.BasicAuthLogin) && + (password == w.GetConfig().Loggers.RestAPI.BasicAuthPwd) +} + +func (w *RestAPI) DeleteResetHandler(httpWriter http.ResponseWriter, r *http.Request) { + w.RLock() + defer w.RUnlock() + + if !w.BasicAuth(httpWriter, r) { + http.Error(httpWriter, "Not authorized", http.StatusUnauthorized) + return + } + + switch r.Method { + case http.MethodDelete: + + w.HitsUniq.Clients = make(map[string]int) + w.HitsUniq.Domains = make(map[string]int) + w.HitsUniq.NxDomains = make(map[string]int) + w.HitsUniq.SfDomains = make(map[string]int) + w.HitsUniq.PublicSuffixes = make(map[string]int) + w.HitsUniq.Suspicious = make(map[string]*dnsutils.TransformSuspicious) + + w.Streams = make(map[string]int) + + w.TopQnames = topmap.NewTopMap(w.GetConfig().Loggers.RestAPI.TopN) + w.TopClients = topmap.NewTopMap(w.GetConfig().Loggers.RestAPI.TopN) + w.TopTLDs = topmap.NewTopMap(w.GetConfig().Loggers.RestAPI.TopN) + w.TopNonExistent = topmap.NewTopMap(w.GetConfig().Loggers.RestAPI.TopN) + w.TopServFail = topmap.NewTopMap(w.GetConfig().Loggers.RestAPI.TopN) + + w.HitsStream.Streams = make(map[string]SearchBy) + + httpWriter.Header().Set("Content-Type", "application/text") + httpWriter.Write([]byte("OK")) + default: + http.Error(httpWriter, 
"Method not allowed", http.StatusMethodNotAllowed) + } +} + +func (w *RestAPI) GetTopTLDsHandler(httpWriter http.ResponseWriter, r *http.Request) { + w.RLock() + defer w.RUnlock() + + if !w.BasicAuth(httpWriter, r) { + http.Error(httpWriter, "Not authorized", http.StatusUnauthorized) + return + } + + httpWriter.Header().Set("Content-Type", "application/json") + + switch r.Method { + case http.MethodGet: + json.NewEncoder(httpWriter).Encode(w.TopTLDs.Get()) + default: + http.Error(httpWriter, "Method not allowed", http.StatusMethodNotAllowed) + } +} + +func (w *RestAPI) GetTopClientsHandler(httpWriter http.ResponseWriter, r *http.Request) { + w.RLock() + defer w.RUnlock() + + if !w.BasicAuth(httpWriter, r) { + http.Error(httpWriter, "Not authorized", http.StatusUnauthorized) + return + } + + httpWriter.Header().Set("Content-Type", "application/json") + + switch r.Method { + case http.MethodGet: + json.NewEncoder(httpWriter).Encode(w.TopClients.Get()) + default: + http.Error(httpWriter, "Method not allowed", http.StatusMethodNotAllowed) + } +} + +func (w *RestAPI) GetTopDomainsHandler(httpWriter http.ResponseWriter, r *http.Request) { + w.RLock() + defer w.RUnlock() + + if !w.BasicAuth(httpWriter, r) { + http.Error(httpWriter, "Not authorized", http.StatusUnauthorized) + return + } + + httpWriter.Header().Set("Content-Type", "application/json") + + switch r.Method { + case http.MethodGet: + json.NewEncoder(httpWriter).Encode(w.TopQnames.Get()) + default: + http.Error(httpWriter, "Method not allowed", http.StatusMethodNotAllowed) + } +} + +func (w *RestAPI) GetTopNxDomainsHandler(httpWriter http.ResponseWriter, r *http.Request) { + w.RLock() + defer w.RUnlock() + + if !w.BasicAuth(httpWriter, r) { + http.Error(httpWriter, "Not authorized", http.StatusUnauthorized) + return + } + + httpWriter.Header().Set("Content-Type", "application/json") + + switch r.Method { + case http.MethodGet: + json.NewEncoder(httpWriter).Encode(w.TopNonExistent.Get()) + default: + 
http.Error(httpWriter, "Method not allowed", http.StatusMethodNotAllowed) + } +} + +func (w *RestAPI) GetTopSfDomainsHandler(httpWriter http.ResponseWriter, r *http.Request) { + w.RLock() + defer w.RUnlock() + + if !w.BasicAuth(httpWriter, r) { + http.Error(httpWriter, "Not authorized", http.StatusUnauthorized) + return + } + + httpWriter.Header().Set("Content-Type", "application/json") + + switch r.Method { + case http.MethodGet: + json.NewEncoder(httpWriter).Encode(w.TopServFail.Get()) + default: + http.Error(httpWriter, "Method not allowed", http.StatusMethodNotAllowed) + } +} + +func (w *RestAPI) GetTLDsHandler(httpWriter http.ResponseWriter, r *http.Request) { + w.RLock() + defer w.RUnlock() + + if !w.BasicAuth(httpWriter, r) { + http.Error(httpWriter, "Not authorized", http.StatusUnauthorized) + return + } + + httpWriter.Header().Set("Content-Type", "application/json") + + switch r.Method { + case http.MethodGet: + // return as array + dataArray := []KeyHit{} + for tld, hit := range w.HitsUniq.PublicSuffixes { + dataArray = append(dataArray, KeyHit{Key: tld, Hit: hit}) + } + + // encode + json.NewEncoder(httpWriter).Encode(dataArray) + default: + http.Error(httpWriter, "Method not allowed", http.StatusMethodNotAllowed) + } +} + +func (w *RestAPI) GetClientsHandler(httpWriter http.ResponseWriter, r *http.Request) { + w.RLock() + defer w.RUnlock() + + if !w.BasicAuth(httpWriter, r) { + http.Error(httpWriter, "Not authorized", http.StatusUnauthorized) + return + } + + httpWriter.Header().Set("Content-Type", "application/json") + + switch r.Method { + case http.MethodGet: + // return as array + dataArray := []KeyHit{} + for address, hit := range w.HitsUniq.Clients { + dataArray = append(dataArray, KeyHit{Key: address, Hit: hit}) + } + // encode + json.NewEncoder(httpWriter).Encode(dataArray) + default: + http.Error(httpWriter, "Method not allowed", http.StatusMethodNotAllowed) + } +} + +func (w *RestAPI) GetDomainsHandler(httpWriter http.ResponseWriter, r 
*http.Request) { + w.RLock() + defer w.RUnlock() + + if !w.BasicAuth(httpWriter, r) { + http.Error(httpWriter, "Not authorized", http.StatusUnauthorized) + return + } + + httpWriter.Header().Set("Content-Type", "application/json") + + switch r.Method { + case http.MethodGet: + // return as array + dataArray := []KeyHit{} + for domain, hit := range w.HitsUniq.Domains { + dataArray = append(dataArray, KeyHit{Key: domain, Hit: hit}) + } + + // encode + json.NewEncoder(httpWriter).Encode(dataArray) + default: + http.Error(httpWriter, "Method not allowed", http.StatusMethodNotAllowed) + } +} + +func (w *RestAPI) GetNxDomainsHandler(httpWriter http.ResponseWriter, r *http.Request) { + w.RLock() + defer w.RUnlock() + + if !w.BasicAuth(httpWriter, r) { + http.Error(httpWriter, "Not authorized", http.StatusUnauthorized) + return + } + + httpWriter.Header().Set("Content-Type", "application/json") + + switch r.Method { + case http.MethodGet: + // convert to array + dataArray := []KeyHit{} + for domain, hit := range w.HitsUniq.NxDomains { + dataArray = append(dataArray, KeyHit{Key: domain, Hit: hit}) + } + + // encode + json.NewEncoder(httpWriter).Encode(dataArray) + + default: + http.Error(httpWriter, "Method not allowed", http.StatusMethodNotAllowed) + } +} + +func (w *RestAPI) GetSfDomainsHandler(httpWriter http.ResponseWriter, r *http.Request) { + w.RLock() + defer w.RUnlock() + + if !w.BasicAuth(httpWriter, r) { + http.Error(httpWriter, "Not authorized", http.StatusUnauthorized) + return + } + + httpWriter.Header().Set("Content-Type", "application/json") + + switch r.Method { + case http.MethodGet: + // return as array + dataArray := []KeyHit{} + for domain, hit := range w.HitsUniq.SfDomains { + dataArray = append(dataArray, KeyHit{Key: domain, Hit: hit}) + } + + // encode + json.NewEncoder(httpWriter).Encode(dataArray) + default: + http.Error(httpWriter, "Method not allowed", http.StatusMethodNotAllowed) + } +} + +func (w *RestAPI) GetSuspiciousHandler(httpWriter 
http.ResponseWriter, r *http.Request) { + w.RLock() + defer w.RUnlock() + + if !w.BasicAuth(httpWriter, r) { + http.Error(httpWriter, "Not authorized", http.StatusUnauthorized) + return + } + + httpWriter.Header().Set("Content-Type", "application/json") + + switch r.Method { + case http.MethodGet: + // return as array + dataArray := []*dnsutils.TransformSuspicious{} + for domain, suspicious := range w.HitsUniq.Suspicious { + // NOTE(review): this writes to shared *TransformSuspicious values while holding only the read lock — confirm no concurrent writer + suspicious.Domain = domain + dataArray = append(dataArray, suspicious) + } + + // encode + json.NewEncoder(httpWriter).Encode(dataArray) + default: + http.Error(httpWriter, "Method not allowed", http.StatusMethodNotAllowed) + } +} + +// GetSearchHandler returns hits for the value of the "filter" query parameter, +// matching it first as a client IP and, if nothing matched, as a domain name. +func (w *RestAPI) GetSearchHandler(httpWriter http.ResponseWriter, r *http.Request) { + w.RLock() + defer w.RUnlock() + + if !w.BasicAuth(httpWriter, r) { + http.Error(httpWriter, "Not authorized", http.StatusUnauthorized) + return + } + + switch r.Method { + case http.MethodGet: + + filter := r.URL.Query()["filter"] + if len(filter) == 0 { + http.Error(httpWriter, "Arguments are missing", http.StatusBadRequest) + // must stop here: without this return, filter[0] below panics with index out of range when ?filter= is absent + return + } + + dataArray := []KeyHit{} + + // search by IP + for _, search := range w.HitsStream.Streams { + userHits, clientExists := search.Clients[filter[0]] + if clientExists { + for domain, hit := range userHits.Hits { + dataArray = append(dataArray, KeyHit{Key: domain, Hit: hit}) + } + } + } + + // search by domain + if len(dataArray) == 0 { + for _, search := range w.HitsStream.Streams { + domainHists, domainExists := search.Domains[filter[0]] + if domainExists { + for addr, hit := range domainHists.Hits { + dataArray = append(dataArray, KeyHit{Key: addr, Hit: hit}) + } + } + } + } + + // encode to json + httpWriter.Header().Set("Content-Type", "application/json") + json.NewEncoder(httpWriter).Encode(dataArray) + + default: + http.Error(httpWriter, "{\"error\": \"Method not allowed\"}", http.StatusMethodNotAllowed) + } +} + +func (w *RestAPI) GetStreamsHandler(httpWriter http.ResponseWriter, r 
*http.Request) { + w.RLock() + defer w.RUnlock() + + if !w.BasicAuth(httpWriter, r) { + http.Error(httpWriter, "Not authorized", http.StatusUnauthorized) + return + } + + httpWriter.Header().Set("Content-Type", "application/json") + + switch r.Method { + case http.MethodGet: + + dataArray := []KeyHit{} + for stream, hit := range w.Streams { + dataArray = append(dataArray, KeyHit{Key: stream, Hit: hit}) + } + + json.NewEncoder(httpWriter).Encode(dataArray) + default: + http.Error(httpWriter, "Method not allowed", http.StatusMethodNotAllowed) + } +} + +func (w *RestAPI) RecordDNSMessage(dm dnsutils.DNSMessage) { + w.Lock() + defer w.Unlock() + + if _, exists := w.Streams[dm.DNSTap.Identity]; !exists { + w.Streams[dm.DNSTap.Identity] = 1 + } else { + w.Streams[dm.DNSTap.Identity] += 1 + } + + // record suspicious domains only is enabled + if dm.Suspicious != nil { + if dm.Suspicious.Score > 0.0 { + if _, exists := w.HitsUniq.Suspicious[dm.DNS.Qname]; !exists { + w.HitsUniq.Suspicious[dm.DNS.Qname] = dm.Suspicious + } + } + } + + // uniq record for tld + // record public suffix only if enabled + if dm.PublicSuffix != nil { + if dm.PublicSuffix.QnamePublicSuffix != "-" { + if _, ok := w.HitsUniq.PublicSuffixes[dm.PublicSuffix.QnamePublicSuffix]; !ok { + w.HitsUniq.PublicSuffixes[dm.PublicSuffix.QnamePublicSuffix] = 1 + } else { + w.HitsUniq.PublicSuffixes[dm.PublicSuffix.QnamePublicSuffix]++ + } + } + } + + // uniq record for domains + if _, exists := w.HitsUniq.Domains[dm.DNS.Qname]; !exists { + w.HitsUniq.Domains[dm.DNS.Qname] = 1 + } else { + w.HitsUniq.Domains[dm.DNS.Qname] += 1 + } + + if dm.DNS.Rcode == dnsutils.DNSRcodeNXDomain { + if _, exists := w.HitsUniq.NxDomains[dm.DNS.Qname]; !exists { + w.HitsUniq.NxDomains[dm.DNS.Qname] = 1 + } else { + w.HitsUniq.NxDomains[dm.DNS.Qname] += 1 + } + } + + if dm.DNS.Rcode == dnsutils.DNSRcodeServFail { + if _, exists := w.HitsUniq.SfDomains[dm.DNS.Qname]; !exists { + w.HitsUniq.SfDomains[dm.DNS.Qname] = 1 + } else { + 
w.HitsUniq.SfDomains[dm.DNS.Qname] += 1 + } + } + + // uniq record for queries + if _, exists := w.HitsUniq.Clients[dm.NetworkInfo.QueryIP]; !exists { + w.HitsUniq.Clients[dm.NetworkInfo.QueryIP] = 1 + } else { + w.HitsUniq.Clients[dm.NetworkInfo.QueryIP] += 1 + } + + // uniq top qnames and clients + w.TopQnames.Record(dm.DNS.Qname, w.HitsUniq.Domains[dm.DNS.Qname]) + w.TopClients.Record(dm.NetworkInfo.QueryIP, w.HitsUniq.Clients[dm.NetworkInfo.QueryIP]) + if dm.PublicSuffix != nil { + w.TopTLDs.Record(dm.PublicSuffix.QnamePublicSuffix, w.HitsUniq.PublicSuffixes[dm.PublicSuffix.QnamePublicSuffix]) + } + if dm.DNS.Rcode == dnsutils.DNSRcodeNXDomain { + w.TopNonExistent.Record(dm.DNS.Qname, w.HitsUniq.NxDomains[dm.DNS.Qname]) + } + if dm.DNS.Rcode == dnsutils.DNSRcodeServFail { + w.TopServFail.Record(dm.DNS.Qname, w.HitsUniq.SfDomains[dm.DNS.Qname]) + } + + // record dns message per client source ip and domain + if _, exists := w.HitsStream.Streams[dm.DNSTap.Identity]; !exists { + w.HitsStream.Streams[dm.DNSTap.Identity] = SearchBy{Clients: make(map[string]*HitsRecord), + Domains: make(map[string]*HitsRecord)} + } + + // continue with the query IP + if _, exists := w.HitsStream.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP]; !exists { + w.HitsStream.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP] = &HitsRecord{Hits: make(map[string]int), TotalHits: 1} + } else { + w.HitsStream.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP].TotalHits += 1 + } + + // continue with Qname + if _, exists := w.HitsStream.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP].Hits[dm.DNS.Qname]; !exists { + w.HitsStream.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP].Hits[dm.DNS.Qname] = 1 + } else { + w.HitsStream.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP].Hits[dm.DNS.Qname] += 1 + } + + // domain doesn't exists in domains map? 
+ if _, exists := w.HitsStream.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname]; !exists { + w.HitsStream.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname] = &HitsRecord{Hits: make(map[string]int), TotalHits: 1} + } else { + w.HitsStream.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname].TotalHits += 1 + } + + // domain doesn't exists in domains map? + if _, exists := w.HitsStream.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname].Hits[dm.NetworkInfo.QueryIP]; !exists { + w.HitsStream.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname].Hits[dm.NetworkInfo.QueryIP] = 1 + } else { + w.HitsStream.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname].Hits[dm.NetworkInfo.QueryIP] += 1 + } +} + +func (w *RestAPI) ListenAndServe() { + w.LogInfo("starting server...") + + mux := http.NewServeMux() + mux.HandleFunc("/tlds", w.GetTLDsHandler) + mux.HandleFunc("/tlds/top", w.GetTopTLDsHandler) + mux.HandleFunc("/streams", w.GetStreamsHandler) + mux.HandleFunc("/clients", w.GetClientsHandler) + mux.HandleFunc("/clients/top", w.GetTopClientsHandler) + mux.HandleFunc("/domains", w.GetDomainsHandler) + mux.HandleFunc("/domains/servfail", w.GetSfDomainsHandler) + mux.HandleFunc("/domains/top", w.GetTopDomainsHandler) + mux.HandleFunc("/domains/nx/top", w.GetTopNxDomainsHandler) + mux.HandleFunc("/domains/servfail/top", w.GetTopSfDomainsHandler) + mux.HandleFunc("/suspicious", w.GetSuspiciousHandler) + mux.HandleFunc("/search", w.GetSearchHandler) + mux.HandleFunc("/reset", w.DeleteResetHandler) + + var err error + var listener net.Listener + addrlisten := w.GetConfig().Loggers.RestAPI.ListenIP + ":" + strconv.Itoa(w.GetConfig().Loggers.RestAPI.ListenPort) + + // listening with tls enabled ? 
+ if w.GetConfig().Loggers.RestAPI.TLSSupport { + w.LogInfo("tls support enabled") + var cer tls.Certificate + cer, err = tls.LoadX509KeyPair(w.GetConfig().Loggers.RestAPI.CertFile, w.GetConfig().Loggers.RestAPI.KeyFile) + if err != nil { + w.LogFatal("loading certificate failed:", err) + } + + // prepare tls configuration + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cer}, + MinVersion: tls.VersionTLS12, + } + + // update tls min version according to the user config + tlsConfig.MinVersion = netutils.TLSVersion[w.GetConfig().Loggers.RestAPI.TLSMinVersion] + + listener, err = tls.Listen(netutils.SocketTCP, addrlisten, tlsConfig) + + } else { + // basic listening + listener, err = net.Listen(netutils.SocketTCP, addrlisten) + } + + // something wrong ? + if err != nil { + w.LogFatal("listening failed:", err) + } + + w.httpserver = listener + w.httpmux = mux + w.LogInfo("is listening on %s", listener.Addr()) + + http.Serve(w.httpserver, w.httpmux) + + w.LogInfo("http server terminated") + w.doneAPI <- true +} + +func (w *RestAPI) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare transforms + subprocessors := transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0) + + // start http server + go w.ListenAndServe() + + // goroutine to process transformed dns messages + go w.StartLogging() + + // loop to process incoming messages + for { + select { + case <-w.OnStop(): + w.StopLogger() + subprocessors.Reset() + + w.httpserver.Close() + <-w.doneAPI + + return + + // new config provided? 
+ case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.ReadConfig() + subprocessors.ReloadConfig(&cfg.OutgoingTransformers) + + case dm, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("input channel closed!") + return + } + // count global messages + w.CountIngressTraffic() + + // apply tranforms, init dns message with additionnals parts if necessary + transformResult, err := subprocessors.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // send to output channel + w.CountEgressTraffic() + w.GetOutputChannel() <- dm + + // send to next ? + w.SendForwardedTo(defaultRoutes, defaultNames, dm) + } + } +} + +func (w *RestAPI) StartLogging() { + w.LogInfo("logging has started") + defer w.LoggingDone() + + for { + select { + case <-w.OnLoggerStopped(): + return + + case dm, opened := <-w.GetOutputChannel(): + if !opened { + w.LogInfo("output channel closed!") + return + } + // record the dnstap message + w.RecordDNSMessage(dm) + } + } +} diff --git a/loggers/restapi_test.go b/workers/restapi_test.go similarity index 98% rename from loggers/restapi_test.go rename to workers/restapi_test.go index 1a472ce4..e3a7f720 100644 --- a/loggers/restapi_test.go +++ b/workers/restapi_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "net/http" @@ -14,7 +14,7 @@ import ( func TestRestAPI_BadBasicAuth(t *testing.T) { // init the logger - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() g := NewRestAPI(config, logger.New(false), "test") tt := []struct { @@ -60,7 +60,7 @@ func TestRestAPI_BadBasicAuth(t *testing.T) { func TestRestAPI_MethodNotAllowed(t *testing.T) { // init the logger - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() g := NewRestAPI(config, logger.New(false), "test") // record one dns message to simulate some incoming data @@ -150,7 +150,7 @@ func 
TestRestAPI_MethodNotAllowed(t *testing.T) { func TestRestAPI_GetMethod(t *testing.T) { // init the logger - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() g := NewRestAPI(config, logger.New(false), "test") tt := []struct { diff --git a/workers/scalyr.go b/workers/scalyr.go new file mode 100644 index 00000000..30410b6c --- /dev/null +++ b/workers/scalyr.go @@ -0,0 +1,399 @@ +package workers + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/google/uuid" + "github.com/grafana/dskit/backoff" + + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" +) + +// ScalyrClient is a client for Scalyr(https://www.dataset.com/) +// This client is using the addEvents endpoint, described here: https://app.scalyr.com/help/api#addEvents +type ScalyrClient struct { + *GenericWorker + + mode string + textFormat []string + + session string // Session ID, used by scalyr, see API docs + + httpclient *http.Client + endpoint string // Where to send the data + apikey string // API Token to use for authorizing requests + parser string // Parser used by Scalyr + flush *time.Ticker // Timer that allows us to flush events periodically + + submissions chan []byte // Marshalled JSON to send to Scalyr + submitterDone chan bool // Will be written to when the HTTP submitter is done +} + +func NewScalyrClient(config *pkgconfig.Config, console *logger.Logger, name string) *ScalyrClient { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.ScalyrClient.ChannelBufferSize > 0 { + bufSize = config.Loggers.ScalyrClient.ChannelBufferSize + } + w := &ScalyrClient{GenericWorker: NewGenericWorker(config, console, name, "scalyr", bufSize, pkgconfig.DefaultMonitor)} + w.mode = pkgconfig.ModeText + 
w.endpoint = makeEndpoint("app.scalyr.com") + w.flush = time.NewTicker(30 * time.Second) + w.session = uuid.NewString() + w.submissions = make(chan []byte, 25) + w.submitterDone = make(chan bool) + w.ReadConfig() + return w +} + +// makeEndpoint builds the Scalyr addEvents API URL for the given host. +func makeEndpoint(host string) string { + return fmt.Sprintf("https://%s/api/addEvents", host) +} + +// ReadConfig loads and validates the Scalyr client settings (API key, mode, parser, +// text format, endpoint, flush interval, TLS and proxy) and rebuilds the HTTP client. +// It aborts the worker when a mandatory setting is missing or invalid. +func (w *ScalyrClient) ReadConfig() { + if len(w.GetConfig().Loggers.ScalyrClient.APIKey) == 0 { + w.LogFatal("No API Key configured for Scalyr Client") + } + w.apikey = w.GetConfig().Loggers.ScalyrClient.APIKey + + if len(w.GetConfig().Loggers.ScalyrClient.Mode) != 0 { + w.mode = w.GetConfig().Loggers.ScalyrClient.Mode + } + + if len(w.GetConfig().Loggers.ScalyrClient.Parser) == 0 && (w.mode == pkgconfig.ModeText || w.mode == pkgconfig.ModeJSON) { + w.LogFatal(fmt.Sprintf("No Scalyr parser configured for Scalyr Client in %s mode", w.mode)) + } + w.parser = w.GetConfig().Loggers.ScalyrClient.Parser + + if len(w.GetConfig().Loggers.ScalyrClient.TextFormat) > 0 { + w.textFormat = strings.Fields(w.GetConfig().Loggers.ScalyrClient.TextFormat) + } else { + w.textFormat = strings.Fields(w.GetConfig().Global.TextFormat) + } + + if host := w.GetConfig().Loggers.ScalyrClient.ServerURL; host != "" { + w.endpoint = makeEndpoint(host) + } + + // NOTE(review): the previous ticker is replaced without being stopped — leaks one ticker per (re)load; verify whether Stop() is safe here given the concurrent select in StartLogging + if flushInterval := w.GetConfig().Loggers.ScalyrClient.FlushInterval; flushInterval != 0 { + w.flush = time.NewTicker(time.Duration(flushInterval) * time.Second) + } + + // tls client config + tlsOptions := netutils.TLSOptions{ + InsecureSkipVerify: w.GetConfig().Loggers.ScalyrClient.TLSInsecure, + MinVersion: w.GetConfig().Loggers.ScalyrClient.TLSMinVersion, + CAFile: w.GetConfig().Loggers.ScalyrClient.CAFile, + CertFile: w.GetConfig().Loggers.ScalyrClient.CertFile, + KeyFile: w.GetConfig().Loggers.ScalyrClient.KeyFile, + } + + tlsConfig, err := netutils.TLSClientConfig(tlsOptions) + if err != nil { + // fixed typo in the fatal message: "confgi" -> "config" + w.LogFatal("unable to parse tls config: ", err) + } + + // prepare http client + tr := &http.Transport{ + 
MaxIdleConns: 10, + IdleConnTimeout: 30 * time.Second, + DisableCompression: false, + TLSClientConfig: tlsConfig, + } + + // use proxy + if len(w.GetConfig().Loggers.ScalyrClient.ProxyURL) > 0 { + proxyURL, err := url.Parse(w.GetConfig().Loggers.ScalyrClient.ProxyURL) + if err != nil { + w.LogFatal("unable to parse proxy url: ", err) + } + tr.Proxy = http.ProxyURL(proxyURL) + } + + w.httpclient = &http.Client{Transport: tr} +} + +func (w *ScalyrClient) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare transforms + subprocessors := transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0) + + // goroutine to process transformed dns messages + go w.StartLogging() + + // loop to process incoming messages + for { + select { + case <-w.OnStop(): + w.StopLogger() + subprocessors.Reset() + return + + // new config provided? + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.ReadConfig() + subprocessors.ReloadConfig(&cfg.OutgoingTransformers) + + case dm, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("input channel closed!") + return + } + // count global messages + w.CountIngressTraffic() + + // apply tranforms, init dns message with additionnals parts if necessary + transformResult, err := subprocessors.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // send to output channel + w.CountEgressTraffic() + w.GetOutputChannel() <- dm + + // send to next ? 
+ w.SendForwardedTo(defaultRoutes, defaultNames, dm) + } + } +} + +// StartLogging consumes transformed DNS messages from the output channel, converts +// each one into a Scalyr event according to the configured mode, and submits batches +// (on size >= 400 or on the flush ticker) to the background HTTP submitter. +func (w *ScalyrClient) StartLogging() { + w.LogInfo("logging has started") + defer w.LoggingDone() + + sInfo := w.GetConfig().Loggers.ScalyrClient.SessionInfo + if sInfo == nil { + sInfo = make(map[string]string) + } + // base attributes shared by all events (user attrs + optional parser) + attrs := make(map[string]interface{}) + for k, v := range w.GetConfig().Loggers.ScalyrClient.Attrs { + attrs[k] = v + } + if len(w.parser) != 0 { + attrs["parser"] = w.parser + } + var events []event + + if host, ok := sInfo["serverHost"]; !ok || len(host) == 0 { + hostname, err := os.Hostname() + if err != nil { + hostname = "unknown-hostname" + } + sInfo["serverHost"] = hostname + } + + w.runSubmitter() + + for { + select { + case <-w.OnLoggerStopped(): + + if len(events) > 0 { + w.submitEventRecord(sInfo, events) + } + close(w.submissions) + + // Block until both threads are done + <-w.submitterDone + + return + + // incoming dns message to process + case dm, opened := <-w.GetOutputChannel(): + if !opened { + w.LogInfo("output channel closed!") + return + } + + // build a fresh attrs map per event: previously every queued event aliased + // the single shared map, so all events in a batch carried the last message + eventAttrs := make(map[string]interface{}, len(attrs)+1) + for k, v := range attrs { + eventAttrs[k] = v + } + switch w.mode { + case pkgconfig.ModeText: + eventAttrs["message"] = string(dm.Bytes(w.textFormat, + w.GetConfig().Global.TextFormatDelimiter, + w.GetConfig().Global.TextFormatBoundary)) + case pkgconfig.ModeJSON: + eventAttrs["message"] = dm + case pkgconfig.ModeFlatJSON: + var err error + // %v, not %e: %e is a floating-point verb and mangles error values + if eventAttrs, err = dm.Flatten(); err != nil { + w.LogError("unable to flatten: %v", err) + break + } + // Add user's attrs without overwriting flattened ones + for k, v := range w.GetConfig().Loggers.ScalyrClient.Attrs { + if _, ok := eventAttrs[k]; !ok { + eventAttrs[k] = v + } + } + } + events = append(events, event{ + TS: strconv.FormatInt(time.Unix(int64(dm.DNSTap.TimeSec), int64(dm.DNSTap.TimeNsec)).UnixNano(), 10), + Sev: SeverityInfo, + Attrs: eventAttrs, + }) + if len(events) >= 400 { + // Maximum size of a POST is 6MB. 400 events would mean that each dnstap entry + // can be a little over 15 kB in JSON, which should be plenty.
+ w.submitEventRecord(sInfo, events) + events = []event{} + } + case <-w.flush.C: + if len(events) > 0 { + w.submitEventRecord(sInfo, events) + events = []event{} + } + } + } +} + +func (w *ScalyrClient) submitEventRecord(sessionInfo map[string]string, events []event) { + er := eventRecord{ + Session: w.session, + SessionInfo: sessionInfo, + Events: events, + } + buf, err := json.Marshal(er) + if err != nil { + // TODO should this panic? + w.LogError("Unable to create JSON from events: %e", err) + } + w.submissions <- buf +} + +func (w *ScalyrClient) runSubmitter() { + go func() { + for m := range w.submissions { + w.send(m) + } + w.submitterDone <- true + }() + w.LogInfo("HTTP Submitter started") +} + +func (w *ScalyrClient) send(buf []byte) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + MinBackoff := 500 * time.Millisecond + MaxBackoff := 5 * time.Minute + MaxRetries := 10 + + backoff := backoff.New(ctx, backoff.Config{ + MaxBackoff: MaxBackoff, + MaxRetries: MaxRetries, + MinBackoff: MinBackoff, + }) + + for { + post, err := http.NewRequest("POST", w.endpoint, bytes.NewReader(buf)) + if err != nil { + w.LogError("new http error: %s", err) + return + } + post = post.WithContext(ctx) + post.Header.Set("Content-Type", "application/json") + post.Header.Set("User-Agent", "dnscollector") + post.Header.Set("Authorization", fmt.Sprintf("Bearer %s", w.apikey)) + + // send post and read response + resp, err := w.httpclient.Do(post) + if err != nil { + w.LogError("do http error: %s", err) + return + } + + // success ? + if resp.StatusCode > 0 && resp.StatusCode != 429 && resp.StatusCode/100 != 5 { + break + } + + // something is wrong, retry ? 
+ if resp.StatusCode/100 != 2 { + response, err := parseServerResponse(resp.Body) + if err != nil { + w.LogError("server returned HTTP status %s (%d), unable to decode response: %e", resp.Status, resp.StatusCode, err) + } else { + w.LogError("server returned HTTP status %s (%d), %s", resp.Status, resp.StatusCode, response.Message) + } + } + + // wait before retry + backoff.Wait() + + // Make sure it sends at least once before checking for retry. + if !backoff.Ongoing() { + break + } + } +} + +func parseServerResponse(body io.ReadCloser) (response, error) { + var response response + b, err := io.ReadAll(body) + if err != nil { + return response, err + } + err = json.Unmarshal(b, &response) + return response, err +} + +// Models +type scalyrSeverity uint + +const ( + SeverityFinest scalyrSeverity = iota + SeverityFiner + SeverityFine + SeverityInfo + SeverityWarning + SeverityError + SeverityFatal +) + +type event struct { + Thread string `json:"thread,omitempty"` + TS string `json:"ts"` + Sev scalyrSeverity `json:"sev,omitempty"` + Attrs map[string]interface{} `json:"attrs"` +} + +type thread struct { + ID string `json:"id"` + Name string `json:"name"` +} + +type eventRecord struct { + Token string `json:"token,omitempty"` + Session string `json:"session"` + SessionInfo map[string]string `json:"sessionInfo"` + Events []event `json:"events"` + Threads []thread `json:"threads,omitempty"` +} + +type response struct { + Status string `json:"status"` + Message string `json:"message"` +} diff --git a/workers/sniffer_afpacket.go b/workers/sniffer_afpacket.go new file mode 100644 index 00000000..58d298f7 --- /dev/null +++ b/workers/sniffer_afpacket.go @@ -0,0 +1,29 @@ +//go:build windows || darwin || freebsd +// +build windows darwin freebsd + +package workers + +import ( + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" +) + +type AfpacketSniffer struct { + *GenericWorker +} + +func NewAfpacketSniffer(next []Worker, config 
*pkgconfig.Config, logger *logger.Logger, name string) *AfpacketSniffer { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Collectors.AfpacketLiveCapture.ChannelBufferSize > 0 { + bufSize = config.Collectors.AfpacketLiveCapture.ChannelBufferSize + } + w := &AfpacketSniffer{GenericWorker: NewGenericWorker(config, logger, name, "AFPACKET sniffer", bufSize, pkgconfig.DefaultMonitor)} + w.SetDefaultRoutes(next) + w.ReadConfig() + return w +} + +func (w *AfpacketSniffer) StartCollect() { + w.LogError("running collector failed...OS not supported!") + defer w.CollectDone() +} diff --git a/workers/sniffer_afpacket_linux.go b/workers/sniffer_afpacket_linux.go new file mode 100644 index 00000000..806479c1 --- /dev/null +++ b/workers/sniffer_afpacket_linux.go @@ -0,0 +1,284 @@ +//go:build linux +// +build linux + +package workers + +import ( + "context" + "encoding/binary" + "errors" + "net" + "os" + "syscall" + "time" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" + "github.com/google/gopacket" + "github.com/google/gopacket/layers" +) + +type AfpacketSniffer struct { + *GenericWorker + fd int +} + +func NewAfpacketSniffer(next []Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *AfpacketSniffer { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Collectors.AfpacketLiveCapture.ChannelBufferSize > 0 { + bufSize = config.Collectors.AfpacketLiveCapture.ChannelBufferSize + } + w := &AfpacketSniffer{GenericWorker: NewGenericWorker(config, logger, name, "afpacket sniffer", bufSize, pkgconfig.DefaultMonitor)} + w.SetDefaultRoutes(next) + return w +} + +func (w *AfpacketSniffer) Listen() error { + // raw socket + fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, netutils.Htons(syscall.ETH_P_ALL)) + if err != nil { + return err + } + + // bind to device ? 
+ if w.GetConfig().Collectors.AfpacketLiveCapture.Device != "" { + iface, err := net.InterfaceByName(w.GetConfig().Collectors.AfpacketLiveCapture.Device) + if err != nil { + return err + } + + ll := syscall.SockaddrLinklayer{ + Ifindex: iface.Index, + } + + if err := syscall.Bind(fd, &ll); err != nil { + return err + } + + w.LogInfo("binding with success to iface %q (index %d)", iface.Name, iface.Index) + } + + // set nano timestamp + err = syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_TIMESTAMPNS, 1) + if err != nil { + return err + } + + filter := netutils.GetBpfFilterPort(w.GetConfig().Collectors.AfpacketLiveCapture.Port) + err = netutils.ApplyBpfFilter(filter, fd) + if err != nil { + return err + } + + w.LogInfo("BPF filter applied") + + w.fd = fd + return nil +} + +func (w *AfpacketSniffer) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + if w.fd == 0 { + if err := w.Listen(); err != nil { + w.LogError("init raw socket failed: %v\n", err) + os.Exit(1) // nolint + } + } + + bufSize := w.GetConfig().Global.Worker.ChannelBufferSize + if w.GetConfig().Collectors.AfpacketLiveCapture.ChannelBufferSize > 0 { + bufSize = w.GetConfig().Collectors.AfpacketLiveCapture.ChannelBufferSize + } + dnsProcessor := NewDNSProcessor(w.GetConfig(), w.GetLogger(), w.GetName(), bufSize) + dnsProcessor.SetDefaultRoutes(w.GetDefaultRoutes()) + dnsProcessor.SetDefaultDropped(w.GetDroppedRoutes()) + go dnsProcessor.StartCollect() + + dnsChan := make(chan netutils.DNSPacket) + udpChan := make(chan gopacket.Packet) + tcpChan := make(chan gopacket.Packet) + fragIP4Chan := make(chan gopacket.Packet) + fragIP6Chan := make(chan gopacket.Packet) + + netDecoder := &netutils.NetDecoder{} + + // defrag ipv4 + go netutils.IPDefragger(fragIP4Chan, udpChan, tcpChan, w.GetConfig().Collectors.AfpacketLiveCapture.Port) + // defrag ipv6 + go netutils.IPDefragger(fragIP6Chan, udpChan, tcpChan, w.GetConfig().Collectors.AfpacketLiveCapture.Port) + // tcp 
w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] read data", err)
int64(nsec)) + + // copy packet data from buffer + pkt := make([]byte, bufN) + copy(pkt, buf[:bufN]) + + // decode minimal layers + packet := gopacket.NewPacket(pkt, netDecoder, gopacket.NoCopy) + packet.Metadata().CaptureLength = len(packet.Data()) + packet.Metadata().Length = len(packet.Data()) + packet.Metadata().Timestamp = timestamp + + // some security checks + if packet.NetworkLayer() == nil { + continue + } + if packet.TransportLayer() == nil { + continue + } + + // ipv4 fragmented packet ? + if packet.NetworkLayer().LayerType() == layers.LayerTypeIPv4 { + if !w.GetConfig().Collectors.AfpacketLiveCapture.FragmentSupport { + continue + } + ip4 := packet.NetworkLayer().(*layers.IPv4) + if ip4.Flags&layers.IPv4MoreFragments == 1 || ip4.FragOffset > 0 { + fragIP4Chan <- packet + continue + } + } + + // ipv6 fragmented packet ? + if packet.NetworkLayer().LayerType() == layers.LayerTypeIPv6 { + if !w.GetConfig().Collectors.AfpacketLiveCapture.FragmentSupport { + continue + } + v6frag := packet.Layer(layers.LayerTypeIPv6Fragment) + if v6frag != nil { + fragIP6Chan <- packet + continue + } + } + + // tcp or udp packets ? + if packet.TransportLayer().LayerType() == layers.LayerTypeUDP { + udpChan <- packet + } + + if packet.TransportLayer().LayerType() == layers.LayerTypeTCP { + tcpChan <- packet + } + } + } + + }(ctx) + + // prepare dns message + dm := dnsutils.DNSMessage{} + + for { + select { + case <-w.OnStop(): + w.LogInfo("stop to listen...") + cancel() + <-done + return + + // new config provided? + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + // send the config to the dns processor + dnsProcessor.NewConfig() <- cfg + + // dns message to read ? 
+ case dnsPacket := <-dnsChan: + // reset + dm.Init() + + dm.NetworkInfo.Family = dnsPacket.IPLayer.EndpointType().String() + dm.NetworkInfo.QueryIP = dnsPacket.IPLayer.Src().String() + dm.NetworkInfo.ResponseIP = dnsPacket.IPLayer.Dst().String() + dm.NetworkInfo.QueryPort = dnsPacket.TransportLayer.Src().String() + dm.NetworkInfo.ResponsePort = dnsPacket.TransportLayer.Dst().String() + dm.NetworkInfo.Protocol = dnsPacket.TransportLayer.EndpointType().String() + + dm.DNS.Payload = dnsPacket.Payload + dm.DNS.Length = len(dnsPacket.Payload) + + dm.DNSTap.Identity = w.GetConfig().GetServerIdentity() + + timestamp := dnsPacket.Timestamp.UnixNano() + seconds := timestamp / int64(time.Second) + dm.DNSTap.TimeSec = int(seconds) + dm.DNSTap.TimeNsec = int(timestamp - seconds*int64(time.Second)*int64(time.Nanosecond)) + + // send DNS message to DNS processor + dnsProcessor.GetInputChannel() <- dm + } + } +} diff --git a/collectors/sniffer_afpacket_test.go b/workers/sniffer_afpacket_test.go similarity index 65% rename from collectors/sniffer_afpacket_test.go rename to workers/sniffer_afpacket_test.go index dc6aba22..b924a3b0 100644 --- a/collectors/sniffer_afpacket_test.go +++ b/workers/sniffer_afpacket_test.go @@ -1,7 +1,7 @@ //go:build linux // +build linux -package collectors +package workers import ( "log" @@ -10,25 +10,24 @@ import ( "github.com/dmachard/go-dnscollector/dnsutils" "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" "github.com/dmachard/go-logger" ) func TestAfpacketSnifferRun(t *testing.T) { - g := pkgutils.NewFakeLogger() - c := NewAfpacketSniffer([]pkgutils.Worker{g}, pkgconfig.GetFakeConfig(), logger.New(false), "test") + g := GetWorkerForTest(pkgconfig.DefaultBufferSize) + c := NewAfpacketSniffer([]Worker{g}, pkgconfig.GetDefaultConfig(), logger.New(false), "test") if err := c.Listen(); err != nil { log.Fatal("collector sniffer listening error: ", err) } - go c.Run() + go 
c.StartCollect() // send dns query - net.LookupIP("dns.collector") + net.LookupIP(pkgconfig.ProgQname) // waiting message in channel for { msg := <-g.GetInputChannel() - if msg.DNSTap.Operation == dnsutils.DNSTapClientQuery && msg.DNS.Qname == "dns.collector" { + if msg.DNSTap.Operation == dnsutils.DNSTapClientQuery && msg.DNS.Qname == pkgconfig.ProgQname { break } } diff --git a/workers/sniffer_xdp.go b/workers/sniffer_xdp.go new file mode 100644 index 00000000..c2970836 --- /dev/null +++ b/workers/sniffer_xdp.go @@ -0,0 +1,211 @@ +//go:build linux || darwin || freebsd +// +build linux darwin freebsd + +package workers + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "net" + "time" + + "github.com/cilium/ebpf/link" + "github.com/cilium/ebpf/perf" + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" + "golang.org/x/sys/unix" +) + +type XDPSniffer struct { + *GenericWorker +} + +func NewXDPSniffer(next []Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *XDPSniffer { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Collectors.XdpLiveCapture.ChannelBufferSize > 0 { + bufSize = config.Collectors.XdpLiveCapture.ChannelBufferSize + } + w := &XDPSniffer{GenericWorker: NewGenericWorker(config, logger, name, "xdp sniffer", bufSize, pkgconfig.DefaultMonitor)} + w.SetDefaultRoutes(next) + return w +} + +func (w *XDPSniffer) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // init dns processor + bufSize := w.GetConfig().Global.Worker.ChannelBufferSize + if w.GetConfig().Collectors.XdpLiveCapture.ChannelBufferSize > 0 { + bufSize = w.GetConfig().Collectors.XdpLiveCapture.ChannelBufferSize + } + dnsProcessor := NewDNSProcessor(w.GetConfig(), w.GetLogger(), w.GetName(), bufSize) + 
dnsProcessor.SetDefaultRoutes(w.GetDefaultRoutes()) + dnsProcessor.SetDefaultDropped(w.GetDroppedRoutes()) + go dnsProcessor.StartCollect() + + // get network interface by name + iface, err := net.InterfaceByName(w.GetConfig().Collectors.XdpLiveCapture.Device) + if err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] lookup network iface: ", err) + } + + // Load pre-compiled programs into the kernel. + objs := netutils.BpfObjects{} + if err := netutils.LoadBpfObjects(&objs, nil); err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] loading BPF objects: ", err) + } + defer objs.Close() + + // Attach the program. + l, err := link.AttachXDP(link.XDPOptions{ + Program: objs.XdpSniffer, + Interface: iface.Index, + }) + if err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] could not attach XDP program: ", err) + } + defer l.Close() + + w.LogInfo("XDP program attached to iface %q (index %d)", iface.Name, iface.Index) + + perfEvent, err := perf.NewReader(objs.Pkts, 1<<24) + if err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] read event: ", err) + } + + dnsChan := make(chan dnsutils.DNSMessage) + + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + stopChan := make(chan struct{}) + + go func(ctx context.Context) { + defer func() { + dnsProcessor.Stop() + w.LogInfo("read data terminated") + defer close(done) + }() + + var pkt netutils.BpfPktEvent + var netErr net.Error + for { + select { + case <-ctx.Done(): + w.LogInfo("stopping sniffer...") + perfEvent.Close() + return + default: + // The data submitted via bpf_perf_event_output. 
+ perfEvent.SetDeadline(time.Now().Add(1 * time.Second)) + record, err := perfEvent.Read() + if err != nil { + if errors.As(err, &netErr) && netErr.Timeout() { + continue + } + w.LogError("BPF reading map: %s", err) + break + } + if record.LostSamples != 0 { + w.LogError("BPF dump: Dropped %d samples from kernel perf buffer", record.LostSamples) + continue + } + + reader := bytes.NewReader(record.RawSample) + if err := binary.Read(reader, binary.LittleEndian, &pkt); err != nil { + w.LogError("BPF reading sample: %s", err) + break + } + + // adjust arrival time + timenow := time.Now().UTC() + var ts unix.Timespec + unix.ClockGettime(unix.CLOCK_MONOTONIC, &ts) + elapsed := time.Since(timenow) * time.Nanosecond + delta3 := time.Duration(uint64(unix.TimespecToNsec(ts))-pkt.Timestamp) * time.Nanosecond + tsAdjusted := timenow.Add(-(delta3 + elapsed)) + + // convert ip + var saddr, daddr net.IP + if pkt.IpVersion == 0x0800 { + saddr = netutils.GetIPAddress(pkt.SrcAddr, netutils.ConvertIP4) + daddr = netutils.GetIPAddress(pkt.DstAddr, netutils.ConvertIP4) + } else { + saddr = netutils.GetIPAddress(pkt.SrcAddr6, netutils.ConvertIP6) + daddr = netutils.GetIPAddress(pkt.DstAddr6, netutils.ConvertIP6) + } + + // prepare DnsMessage + dm := dnsutils.DNSMessage{} + dm.Init() + + dm.DNSTap.TimeSec = int(tsAdjusted.Unix()) + dm.DNSTap.TimeNsec = int(tsAdjusted.UnixNano() - tsAdjusted.Unix()*1e9) + + if pkt.SrcPort == 53 { + dm.DNSTap.Operation = dnsutils.DNSTapClientResponse + } else { + dm.DNSTap.Operation = dnsutils.DNSTapClientQuery + } + + dm.NetworkInfo.QueryIP = saddr.String() + dm.NetworkInfo.QueryPort = fmt.Sprint(pkt.SrcPort) + dm.NetworkInfo.ResponseIP = daddr.String() + dm.NetworkInfo.ResponsePort = fmt.Sprint(pkt.DstPort) + + if pkt.IpVersion == 0x0800 { + dm.NetworkInfo.Family = netutils.ProtoIPv4 + } else { + dm.NetworkInfo.Family = netutils.ProtoIPv6 + } + + if pkt.IpProto == 0x11 { + dm.NetworkInfo.Protocol = netutils.ProtoUDP + dm.DNS.Payload = 
record.RawSample[int(pkt.PktOffset)+int(pkt.PayloadOffset):] + dm.DNS.Length = len(dm.DNS.Payload) + } else { + dm.NetworkInfo.Protocol = netutils.ProtoTCP + dm.DNS.Payload = record.RawSample[int(pkt.PktOffset)+int(pkt.PayloadOffset)+2:] + dm.DNS.Length = len(dm.DNS.Payload) + } + + select { + case <-stopChan: + return + case dnsChan <- dm: + } + } + } + }(ctx) + + for { + select { + case <-w.OnStop(): + w.LogInfo("stop to listen...") + cancel() + close(stopChan) + <-done + return + + // new config provided? + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + + // send the config to the dns processor + dnsProcessor.NewConfig() <- cfg + + // dns message to read ? + case dm := <-dnsChan: + + // update identity with config ? + dm.DNSTap.Identity = w.GetConfig().GetServerIdentity() + + dnsProcessor.GetInputChannel() <- dm + } + } +} diff --git a/workers/sniffer_xdp_windows.go b/workers/sniffer_xdp_windows.go new file mode 100644 index 00000000..1465e0cb --- /dev/null +++ b/workers/sniffer_xdp_windows.go @@ -0,0 +1,29 @@ +//go:build windows +// +build windows + +package workers + +import ( + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" +) + +type XDPSniffer struct { + *GenericWorker +} + +func NewXDPSniffer(next []Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *XDPSniffer { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Collectors.XdpLiveCapture.ChannelBufferSize > 0 { + bufSize = config.Collectors.XdpLiveCapture.ChannelBufferSize + } + w := &XDPSniffer{GenericWorker: NewGenericWorker(config, logger, name, "xdp sniffer", bufSize, pkgconfig.DefaultMonitor)} + w.SetDefaultRoutes(next) + w.ReadConfig() + return w +} + +func (w *XDPSniffer) StartCollect() { + w.LogError("running collector failed...OS not supported!") + defer w.CollectDone() +} diff --git a/workers/statsd.go b/workers/statsd.go new file mode 100644 index 00000000..bf500b6b --- /dev/null +++ b/workers/statsd.go @@ 
w.LogFatal(pkgconfig.PrefixLogWorker + "[" + w.GetName() + "] statsd - invalid tls min version")
Transports: make(map[string]int), IPproto: make(map[string]int), + TopRcodes: topmap.NewTopMap(50), TopOperations: topmap.NewTopMap(50), TopIPproto: topmap.NewTopMap(50), TopRRtypes: topmap.NewTopMap(50), TopTransport: topmap.NewTopMap(50), + TotalPackets: 0, TotalSentBytes: 0, TotalReceivedBytes: 0, + } + } + + // global number of packets + w.Stats.Streams[dm.DNSTap.Identity].TotalPackets++ + + if dm.DNS.Type == dnsutils.DNSQuery { + w.Stats.Streams[dm.DNSTap.Identity].TotalReceivedBytes += dm.DNS.Length + } else { + w.Stats.Streams[dm.DNSTap.Identity].TotalSentBytes += dm.DNS.Length + } + + // count client and domains + if _, exists := w.Stats.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname]; !exists { + w.Stats.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname] = 1 + } else { + w.Stats.Streams[dm.DNSTap.Identity].Domains[dm.DNS.Qname] += 1 + } + if dm.DNS.Rcode == dnsutils.DNSRcodeNXDomain { + if _, exists := w.Stats.Streams[dm.DNSTap.Identity].Nxdomains[dm.DNS.Qname]; !exists { + w.Stats.Streams[dm.DNSTap.Identity].Nxdomains[dm.DNS.Qname] = 1 + } else { + w.Stats.Streams[dm.DNSTap.Identity].Nxdomains[dm.DNS.Qname] += 1 + } + } + if _, exists := w.Stats.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP]; !exists { + w.Stats.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP] = 1 + } else { + w.Stats.Streams[dm.DNSTap.Identity].Clients[dm.NetworkInfo.QueryIP] += 1 + } + + // record ip proto + if _, ok := w.Stats.Streams[dm.DNSTap.Identity].IPproto[dm.NetworkInfo.Family]; !ok { + w.Stats.Streams[dm.DNSTap.Identity].IPproto[dm.NetworkInfo.Family] = 1 + } else { + w.Stats.Streams[dm.DNSTap.Identity].IPproto[dm.NetworkInfo.Family]++ + } + w.Stats.Streams[dm.DNSTap.Identity].TopIPproto.Record( + dm.NetworkInfo.Family, + w.Stats.Streams[dm.DNSTap.Identity].IPproto[dm.NetworkInfo.Family], + ) + + // record transports + if _, ok := w.Stats.Streams[dm.DNSTap.Identity].Transports[dm.NetworkInfo.Protocol]; !ok { + 
w.Stats.Streams[dm.DNSTap.Identity].Transports[dm.NetworkInfo.Protocol] = 1 + } else { + w.Stats.Streams[dm.DNSTap.Identity].Transports[dm.NetworkInfo.Protocol]++ + } + w.Stats.Streams[dm.DNSTap.Identity].TopTransport.Record( + dm.NetworkInfo.Protocol, + w.Stats.Streams[dm.DNSTap.Identity].Transports[dm.NetworkInfo.Protocol], + ) + + // record rrtypes + if _, ok := w.Stats.Streams[dm.DNSTap.Identity].RRtypes[dm.DNS.Qtype]; !ok { + w.Stats.Streams[dm.DNSTap.Identity].RRtypes[dm.DNS.Qtype] = 1 + } else { + w.Stats.Streams[dm.DNSTap.Identity].RRtypes[dm.DNS.Qtype]++ + } + w.Stats.Streams[dm.DNSTap.Identity].TopRRtypes.Record( + dm.DNS.Qtype, + w.Stats.Streams[dm.DNSTap.Identity].RRtypes[dm.DNS.Qtype], + ) + + // record rcodes + if _, ok := w.Stats.Streams[dm.DNSTap.Identity].Rcodes[dm.DNS.Rcode]; !ok { + w.Stats.Streams[dm.DNSTap.Identity].Rcodes[dm.DNS.Rcode] = 1 + } else { + w.Stats.Streams[dm.DNSTap.Identity].Rcodes[dm.DNS.Rcode]++ + } + w.Stats.Streams[dm.DNSTap.Identity].TopRcodes.Record( + dm.DNS.Rcode, + w.Stats.Streams[dm.DNSTap.Identity].Rcodes[dm.DNS.Rcode], + ) + + // record operations + if _, ok := w.Stats.Streams[dm.DNSTap.Identity].Operations[dm.DNSTap.Operation]; !ok { + w.Stats.Streams[dm.DNSTap.Identity].Operations[dm.DNSTap.Operation] = 1 + } else { + w.Stats.Streams[dm.DNSTap.Identity].Operations[dm.DNSTap.Operation]++ + } + w.Stats.Streams[dm.DNSTap.Identity].TopOperations.Record( + dm.DNSTap.Operation, + w.Stats.Streams[dm.DNSTap.Identity].Operations[dm.DNSTap.Operation], + ) +} + +func (w *StatsdClient) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare transforms + subprocessors := transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0) + + // goroutine to process transformed dns 
// apply transforms, init dns message with additional parts if necessary
net.DialTimeout(w.GetConfig().Loggers.Statsd.Transport, address, connTimeout) + + case netutils.SocketTLS: + w.LogInfo("connecting to %s://%s", w.GetConfig().Loggers.Statsd.Transport, address) + + var tlsConfig *tls.Config + + tlsOptions := netutils.TLSOptions{ + InsecureSkipVerify: w.GetConfig().Loggers.Statsd.TLSInsecure, + MinVersion: w.GetConfig().Loggers.Statsd.TLSMinVersion, + CAFile: w.GetConfig().Loggers.Statsd.CAFile, + CertFile: w.GetConfig().Loggers.Statsd.CertFile, + KeyFile: w.GetConfig().Loggers.Statsd.KeyFile, + } + + tlsConfig, err = netutils.TLSClientConfig(tlsOptions) + if err == nil { + dialer := &net.Dialer{Timeout: connTimeout} + conn, err = tls.DialWithDialer(dialer, netutils.SocketTCP, address, tlsConfig) + } + default: + w.LogFatal("logger=statsd - invalid transport:", w.GetConfig().Loggers.Statsd.Transport) + } + + // something is wrong during connection ? + if err != nil { + w.LogError("dial error: %s", err) + } + + if conn != nil { + w.LogInfo("dialing with success, continue...") + + b := bufio.NewWriter(conn) + + prefix := w.GetConfig().Loggers.Statsd.Prefix + for streamID, stream := range w.Stats.Streams { + b.WriteString(fmt.Sprintf("%s_%s_total_bytes_received:%d|c\n", prefix, streamID, stream.TotalReceivedBytes)) + b.WriteString(fmt.Sprintf("%s_%s_total_bytes_sent:%d|c\n", prefix, streamID, stream.TotalSentBytes)) + + b.WriteString(fmt.Sprintf("%s_%s_total_requesters:%d|c\n", prefix, streamID, len(stream.Clients))) + + b.WriteString(fmt.Sprintf("%s_%s_total_domains:%d|c\n", prefix, streamID, len(stream.Domains))) + b.WriteString(fmt.Sprintf("%s_%s_total_domains_nx:%d|c\n", prefix, streamID, len(stream.Nxdomains))) + + b.WriteString(fmt.Sprintf("%s_%s_total_packets:%d|c\n", prefix, streamID, stream.TotalPackets)) + + // transport repartition + for _, v := range stream.TopTransport.Get() { + b.WriteString(fmt.Sprintf("%s_%s_total_packets_%s:%d|c\n", prefix, streamID, v.Name, v.Hit)) + } + + // ip proto repartition + for _, v := range 
stream.TopIPproto.Get() { + b.WriteString(fmt.Sprintf("%s_%s_total_packets_%s:%d|c\n", prefix, streamID, v.Name, v.Hit)) + } + + // qtypes repartition + for _, v := range stream.TopRRtypes.Get() { + b.WriteString(fmt.Sprintf("%s_%s_total_replies_rrtype_%s:%d|c\n", prefix, streamID, v.Name, v.Hit)) + } + + // top rcodes + for _, v := range stream.TopRcodes.Get() { + b.WriteString(fmt.Sprintf("%s_%s_total_replies_rcode_%s:%d|c\n", prefix, streamID, v.Name, v.Hit)) + } + } + + // send data + err = b.Flush() + if err != nil { + w.LogError("sent data error:", err.Error()) + } + } + + // reset the timer + t2.Reset(t2Interval) + } + } +} diff --git a/loggers/statsd_test.go b/workers/statsd_test.go similarity index 79% rename from loggers/statsd_test.go rename to workers/statsd_test.go index 91ae02d0..f9c53be6 100644 --- a/loggers/statsd_test.go +++ b/workers/statsd_test.go @@ -1,31 +1,31 @@ -package loggers +package workers import ( "net" "testing" "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" ) func TestStatsdRun(t *testing.T) { // init logger - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() config.Loggers.Statsd.FlushInterval = 1 g := NewStatsdClient(config, logger.New(false), "test") // fake msgpack receiver - fakeRcvr, err := net.ListenPacket(netlib.SocketUDP, "127.0.0.1:8125") + fakeRcvr, err := net.ListenPacket(netutils.SocketUDP, "127.0.0.1:8125") if err != nil { t.Fatal(err) } defer fakeRcvr.Close() // start the logger - go g.Run() + go g.StartCollect() // send fake dns message to logger dm := dnsutils.GetFakeDNSMessage() diff --git a/workers/stdout.go b/workers/stdout.go new file mode 100644 index 00000000..92f44c84 --- /dev/null +++ b/workers/stdout.go @@ -0,0 +1,205 @@ +package workers + +import ( + "bytes" + "encoding/json" + "io" 
+ "log" + "os" + "strings" + "time" + + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + "github.com/google/gopacket/pcapgo" +) + +func IsStdoutValidMode(mode string) bool { + switch mode { + case + pkgconfig.ModeText, + pkgconfig.ModeJSON, + pkgconfig.ModeFlatJSON, + pkgconfig.ModePCAP: + return true + } + return false +} + +type StdOut struct { + *GenericWorker + textFormat []string + writerText *log.Logger + writerPcap *pcapgo.Writer +} + +func NewStdOut(config *pkgconfig.Config, console *logger.Logger, name string) *StdOut { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.Stdout.ChannelBufferSize > 0 { + bufSize = config.Loggers.Stdout.ChannelBufferSize + } + w := &StdOut{GenericWorker: NewGenericWorker(config, console, name, "stdout", bufSize, pkgconfig.DefaultMonitor)} + w.writerText = log.New(os.Stdout, "", 0) + w.ReadConfig() + return w +} + +func (w *StdOut) ReadConfig() { + if !IsStdoutValidMode(w.GetConfig().Loggers.Stdout.Mode) { + w.LogFatal("invalid mode: ", w.GetConfig().Loggers.Stdout.Mode) + } + + if len(w.GetConfig().Loggers.Stdout.TextFormat) > 0 { + w.textFormat = strings.Fields(w.GetConfig().Loggers.Stdout.TextFormat) + } else { + w.textFormat = strings.Fields(w.GetConfig().Global.TextFormat) + } +} + +func (w *StdOut) SetTextWriter(b *bytes.Buffer) { + w.writerText = log.New(os.Stdout, "", 0) + w.writerText.SetOutput(b) +} + +func (w *StdOut) SetPcapWriter(pcapWriter io.Writer) { + w.LogInfo("init pcap writer") + + w.writerPcap = pcapgo.NewWriter(pcapWriter) + if err := w.writerPcap.WriteFileHeader(65536, layers.LinkTypeEthernet); err != nil { + w.LogFatal("pcap init error", err) + } +} + +func (w *StdOut) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + 
// apply transforms, init dns message with additional parts if necessary
w.LogError("process: flattening DNS message failed: %v", err)
b/workers/stdout_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bytes" @@ -9,15 +9,13 @@ import ( "github.com/dmachard/go-dnscollector/dnsutils" "github.com/dmachard/go-dnscollector/pkgconfig" - "github.com/dmachard/go-dnscollector/pkgutils" - "github.com/dmachard/go-dnscollector/processors" "github.com/dmachard/go-logger" "github.com/google/gopacket/pcapgo" ) func Test_StdoutTextMode(t *testing.T) { - cfg := pkgconfig.GetFakeConfig() + cfg := pkgconfig.GetDefaultConfig() testcases := []struct { name string @@ -30,14 +28,14 @@ func Test_StdoutTextMode(t *testing.T) { name: "default_delimiter", delimiter: cfg.Global.TextFormatDelimiter, boundary: cfg.Global.TextFormatBoundary, - qname: "dns.collector", + qname: pkgconfig.ProgQname, expected: "- collector CLIENT_QUERY NOERROR 1.2.3.4 1234 - - 0b dns.collector A -\n", }, { name: "custom_delimiter", delimiter: ";", boundary: cfg.Global.TextFormatBoundary, - qname: "dns.collector", + qname: pkgconfig.ProgQname, expected: "-;collector;CLIENT_QUERY;NOERROR;1.2.3.4;1234;-;-;0b;dns.collector;A;-\n", }, { @@ -68,14 +66,14 @@ func Test_StdoutTextMode(t *testing.T) { // init logger and redirect stdout output to bytes buffer var stdout bytes.Buffer - cfg := pkgconfig.GetFakeConfig() + cfg := pkgconfig.GetDefaultConfig() cfg.Global.TextFormatDelimiter = tc.delimiter cfg.Global.TextFormatBoundary = tc.boundary g := NewStdOut(cfg, logger.New(false), "test") g.SetTextWriter(&stdout) - go g.Run() + go g.StartCollect() // print dns message to stdout buffer dm := dnsutils.GetFakeDNSMessage() @@ -114,12 +112,12 @@ func Test_StdoutJsonMode(t *testing.T) { // init logger and redirect stdout output to bytes buffer var stdout bytes.Buffer - cfg := pkgconfig.GetFakeConfig() + cfg := pkgconfig.GetDefaultConfig() cfg.Loggers.Stdout.Mode = tc.mode g := NewStdOut(cfg, logger.New(false), "test") g.SetTextWriter(&stdout) - go g.Run() + go g.StartCollect() // print dns message to 
stdout buffer dm := dnsutils.GetFakeDNSMessage() @@ -144,13 +142,13 @@ func Test_StdoutPcapMode(t *testing.T) { var pcap bytes.Buffer // init logger and run - cfg := pkgconfig.GetFakeConfig() + cfg := pkgconfig.GetDefaultConfig() cfg.Loggers.Stdout.Mode = "pcap" g := NewStdOut(cfg, logger.New(false), "test") g.SetPcapWriter(&pcap) - go g.Run() + go g.StartCollect() // send DNSMessage to channel dm := dnsutils.GetFakeDNSMessageWithPayload() @@ -185,13 +183,13 @@ func Test_StdoutPcapMode_NoDNSPayload(t *testing.T) { var pcap bytes.Buffer // init logger and run - cfg := pkgconfig.GetFakeConfig() + cfg := pkgconfig.GetDefaultConfig() cfg.Loggers.Stdout.Mode = "pcap" g := NewStdOut(cfg, logger, "test") g.SetPcapWriter(&pcap) - go g.Run() + go g.StartCollect() // send DNSMessage to channel dm := dnsutils.GetFakeDNSMessage() @@ -219,16 +217,16 @@ func Test_StdoutBufferLoggerIsFull(t *testing.T) { // init logger and redirect stdout output to bytes buffer var stdout bytes.Buffer - cfg := pkgconfig.GetFakeConfig() + cfg := pkgconfig.GetDefaultConfig() g := NewStdOut(cfg, lg, "test") g.SetTextWriter(&stdout) // init next logger with a buffer of one element - nxt := pkgutils.NewFakeLoggerWithBufferSize(1) + nxt := GetWorkerForTest(pkgconfig.DefaultBufferOne) g.AddDefaultRoute(nxt) // run collector - go g.Run() + go g.StartCollect() // add a shot of dnsmessages to collector dmIn := dnsutils.GetFakeDNSMessage() @@ -241,7 +239,7 @@ func Test_StdoutBufferLoggerIsFull(t *testing.T) { for entry := range logsChan { fmt.Println(entry) - pattern := regexp.MustCompile(processors.ExpectedBufferMsg511) + pattern := regexp.MustCompile(pkgconfig.ExpectedBufferMsg511) if pattern.MatchString(entry.Message) { break } @@ -249,7 +247,7 @@ func Test_StdoutBufferLoggerIsFull(t *testing.T) { // read dns message from dnstap consumer dmOut := <-nxt.GetInputChannel() - if dmOut.DNS.Qname != processors.ExpectedQname2 { + if dmOut.DNS.Qname != pkgconfig.ExpectedQname2 { t.Errorf("invalid qname in dns 
message: %s", dmOut.DNS.Qname) } @@ -262,7 +260,7 @@ func Test_StdoutBufferLoggerIsFull(t *testing.T) { time.Sleep(12 * time.Second) for entry := range logsChan { fmt.Println(entry) - pattern := regexp.MustCompile(processors.ExpectedBufferMsg1023) + pattern := regexp.MustCompile(pkgconfig.ExpectedBufferMsg1023) if pattern.MatchString(entry.Message) { break } @@ -270,7 +268,7 @@ func Test_StdoutBufferLoggerIsFull(t *testing.T) { // read dns message from dnstap consumer dmOut2 := <-nxt.GetInputChannel() - if dmOut2.DNS.Qname != processors.ExpectedQname2 { + if dmOut2.DNS.Qname != pkgconfig.ExpectedQname2 { t.Errorf("invalid qname in second dns message: %s", dmOut2.DNS.Qname) } diff --git a/workers/syslog.go b/workers/syslog.go new file mode 100644 index 00000000..0283567c --- /dev/null +++ b/workers/syslog.go @@ -0,0 +1,343 @@ +package workers + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "time" + + "strings" + + syslog "github.com/dmachard/go-clientsyslog" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" +) + +type Syslog struct { + *GenericWorker + severity, facility syslog.Priority + syslogWriter *syslog.Writer + syslogReady bool + transportReady, transportReconnect chan bool + textFormat []string +} + +func NewSyslog(config *pkgconfig.Config, console *logger.Logger, name string) *Syslog { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.Syslog.ChannelBufferSize > 0 { + bufSize = config.Loggers.Syslog.ChannelBufferSize + } + w := &Syslog{GenericWorker: NewGenericWorker(config, console, name, "syslog", bufSize, pkgconfig.DefaultMonitor)} + w.transportReady = make(chan bool) + w.transportReconnect = make(chan bool) + w.ReadConfig() + return w +} + +func (w *Syslog) ReadConfig() { + if 
!netutils.IsValidTLS(w.GetConfig().Loggers.Syslog.TLSMinVersion) { + w.LogFatal(pkgconfig.PrefixLogWorker + "invalid tls min version") + } + + if !pkgconfig.IsValidMode(w.GetConfig().Loggers.Syslog.Mode) { + w.LogFatal(pkgconfig.PrefixLogWorker + "invalid mode text or json expected") + } + severity, err := syslog.GetPriority(w.GetConfig().Loggers.Syslog.Severity) + if err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker + "invalid severity") + } + w.severity = severity + + facility, err := syslog.GetPriority(w.GetConfig().Loggers.Syslog.Facility) + if err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker + "invalid facility") + } + w.facility = facility + + if len(w.GetConfig().Loggers.Syslog.TextFormat) > 0 { + w.textFormat = strings.Fields(w.GetConfig().Loggers.Syslog.TextFormat) + } else { + w.textFormat = strings.Fields(w.GetConfig().Global.TextFormat) + } +} + +func (w *Syslog) ConnectToRemote() { + for { + if w.syslogWriter != nil { + w.syslogWriter.Close() + w.syslogWriter = nil + } + + var logWriter *syslog.Writer + var tlsConfig *tls.Config + var err error + + switch w.GetConfig().Loggers.Syslog.Transport { + case "local": + w.LogInfo("connecting to local syslog...") + logWriter, err = syslog.New(w.facility|w.severity, "") + case netutils.SocketUnix: + w.LogInfo("connecting to %s://%s ...", + w.GetConfig().Loggers.Syslog.Transport, + w.GetConfig().Loggers.Syslog.RemoteAddress) + logWriter, err = syslog.Dial("", + w.GetConfig().Loggers.Syslog.RemoteAddress, w.facility|w.severity, + w.GetConfig().Loggers.Syslog.Tag) + case netutils.SocketUDP, netutils.SocketTCP: + w.LogInfo("connecting to %s://%s ...", + w.GetConfig().Loggers.Syslog.Transport, + w.GetConfig().Loggers.Syslog.RemoteAddress) + logWriter, err = syslog.Dial(w.GetConfig().Loggers.Syslog.Transport, + w.GetConfig().Loggers.Syslog.RemoteAddress, w.facility|w.severity, + w.GetConfig().Loggers.Syslog.Tag) + case netutils.SocketTLS: + w.LogInfo("connecting to %s://%s ...", + 
w.GetConfig().Loggers.Syslog.Transport, + w.GetConfig().Loggers.Syslog.RemoteAddress) + + tlsOptions := netutils.TLSOptions{ + InsecureSkipVerify: w.GetConfig().Loggers.Syslog.TLSInsecure, + MinVersion: w.GetConfig().Loggers.Syslog.TLSMinVersion, + CAFile: w.GetConfig().Loggers.Syslog.CAFile, + CertFile: w.GetConfig().Loggers.Syslog.CertFile, + KeyFile: w.GetConfig().Loggers.Syslog.KeyFile, + } + + tlsConfig, err = netutils.TLSClientConfig(tlsOptions) + if err == nil { + logWriter, err = syslog.DialWithTLSConfig(w.GetConfig().Loggers.Syslog.Transport, + w.GetConfig().Loggers.Syslog.RemoteAddress, w.facility|w.severity, + w.GetConfig().Loggers.Syslog.Tag, + tlsConfig) + } + default: + w.LogFatal("invalid syslog transport: ", w.GetConfig().Loggers.Syslog.Transport) + } + + // something is wrong during connection ? + if err != nil { + w.LogError("%s", err) + w.LogInfo("retry to connect in %d seconds", w.GetConfig().Loggers.Syslog.RetryInterval) + time.Sleep(time.Duration(w.GetConfig().Loggers.Syslog.RetryInterval) * time.Second) + continue + } + + w.syslogWriter = logWriter + + // set syslog format + switch strings.ToLower(w.GetConfig().Loggers.Syslog.Formatter) { + case "unix": + w.syslogWriter.SetFormatter(syslog.UnixFormatter) + case "rfc3164": + w.syslogWriter.SetFormatter(syslog.RFC3164Formatter) + case "rfc5424", "": + w.syslogWriter.SetFormatter(syslog.RFC5424Formatter) + } + + // set syslog framer + switch strings.ToLower(w.GetConfig().Loggers.Syslog.Framer) { + case "none", "": + w.syslogWriter.SetFramer(syslog.DefaultFramer) + case "rfc5425": + w.syslogWriter.SetFramer(syslog.RFC5425MessageLengthFramer) + } + + // custom hostname + if len(w.GetConfig().Loggers.Syslog.Hostname) > 0 { + w.syslogWriter.SetHostname(w.GetConfig().Loggers.Syslog.Hostname) + } + // custom program name + if len(w.GetConfig().Loggers.Syslog.AppName) > 0 { + w.syslogWriter.SetProgram(w.GetConfig().Loggers.Syslog.AppName) + } + + // notify process that the transport is ready + // block 
the loop until a reconnect is needed + w.transportReady <- true + w.transportReconnect <- true + } +} + +func (w *Syslog) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare transforms + subprocessors := transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0) + + // goroutine to process transformed dns messages + go w.StartLogging() + + // init remote conn + go w.ConnectToRemote() + + // loop to process incoming messages + for { + select { + case <-w.OnStop(): + w.StopLogger() + subprocessors.Reset() + return + + // new config provided? + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.ReadConfig() + subprocessors.ReloadConfig(&cfg.OutgoingTransformers) + + case dm, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("input channel closed!") + return + } + // count global messages + w.CountIngressTraffic() + + // apply tranforms, init dns message with additionnals parts if necessary + transformResult, err := subprocessors.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // send to output channel + w.CountEgressTraffic() + w.GetOutputChannel() <- dm + + // send to next ? 
+ w.SendForwardedTo(defaultRoutes, defaultNames, dm) + } + } +} + +func (w *Syslog) FlushBuffer(buf *[]dnsutils.DNSMessage) { + buffer := new(bytes.Buffer) + var err error + + for _, dm := range *buf { + switch w.GetConfig().Loggers.Syslog.Mode { + case pkgconfig.ModeText: + // write the text line to the buffer + buffer.Write(dm.Bytes(w.textFormat, w.GetConfig().Global.TextFormatDelimiter, w.GetConfig().Global.TextFormatBoundary)) + + // replace NULL char from text line directly in the buffer + // because the NULL is a end of log in syslog + for i := 0; i < buffer.Len(); i++ { + if buffer.Bytes()[i] == 0 { + buffer.Bytes()[i] = w.GetConfig().Loggers.Syslog.ReplaceNullChar[0] + } + } + + // ensure it ends in a \n + buffer.WriteString("\n") + + // write the modified content of the buffer to s.syslogWriter + // and reset the buffer + _, err = buffer.WriteTo(w.syslogWriter) + + case pkgconfig.ModeJSON: + // encode to json the dns message + json.NewEncoder(buffer).Encode(dm) + + // write the content of the buffer to s.syslogWriter + // and reset the buffer + _, err = buffer.WriteTo(w.syslogWriter) + + case pkgconfig.ModeFlatJSON: + // get flatten object + flat, errflat := dm.Flatten() + if errflat != nil { + w.LogError("flattening DNS message failed: %e", err) + continue + } + + // encode to json + json.NewEncoder(buffer).Encode(flat) + + // write the content of the buffer to s.syslogWriter + // and reset the buffer + _, err = buffer.WriteTo(w.syslogWriter) + } + + if err != nil { + w.LogError("write error %s", err) + w.syslogReady = false + <-w.transportReconnect + break + } + } + + // reset buffer + *buf = nil +} + +func (w *Syslog) StartLogging() { + w.LogInfo("logging has started") + defer w.LoggingDone() + + // init buffer + bufferDm := []dnsutils.DNSMessage{} + + // init flust timer for buffer + flushInterval := time.Duration(w.GetConfig().Loggers.Syslog.FlushInterval) * time.Second + flushTimer := time.NewTimer(flushInterval) + + w.LogInfo("processing dns 
messages...") + for { + select { + case <-w.OnLoggerStopped(): + // close connection + if w.syslogWriter != nil { + w.syslogWriter.Close() + } + return + + case <-w.transportReady: + w.LogInfo("syslog transport is ready") + w.syslogReady = true + + // incoming dns message to process + case dm, opened := <-w.GetOutputChannel(): + if !opened { + w.LogInfo("output channel closed!") + return + } + + // discar dns message if the connection is not ready + if !w.syslogReady { + continue + } + // append dns message to buffer + bufferDm = append(bufferDm, dm) + + // buffer is full ? + if len(bufferDm) >= w.GetConfig().Loggers.Syslog.BufferSize { + w.FlushBuffer(&bufferDm) + } + + // flush the buffer + case <-flushTimer.C: + if !w.syslogReady { + bufferDm = nil + } + + if len(bufferDm) > 0 { + w.FlushBuffer(&bufferDm) + } + + // restart timer + flushTimer.Reset(flushInterval) + } + } +} diff --git a/loggers/syslog_test.go b/workers/syslog_test.go similarity index 89% rename from loggers/syslog_test.go rename to workers/syslog_test.go index 9e4a2901..a4807997 100644 --- a/loggers/syslog_test.go +++ b/workers/syslog_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" @@ -8,9 +8,9 @@ import ( "time" "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" ) func Test_SyslogRunUdp(t *testing.T) { @@ -25,16 +25,16 @@ func Test_SyslogRunUdp(t *testing.T) { }{ { name: "unix_format", - transport: netlib.SocketUDP, + transport: netutils.SocketUDP, mode: pkgconfig.ModeText, - formatter: netlib.SocketUnix, + formatter: netutils.SocketUnix, framer: "", pattern: `<30>\D+ \d+ \d+:\d+:\d+.*`, listenAddr: ":4000", }, { name: "rfc3164_format", - transport: netlib.SocketUDP, + transport: netutils.SocketUDP, mode: pkgconfig.ModeText, formatter: "rfc3164", framer: "", @@ -43,7 +43,7 
@@ func Test_SyslogRunUdp(t *testing.T) { }, { name: "rfc5424_format", - transport: netlib.SocketUDP, + transport: netutils.SocketUDP, mode: pkgconfig.ModeText, formatter: "rfc5424", framer: "", @@ -52,7 +52,7 @@ func Test_SyslogRunUdp(t *testing.T) { }, { name: "rfc5424_format_rfc5425_framer", - transport: netlib.SocketUDP, + transport: netutils.SocketUDP, mode: pkgconfig.ModeText, formatter: "rfc5424", framer: "rfc5425", @@ -64,7 +64,7 @@ func Test_SyslogRunUdp(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { // init logger - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() config.Loggers.Syslog.Transport = tc.transport config.Loggers.Syslog.RemoteAddress = tc.listenAddr config.Loggers.Syslog.Mode = tc.mode @@ -83,7 +83,7 @@ func Test_SyslogRunUdp(t *testing.T) { defer fakeRcvr.Close() // start the logger - go g.Run() + go g.StartCollect() // send fake dns message to logger time.Sleep(time.Second) @@ -121,16 +121,16 @@ func Test_SyslogRunTcp(t *testing.T) { }{ { name: "unix_format", - transport: netlib.SocketTCP, + transport: netutils.SocketTCP, mode: pkgconfig.ModeText, - formatter: netlib.SocketUnix, + formatter: netutils.SocketUnix, framer: "", pattern: `<30>\D+ \d+ \d+:\d+:\d+.*`, listenAddr: ":4000", }, { name: "rfc3164_format", - transport: netlib.SocketTCP, + transport: netutils.SocketTCP, mode: pkgconfig.ModeText, formatter: "rfc3164", framer: "", @@ -139,7 +139,7 @@ func Test_SyslogRunTcp(t *testing.T) { }, { name: "rfc5424_format", - transport: netlib.SocketTCP, + transport: netutils.SocketTCP, mode: pkgconfig.ModeText, formatter: "rfc5424", framer: "", @@ -148,7 +148,7 @@ func Test_SyslogRunTcp(t *testing.T) { }, { name: "rfc5425_format_rfc5425_framer", - transport: netlib.SocketTCP, + transport: netutils.SocketTCP, mode: pkgconfig.ModeText, formatter: "rfc5424", framer: "rfc5425", @@ -160,7 +160,7 @@ func Test_SyslogRunTcp(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t 
*testing.T) { // init logger - config := pkgconfig.GetFakeConfig() + config := pkgconfig.GetDefaultConfig() config.Loggers.Syslog.Transport = tc.transport config.Loggers.Syslog.RemoteAddress = tc.listenAddr config.Loggers.Syslog.Mode = tc.mode @@ -179,7 +179,7 @@ func Test_SyslogRunTcp(t *testing.T) { defer fakeRcvr.Close() // start the logger - go g.Run() + go g.StartCollect() // accept conn from logger conn, err := fakeRcvr.Accept() @@ -210,11 +210,11 @@ func Test_SyslogRunTcp(t *testing.T) { func Test_SyslogRun_RemoveNullCharacter(t *testing.T) { // init logger - config := pkgconfig.GetFakeConfig() - config.Loggers.Syslog.Transport = netlib.SocketUDP + config := pkgconfig.GetDefaultConfig() + config.Loggers.Syslog.Transport = netutils.SocketUDP config.Loggers.Syslog.RemoteAddress = ":4000" config.Loggers.Syslog.Mode = pkgconfig.ModeText - config.Loggers.Syslog.Formatter = netlib.SocketUnix + config.Loggers.Syslog.Formatter = netutils.SocketUnix config.Loggers.Syslog.Framer = "" config.Loggers.Syslog.FlushInterval = 1 config.Loggers.Syslog.BufferSize = 0 @@ -229,7 +229,7 @@ func Test_SyslogRun_RemoveNullCharacter(t *testing.T) { defer fakeRcvr.Close() // start the logger - go g.Run() + go g.StartCollect() // send fake dns message to logger time.Sleep(time.Second) diff --git a/workers/tcpclient.go b/workers/tcpclient.go new file mode 100644 index 00000000..16211bb9 --- /dev/null +++ b/workers/tcpclient.go @@ -0,0 +1,326 @@ +package workers + +import ( + "bufio" + "crypto/tls" + "encoding/json" + "errors" + "io" + "net" + "strconv" + "strings" + "time" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/transformers" + "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" +) + +type TCPClient struct { + *GenericWorker + stopRead, doneRead chan bool + textFormat []string + transport string + transportWriter *bufio.Writer + 
transportConn net.Conn + transportReady, transportReconnect chan bool + writerReady bool +} + +func NewTCPClient(config *pkgconfig.Config, logger *logger.Logger, name string) *TCPClient { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Loggers.TCPClient.ChannelBufferSize > 0 { + bufSize = config.Loggers.TCPClient.ChannelBufferSize + } + w := &TCPClient{GenericWorker: NewGenericWorker(config, logger, name, "tcpclient", bufSize, pkgconfig.DefaultMonitor)} + w.transportReady = make(chan bool) + w.transportReconnect = make(chan bool) + w.stopRead = make(chan bool) + w.doneRead = make(chan bool) + w.ReadConfig() + return w +} + +func (w *TCPClient) ReadConfig() { + w.transport = w.GetConfig().Loggers.TCPClient.Transport + + // begin backward compatibility + if w.GetConfig().Loggers.TCPClient.TLSSupport { + w.transport = netutils.SocketTLS + } + if len(w.GetConfig().Loggers.TCPClient.SockPath) > 0 { + w.transport = netutils.SocketUnix + } + // end + + if len(w.GetConfig().Loggers.TCPClient.TextFormat) > 0 { + w.textFormat = strings.Fields(w.GetConfig().Loggers.TCPClient.TextFormat) + } else { + w.textFormat = strings.Fields(w.GetConfig().Global.TextFormat) + } +} + +func (w *TCPClient) Disconnect() { + if w.transportConn != nil { + w.LogInfo("closing tcp connection") + w.transportConn.Close() + } +} + +func (w *TCPClient) ReadFromConnection() { + buffer := make([]byte, 4096) + + go func() { + for { + _, err := w.transportConn.Read(buffer) + if err != nil { + if errors.Is(err, io.EOF) || errors.Is(err, net.ErrClosed) { + w.LogInfo("read from connection terminated") + break + } + w.LogError("Error on reading: %s", err.Error()) + } + // We just discard the data + } + }() + + // block goroutine until receive true event in stopRead channel + <-w.stopRead + w.doneRead <- true + + w.LogInfo("read goroutine terminated") +} + +func (w *TCPClient) ConnectToRemote() { + for { + if w.transportConn != nil { + w.transportConn.Close() + w.transportConn = nil + } + + 
address := w.GetConfig().Loggers.TCPClient.RemoteAddress + ":" + strconv.Itoa(w.GetConfig().Loggers.TCPClient.RemotePort) + connTimeout := time.Duration(w.GetConfig().Loggers.TCPClient.ConnectTimeout) * time.Second + + // make the connection + var conn net.Conn + var err error + + switch w.transport { + case netutils.SocketUnix: + address = w.GetConfig().Loggers.TCPClient.RemoteAddress + if len(w.GetConfig().Loggers.TCPClient.SockPath) > 0 { + address = w.GetConfig().Loggers.TCPClient.SockPath + } + w.LogInfo("connecting to %s://%s", w.transport, address) + conn, err = net.DialTimeout(w.transport, address, connTimeout) + + case netutils.SocketTCP: + w.LogInfo("connecting to %s://%s", w.transport, address) + conn, err = net.DialTimeout(w.transport, address, connTimeout) + + case netutils.SocketTLS: + w.LogInfo("connecting to %s://%s", w.transport, address) + + var tlsConfig *tls.Config + + tlsOptions := netutils.TLSOptions{ + InsecureSkipVerify: w.GetConfig().Loggers.TCPClient.TLSInsecure, + MinVersion: w.GetConfig().Loggers.TCPClient.TLSMinVersion, + CAFile: w.GetConfig().Loggers.TCPClient.CAFile, + CertFile: w.GetConfig().Loggers.TCPClient.CertFile, + KeyFile: w.GetConfig().Loggers.TCPClient.KeyFile, + } + + tlsConfig, err = netutils.TLSClientConfig(tlsOptions) + if err == nil { + dialer := &net.Dialer{Timeout: connTimeout} + conn, err = tls.DialWithDialer(dialer, netutils.SocketTCP, address, tlsConfig) + } + default: + w.LogFatal("invalid transport:", w.transport) + } + + // something is wrong during connection ? 
+ if err != nil { + w.LogError("%s", err) + w.LogInfo("retry to connect in %d seconds", w.GetConfig().Loggers.TCPClient.RetryInterval) + time.Sleep(time.Duration(w.GetConfig().Loggers.TCPClient.RetryInterval) * time.Second) + continue + } + + w.transportConn = conn + + // block until framestream is ready + w.transportReady <- true + + // block until an error occurred, need to reconnect + w.transportReconnect <- true + } +} + +func (w *TCPClient) FlushBuffer(buf *[]dnsutils.DNSMessage) { + for _, dm := range *buf { + if w.GetConfig().Loggers.TCPClient.Mode == pkgconfig.ModeText { + w.transportWriter.Write(dm.Bytes(w.textFormat, + w.GetConfig().Global.TextFormatDelimiter, + w.GetConfig().Global.TextFormatBoundary)) + w.transportWriter.WriteString(w.GetConfig().Loggers.TCPClient.PayloadDelimiter) + } + + if w.GetConfig().Loggers.TCPClient.Mode == pkgconfig.ModeJSON { + json.NewEncoder(w.transportWriter).Encode(dm) + w.transportWriter.WriteString(w.GetConfig().Loggers.TCPClient.PayloadDelimiter) + } + + if w.GetConfig().Loggers.TCPClient.Mode == pkgconfig.ModeFlatJSON { + flat, err := dm.Flatten() + if err != nil { + w.LogError("flattening DNS message failed: %e", err) + continue + } + json.NewEncoder(w.transportWriter).Encode(flat) + w.transportWriter.WriteString(w.GetConfig().Loggers.TCPClient.PayloadDelimiter) + } + + // flush the transport buffer + err := w.transportWriter.Flush() + if err != nil { + w.LogError("send frame error", err.Error()) + w.writerReady = false + <-w.transportReconnect + break + } + } + + // reset buffer + *buf = nil +} + +func (w *TCPClient) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // prepare next channels + defaultRoutes, defaultNames := GetRoutes(w.GetDefaultRoutes()) + droppedRoutes, droppedNames := GetRoutes(w.GetDroppedRoutes()) + + // prepare transforms + subprocessors := transformers.NewTransforms(&w.GetConfig().OutgoingTransformers, w.GetLogger(), w.GetName(), w.GetOutputChannelAsList(), 0) 
+ + // goroutine to process transformed dns messages + go w.StartLogging() + + // loop to process incoming messages + for { + select { + case <-w.OnStop(): + w.StopLogger() + subprocessors.Reset() + + w.stopRead <- true + <-w.doneRead + + return + + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + w.ReadConfig() + subprocessors.ReloadConfig(&cfg.OutgoingTransformers) + + case dm, opened := <-w.GetInputChannel(): + if !opened { + w.LogInfo("input channel closed!") + return + } + // count global messages + w.CountIngressTraffic() + + // apply tranforms, init dns message with additionnals parts if necessary + transformResult, err := subprocessors.ProcessMessage(&dm) + if err != nil { + w.LogError(err.Error()) + } + if transformResult == transformers.ReturnDrop { + w.SendDroppedTo(droppedRoutes, droppedNames, dm) + continue + } + + // send to output channel + w.CountEgressTraffic() + w.GetOutputChannel() <- dm + + // send to next ? + w.SendForwardedTo(defaultRoutes, defaultNames, dm) + } + } +} + +func (w *TCPClient) StartLogging() { + w.LogInfo("logging has started") + defer w.LoggingDone() + + // init buffer + bufferDm := []dnsutils.DNSMessage{} + + // init flust timer for buffer + flushInterval := time.Duration(w.GetConfig().Loggers.TCPClient.FlushInterval) * time.Second + flushTimer := time.NewTimer(flushInterval) + + // init remote conn + go w.ConnectToRemote() + + w.LogInfo("ready to process") + for { + select { + case <-w.OnLoggerStopped(): + // closing remote connection if exist + w.Disconnect() + return + + case <-w.transportReady: + w.LogInfo("transport connected with success") + w.transportWriter = bufio.NewWriter(w.transportConn) + w.writerReady = true + + // read from the connection until we stop + go w.ReadFromConnection() + + // incoming dns message to process + case dm, opened := <-w.GetOutputChannel(): + if !opened { + w.LogInfo("output channel closed!") + return + } + + // drop dns message if the connection is not ready to avoid memory leak or + // 
to block the channel + if !w.writerReady { + continue + } + + // append dns message to buffer + bufferDm = append(bufferDm, dm) + + // buffer is full ? + if len(bufferDm) >= w.GetConfig().Loggers.TCPClient.BufferSize { + w.FlushBuffer(&bufferDm) + } + + // flush the buffer + case <-flushTimer.C: + if !w.writerReady { + bufferDm = nil + } + + if len(bufferDm) > 0 { + w.FlushBuffer(&bufferDm) + } + + // restart timer + flushTimer.Reset(flushInterval) + + } + } +} diff --git a/loggers/tcpclient_test.go b/workers/tcpclient_test.go similarity index 90% rename from loggers/tcpclient_test.go rename to workers/tcpclient_test.go index 27fb5354..807749d9 100644 --- a/loggers/tcpclient_test.go +++ b/workers/tcpclient_test.go @@ -1,4 +1,4 @@ -package loggers +package workers import ( "bufio" @@ -8,9 +8,9 @@ import ( "time" "github.com/dmachard/go-dnscollector/dnsutils" - "github.com/dmachard/go-dnscollector/netlib" "github.com/dmachard/go-dnscollector/pkgconfig" "github.com/dmachard/go-logger" + "github.com/dmachard/go-netutils" ) func Test_TcpClientRun(t *testing.T) { @@ -34,7 +34,7 @@ func Test_TcpClientRun(t *testing.T) { for _, tc := range testcases { t.Run(tc.mode, func(t *testing.T) { // init logger - cfg := pkgconfig.GetFakeConfig() + cfg := pkgconfig.GetDefaultConfig() cfg.Loggers.TCPClient.FlushInterval = 1 cfg.Loggers.TCPClient.BufferSize = 0 cfg.Loggers.TCPClient.Mode = tc.mode @@ -44,14 +44,14 @@ func Test_TcpClientRun(t *testing.T) { g := NewTCPClient(cfg, logger.New(false), "test") // fake json receiver - fakeRcvr, err := net.Listen(netlib.SocketTCP, ":9999") + fakeRcvr, err := net.Listen(netutils.SocketTCP, ":9999") if err != nil { t.Fatal(err) } defer fakeRcvr.Close() // start the logger - go g.Run() + go g.StartCollect() // accept conn from logger conn, err := fakeRcvr.Accept() @@ -89,7 +89,7 @@ func Test_TcpClientRun(t *testing.T) { func Test_TcpClient_ConnectionAttempt(t *testing.T) { // init logger - cfg := 
pkgconfig.GetFakeConfig() + cfg := pkgconfig.GetDefaultConfig() cfg.Loggers.TCPClient.FlushInterval = 1 cfg.Loggers.TCPClient.Mode = pkgconfig.ModeText cfg.Loggers.TCPClient.RemoteAddress = "127.0.0.1" @@ -100,13 +100,13 @@ func Test_TcpClient_ConnectionAttempt(t *testing.T) { g := NewTCPClient(cfg, logger.New(true), "test") // start the logger - go g.Run() + go g.StartCollect() // just way to get connect attempt time.Sleep(time.Second * 3) // start receiver - fakeRcvr, err := net.Listen(netlib.SocketTCP, ":9999") + fakeRcvr, err := net.Listen(netutils.SocketTCP, ":9999") if err != nil { t.Fatal(err) } diff --git a/workers/tzsp.go b/workers/tzsp.go new file mode 100644 index 00000000..b65526fe --- /dev/null +++ b/workers/tzsp.go @@ -0,0 +1,25 @@ +//go:build windows || freebsd || darwin +// +build windows freebsd darwin + +package workers + +import ( + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" +) + +type TZSPSniffer struct { + *GenericWorker +} + +func NewTZSP(next []Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *TZSPSniffer { + w := &TZSPSniffer{GenericWorker: NewGenericWorker(config, logger, name, "tzsp", pkgconfig.DefaultBufferSize, pkgconfig.DefaultMonitor)} + w.SetDefaultRoutes(next) + w.ReadConfig() + return w +} + +func (w *TZSPSniffer) StartCollect() { + w.LogError("running collector failed...OS not supported!") + defer w.CollectDone() +} diff --git a/workers/tzsp_linux.go b/workers/tzsp_linux.go new file mode 100644 index 00000000..9d8ae598 --- /dev/null +++ b/workers/tzsp_linux.go @@ -0,0 +1,244 @@ +//go:build linux +// +build linux + +// Written by Noel Kuntze +// Updating by Denis Machard + +package workers + +import ( + "context" + "encoding/binary" + "errors" + "fmt" + "net" + "syscall" + "time" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" + 
"github.com/dmachard/go-netutils" + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + "github.com/rs/tzsp" +) + +type TZSPSniffer struct { + *GenericWorker + listen net.UDPConn +} + +func NewTZSP(next []Worker, config *pkgconfig.Config, logger *logger.Logger, name string) *TZSPSniffer { + bufSize := config.Global.Worker.ChannelBufferSize + if config.Collectors.Tzsp.ChannelBufferSize > 0 { + bufSize = config.Collectors.Tzsp.ChannelBufferSize + } + s := &TZSPSniffer{GenericWorker: NewGenericWorker(config, logger, name, "tzsp", bufSize, pkgconfig.DefaultMonitor)} + s.SetDefaultRoutes(next) + return s +} + +func (w *TZSPSniffer) Listen() error { + w.LogInfo("starting UDP server...") + + ServerAddr, err := net.ResolveUDPAddr("udp", + fmt.Sprintf("%s:%d", w.GetConfig().Collectors.Tzsp.ListenIP, w.GetConfig().Collectors.Tzsp.ListenPort), + ) + + if err != nil { + return err + } + + ServerConn, err := net.ListenUDP("udp", ServerAddr) + if err != nil { + return err + } + file, err := ServerConn.File() + + if err != nil { + return err + } + + err = syscall.SetsockoptInt(int(file.Fd()), syscall.SOL_SOCKET, syscall.SO_TIMESTAMPNS, 1) + if err != nil { + return err + } + + // calling File.Fd() disables the SetDeadline methods + err = syscall.SetNonblock(int(file.Fd()), true) + if err != nil { + return err + } + + w.LogInfo("is listening on %s", ServerConn.LocalAddr()) + w.listen = *ServerConn + return nil +} + +func (w *TZSPSniffer) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() + + // start server + if err := w.Listen(); err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] listening failed: ", err) + } + + // init dns processor + dnsProcessor := NewDNSProcessor(w.GetConfig(), w.GetLogger(), w.GetName(), w.GetConfig().Collectors.Tzsp.ChannelBufferSize) + dnsProcessor.SetDefaultRoutes(w.GetDefaultRoutes()) + dnsProcessor.SetDefaultDropped(w.GetDroppedRoutes()) + go 
dnsProcessor.StartCollect() + + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + + go func(ctx context.Context) { + defer func() { + dnsProcessor.Stop() + w.LogInfo("read data terminated") + defer close(done) + }() + + buf := make([]byte, 1024) + oob := make([]byte, 1024) + + var netErr net.Error + for { + select { + case <-ctx.Done(): + w.LogInfo("stopping UDP server...") + w.listen.Close() + return + default: + w.listen.SetReadDeadline(time.Now().Add(1 * time.Second)) + bufN, oobn, _, _, err := w.listen.ReadMsgUDPAddrPort(buf, oob) + if err != nil { + if errors.As(err, &netErr) && netErr.Timeout() { + continue + } + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] read msg", err) + } + if bufN == 0 { + w.LogFatal(pkgconfig.PrefixLogWorker + "[" + w.GetName() + "] read msg, buffer is empty") + } + if bufN > len(buf) { + w.LogFatal(pkgconfig.PrefixLogWorker + "[" + w.GetName() + "] read msg, bufer overflow") + } + if oobn == 0 { + w.LogFatal(pkgconfig.PrefixLogWorker + "[" + w.GetName() + "] read msg, oob missing") + } + scms, err := syscall.ParseSocketControlMessage(oob[:oobn]) + if err != nil { + w.LogFatal(pkgconfig.PrefixLogWorker+"["+w.GetName()+"] parse control msg", err) + } + if len(scms) != 1 { + w.LogInfo("len(scms) != 1") + continue + } + scm := scms[0] + if scm.Header.Type != syscall.SCM_TIMESTAMPNS { + w.LogFatal(pkgconfig.PrefixLogWorker + "[" + w.GetName() + "] scm timestampns missing") + } + tsec := binary.LittleEndian.Uint32(scm.Data[:4]) + nsec := binary.LittleEndian.Uint32(scm.Data[8:12]) + + // copy packet data from buffer + pkt := make([]byte, bufN) + copy(pkt, buf[:bufN]) + + tzspPacket, err := tzsp.Parse(pkt) + + if err != nil { + w.LogError("Failed to parse packet: ", err) + continue + } + + var eth layers.Ethernet + var ip4 layers.IPv4 + var ip6 layers.IPv6 + var tcp layers.TCP + var udp layers.UDP + parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, ð, &ip4, &ip6, &tcp, &udp) + 
decodedLayers := make([]gopacket.LayerType, 0, 4) + + // decode-it + parser.DecodeLayers(tzspPacket.Data, &decodedLayers) + + dm := dnsutils.DNSMessage{} + dm.Init() + + ignorePacket := false + for _, layertyp := range decodedLayers { + switch layertyp { + case layers.LayerTypeIPv4: + dm.NetworkInfo.Family = netutils.ProtoIPv4 + dm.NetworkInfo.QueryIP = ip4.SrcIP.String() + dm.NetworkInfo.ResponseIP = ip4.DstIP.String() + + case layers.LayerTypeIPv6: + dm.NetworkInfo.QueryIP = ip6.SrcIP.String() + dm.NetworkInfo.ResponseIP = ip6.DstIP.String() + dm.NetworkInfo.Family = netutils.ProtoIPv6 + + case layers.LayerTypeUDP: + dm.NetworkInfo.QueryPort = fmt.Sprint(int(udp.SrcPort)) + dm.NetworkInfo.ResponsePort = fmt.Sprint(int(udp.DstPort)) + dm.DNS.Payload = udp.Payload + dm.DNS.Length = len(udp.Payload) + dm.NetworkInfo.Protocol = netutils.ProtoUDP + + case layers.LayerTypeTCP: + if len(tcp.Payload) < 12 { + // packet way too short; 12 byte is the minimum size a DNS packet (header only, + // no questions, answers, authorities, or additional RRs) + continue + } + dnsLengthField := binary.BigEndian.Uint16(tcp.Payload[0:2]) + if len(tcp.Payload) < int(dnsLengthField) { + ignorePacket = true + continue + } + + dm.NetworkInfo.QueryPort = fmt.Sprint(int(tcp.SrcPort)) + dm.NetworkInfo.ResponsePort = fmt.Sprint(int(tcp.DstPort)) + dm.DNS.Payload = tcp.Payload[2:] + dm.DNS.Length = len(tcp.Payload[2:]) + dm.NetworkInfo.Protocol = netutils.ProtoTCP + } + } + + if !ignorePacket { + dm.DNSTap.Identity = w.GetConfig().GetServerIdentity() + + // set timestamp + dm.DNSTap.TimeSec = int(tsec) + dm.DNSTap.TimeNsec = int(nsec) + + // just decode QR + if len(dm.DNS.Payload) < 4 { + continue + } + + dnsProcessor.GetInputChannel() <- dm + } + } + } + }(ctx) + + // main loop + for { + select { + case <-w.OnStop(): + w.LogInfo("stopping read goroutine") + cancel() + <-done + return + + // save the new config + case cfg := <-w.NewConfig(): + w.SetConfig(cfg) + } + } +} diff --git 
a/workers/worker.go b/workers/worker.go new file mode 100644 index 00000000..40f5b6ba --- /dev/null +++ b/workers/worker.go @@ -0,0 +1,331 @@ +package workers + +import ( + "time" + + "github.com/dmachard/go-dnscollector/dnsutils" + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-dnscollector/telemetry" + "github.com/dmachard/go-logger" +) + +type Worker interface { + SetMetrics(metrics *telemetry.PrometheusCollector) + AddDefaultRoute(wrk Worker) + AddDroppedRoute(wrk Worker) + SetLoggers(loggers []Worker) + GetName() string + Stop() + StartCollect() + CountIngressTraffic() + CountEgressTraffic() + GetInputChannel() chan dnsutils.DNSMessage + ReadConfig() + ReloadConfig(config *pkgconfig.Config) +} + +type GenericWorker struct { + doneRun, stopRun, stopProcess, doneProcess, doneMonitor, stopMonitor chan bool + config *pkgconfig.Config + configChan chan *pkgconfig.Config + logger *logger.Logger + name, descr string + droppedRoutes, defaultRoutes []Worker + droppedWorker chan string + droppedWorkerCount map[string]int + dnsMessageIn, dnsMessageOut chan dnsutils.DNSMessage + + metrics *telemetry.PrometheusCollector + countIngress, countEgress, countForwarded, countDropped, countDiscarded chan int + totalIngress, totalEgress, totalForwarded, totalDropped, totalDiscarded int +} + +func NewGenericWorker(config *pkgconfig.Config, logger *logger.Logger, name string, descr string, bufferSize int, monitor bool) *GenericWorker { + logger.Info(pkgconfig.PrefixLogWorker+"[%s] %s - enabled", name, descr) + w := &GenericWorker{ + config: config, + configChan: make(chan *pkgconfig.Config), + logger: logger, + name: name, + descr: descr, + doneRun: make(chan bool), + doneMonitor: make(chan bool), + doneProcess: make(chan bool), + stopRun: make(chan bool), + stopMonitor: make(chan bool), + stopProcess: make(chan bool), + droppedWorker: make(chan string), + droppedWorkerCount: map[string]int{}, + dnsMessageIn: make(chan 
dnsutils.DNSMessage, bufferSize), + dnsMessageOut: make(chan dnsutils.DNSMessage, bufferSize), + countIngress: make(chan int), + countEgress: make(chan int), + countDiscarded: make(chan int), + countForwarded: make(chan int), + countDropped: make(chan int), + } + if monitor { + go w.Monitor() + } + return w +} + +func (w *GenericWorker) SetMetrics(metrics *telemetry.PrometheusCollector) { + w.metrics = metrics +} + +func (w *GenericWorker) GetName() string { return w.name } + +func (w *GenericWorker) GetConfig() *pkgconfig.Config { return w.config } + +func (w *GenericWorker) SetConfig(config *pkgconfig.Config) { w.config = config } + +func (w *GenericWorker) ReadConfig() {} + +func (w *GenericWorker) NewConfig() chan *pkgconfig.Config { return w.configChan } + +func (w *GenericWorker) GetLogger() *logger.Logger { return w.logger } + +func (w *GenericWorker) GetDroppedRoutes() []Worker { return w.droppedRoutes } + +func (w *GenericWorker) GetDefaultRoutes() []Worker { return w.defaultRoutes } + +func (w *GenericWorker) GetInputChannel() chan dnsutils.DNSMessage { return w.dnsMessageIn } + +func (w *GenericWorker) GetInputChannelAsList() []chan dnsutils.DNSMessage { + listChannel := []chan dnsutils.DNSMessage{} + listChannel = append(listChannel, w.GetInputChannel()) + return listChannel +} + +func (w *GenericWorker) GetOutputChannel() chan dnsutils.DNSMessage { return w.dnsMessageOut } + +func (w *GenericWorker) GetOutputChannelAsList() []chan dnsutils.DNSMessage { + listChannel := []chan dnsutils.DNSMessage{} + listChannel = append(listChannel, w.GetOutputChannel()) + return listChannel +} + +func (w *GenericWorker) AddDroppedRoute(wrk Worker) { + w.droppedRoutes = append(w.droppedRoutes, wrk) +} + +func (w *GenericWorker) AddDefaultRoute(wrk Worker) { + w.defaultRoutes = append(w.defaultRoutes, wrk) +} + +func (w *GenericWorker) SetDefaultRoutes(workers []Worker) { + w.defaultRoutes = workers +} + +func (w *GenericWorker) SetDefaultDropped(workers []Worker) { + 
w.droppedRoutes = workers +} + +func (w *GenericWorker) SetLoggers(loggers []Worker) { w.defaultRoutes = loggers } + +func (w *GenericWorker) Loggers() ([]chan dnsutils.DNSMessage, []string) { + return GetRoutes(w.defaultRoutes) +} + +func (w *GenericWorker) ReloadConfig(config *pkgconfig.Config) { + w.LogInfo("reload configuration...") + w.configChan <- config +} + +func (w *GenericWorker) LogInfo(msg string, v ...interface{}) { + w.logger.Info(pkgconfig.PrefixLogWorker+"["+w.name+"] "+w.descr+" - "+msg, v...) +} + +func (w *GenericWorker) LogError(msg string, v ...interface{}) { + w.logger.Error(pkgconfig.PrefixLogWorker+"["+w.name+"] "+w.descr+" - "+msg, v...) +} + +func (w *GenericWorker) LogFatal(v ...interface{}) { + w.logger.Fatal(v...) +} + +func (w *GenericWorker) OnStop() chan bool { + return w.stopRun +} + +func (w *GenericWorker) OnLoggerStopped() chan bool { + return w.stopProcess +} + +func (w *GenericWorker) StopLogger() { + w.stopProcess <- true + <-w.doneProcess +} + +func (w *GenericWorker) CollectDone() { + w.LogInfo("collection terminated") + w.doneRun <- true +} + +func (w *GenericWorker) LoggingDone() { + w.LogInfo("logging terminated") + w.doneProcess <- true +} + +func (w *GenericWorker) Stop() { + w.LogInfo("stopping monitor...") + w.stopMonitor <- true + <-w.doneMonitor + + w.LogInfo("stopping collect...") + w.stopRun <- true + <-w.doneRun +} + +func (w *GenericWorker) Monitor() { + defer func() { + if r := recover(); r != nil { + w.LogError("monitor - recovered panic: %v", r) + } + w.LogInfo("monitor terminated") + w.doneMonitor <- true + }() + + w.LogInfo("starting monitoring - refresh every %ds", w.config.Global.Worker.InternalMonitor) + timerMonitor := time.NewTimer(time.Duration(w.config.Global.Worker.InternalMonitor) * time.Second) + for { + select { + case <-w.countDiscarded: + w.totalDiscarded++ + + case <-w.countIngress: + w.totalIngress++ + + case <-w.countEgress: + w.totalEgress++ + + case <-w.countForwarded: + 
w.totalForwarded++ + + case <-w.countDropped: + w.totalDropped++ + + case loggerName := <-w.droppedWorker: + if _, ok := w.droppedWorkerCount[loggerName]; !ok { + w.droppedWorkerCount[loggerName] = 1 + } else { + w.droppedWorkerCount[loggerName]++ + } + + case <-w.stopMonitor: + close(w.droppedWorker) + timerMonitor.Stop() + return + + case <-timerMonitor.C: + for v, k := range w.droppedWorkerCount { + if k > 0 { + w.LogError("worker[%s] buffer is full, %d dnsmessage(s) dropped", v, k) + w.droppedWorkerCount[v] = 0 + } + } + + // // send to telemetry? + if w.config.Global.Telemetry.Enabled && w.metrics != nil { + if w.totalIngress > 0 || w.totalForwarded > 0 || w.totalDropped > 0 { + w.metrics.Record <- telemetry.WorkerStats{ + Name: w.GetName(), + TotalIngress: w.totalIngress, + TotalEgress: w.totalEgress, + TotalForwardedPolicy: w.totalForwarded, + TotalDroppedPolicy: w.totalDropped, + TotalDiscarded: w.totalDiscarded, + } + w.totalIngress = 0 + w.totalEgress = 0 + w.totalForwarded = 0 + w.totalDropped = 0 + w.totalDiscarded = 0 + } + } + + timerMonitor.Reset(time.Duration(w.config.Global.Worker.InternalMonitor) * time.Second) + } + } +} + +func (w *GenericWorker) WorkerIsBusy(name string) { + w.droppedWorker <- name +} + +func (w *GenericWorker) StartCollect() { + w.LogInfo("starting data collection") + defer w.CollectDone() +} + +func (w *GenericWorker) StartLogging() { + w.LogInfo("logging has started") + defer w.LoggingDone() +} + +func (w *GenericWorker) CountIngressTraffic() { + if w.config.Global.Telemetry.Enabled { + w.countIngress <- 1 + } +} + +func (w *GenericWorker) CountEgressTraffic() { + if w.config.Global.Telemetry.Enabled { + w.countEgress <- 1 + } +} + +func (w *GenericWorker) SendDroppedTo(routes []chan dnsutils.DNSMessage, routesName []string, dm dnsutils.DNSMessage) { + for i := range routes { + select { + case routes[i] <- dm: + if w.config.Global.Telemetry.Enabled { + w.countDropped <- 1 + } + default: + if w.config.Global.Telemetry.Enabled 
{ + w.countDiscarded <- 1 + } + w.WorkerIsBusy(routesName[i]) + } + } +} + +func (w *GenericWorker) SendForwardedTo(routes []chan dnsutils.DNSMessage, routesName []string, dm dnsutils.DNSMessage) { + for i := range routes { + select { + case routes[i] <- dm: + if w.config.Global.Telemetry.Enabled { + w.countForwarded <- 1 + } + default: + if w.config.Global.Telemetry.Enabled { + w.countDiscarded <- 1 + } + w.WorkerIsBusy(routesName[i]) + } + } +} + +func GetRoutes(routes []Worker) ([]chan dnsutils.DNSMessage, []string) { + channels := []chan dnsutils.DNSMessage{} + names := []string{} + for _, p := range routes { + if c := p.GetInputChannel(); c != nil { + channels = append(channels, c) + names = append(names, p.GetName()) + } else { + panic("default routing to stanza=[" + p.GetName() + "] not supported") + } + } + return channels, names +} + +func GetName(name string) string { + return "[" + name + "] - " +} + +func GetWorkerForTest(bufferSize int) *GenericWorker { + return NewGenericWorker(pkgconfig.GetDefaultConfig(), logger.New(false), "testonly", "", bufferSize, pkgconfig.WorkerMonitorDisabled) +} diff --git a/workers/workers_test.go b/workers/workers_test.go new file mode 100644 index 00000000..ed2e4bfd --- /dev/null +++ b/workers/workers_test.go @@ -0,0 +1,12 @@ +package workers + +import ( + "testing" + + "github.com/dmachard/go-dnscollector/pkgconfig" + "github.com/dmachard/go-logger" +) + +func TestGenericWorker(t *testing.T) { + NewGenericWorker(pkgconfig.GetDefaultConfig(), logger.New(false), "testonly", "", pkgconfig.DefaultBufferSize, pkgconfig.WorkerMonitorDisabled) +} diff --git a/xdp/add_headers.sh b/xdp/add_headers.sh deleted file mode 100755 index 043e432c..00000000 --- a/xdp/add_headers.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash - -# Version of libbpf to fetch headers from -LIBBPF_VERSION=1.0.1 - -# The headers we want -prefix=libbpf-"$LIBBPF_VERSION" -headers=( - "$prefix"/src/bpf_endian.h - 
"$prefix"/src/bpf_helper_defs.h - "$prefix"/src/bpf_helpers.h -) - -# Fetch libbpf release and extract the desired headers -rm -rf headers/ && mkdir headers/ && cd headers/ -curl -sL "https://github.com/libbpf/libbpf/archive/refs/tags/v${LIBBPF_VERSION}.tar.gz" | \ - tar -xz --xform='s#.*/##' "${headers[@]}" - -# generate vmlinux -bpftool btf dump file /sys/kernel/btf/vmlinux format c > vmlinux.h \ No newline at end of file diff --git a/xdp/bpf_bpfeb.go b/xdp/bpf_bpfeb.go deleted file mode 100644 index 38e3b1ed..00000000 --- a/xdp/bpf_bpfeb.go +++ /dev/null @@ -1,135 +0,0 @@ -// Code generated by bpf2go; DO NOT EDIT. -//go:build arm64be || armbe || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64 -// +build arm64be armbe mips mips64 mips64p32 ppc64 s390 s390x sparc sparc64 - -package xdp - -import ( - "bytes" - _ "embed" - "fmt" - "io" - - "github.com/cilium/ebpf" -) - -type BpfPktEvent struct { - Timestamp uint64 - PktLen uint32 - PktOffset uint32 - IpVersion uint16 - IpProto uint16 - PayloadOffset uint16 - SrcAddr uint32 - SrcAddr6 [4]uint32 - SrcPort uint16 - DstAddr uint32 - DstAddr6 [4]uint32 - DstPort uint16 -} - -// loadBpf returns the embedded CollectionSpec for bpf. -func loadBpf() (*ebpf.CollectionSpec, error) { - reader := bytes.NewReader(_BpfBytes) - spec, err := ebpf.LoadCollectionSpecFromReader(reader) - if err != nil { - return nil, fmt.Errorf("can't load bpf: %w", err) - } - - return spec, err -} - -// loadBpfObjects loads bpf and converts it into a struct. -// -// The following types are suitable as obj argument: -// -// *bpfObjects -// *bpfPrograms -// *bpfMaps -// -// See ebpf.CollectionSpec.LoadAndAssign documentation for details. -func LoadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error { - spec, err := loadBpf() - if err != nil { - return err - } - - return spec.LoadAndAssign(obj, opts) -} - -// bpfSpecs contains maps and programs before they are loaded into the kernel. 
-// -// It can be passed ebpf.CollectionSpec.Assign. -type bpfSpecs struct { - bpfProgramSpecs - bpfMapSpecs -} - -// bpfSpecs contains programs before they are loaded into the kernel. -// -// It can be passed ebpf.CollectionSpec.Assign. -type bpfProgramSpecs struct { - XdpSniffer *ebpf.ProgramSpec `ebpf:"xdp_sniffer"` -} - -// bpfMapSpecs contains maps before they are loaded into the kernel. -// -// It can be passed ebpf.CollectionSpec.Assign. -type bpfMapSpecs struct { - Pkts *ebpf.MapSpec `ebpf:"pkts"` -} - -// bpfObjects contains all objects after they have been loaded into the kernel. -// -// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. -type BpfObjects struct { - bpfPrograms - bpfMaps -} - -func (o *BpfObjects) Close() error { - return _BpfClose( - &o.bpfPrograms, - &o.bpfMaps, - ) -} - -// bpfMaps contains all maps after they have been loaded into the kernel. -// -// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. -type bpfMaps struct { - Pkts *ebpf.Map `ebpf:"pkts"` -} - -func (m *bpfMaps) Close() error { - return _BpfClose( - m.Pkts, - ) -} - -// bpfPrograms contains all programs after they have been loaded into the kernel. -// -// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. -type bpfPrograms struct { - XdpSniffer *ebpf.Program `ebpf:"xdp_sniffer"` -} - -func (p *bpfPrograms) Close() error { - return _BpfClose( - p.XdpSniffer, - ) -} - -func _BpfClose(closers ...io.Closer) error { - for _, closer := range closers { - if err := closer.Close(); err != nil { - return err - } - } - return nil -} - -// Do not access this directly. 
-// -//go:embed bpf_bpfeb.o -var _BpfBytes []byte diff --git a/xdp/bpf_bpfeb.o b/xdp/bpf_bpfeb.o deleted file mode 100644 index 034b884f..00000000 Binary files a/xdp/bpf_bpfeb.o and /dev/null differ diff --git a/xdp/bpf_bpfel.go b/xdp/bpf_bpfel.go deleted file mode 100644 index d47c06af..00000000 --- a/xdp/bpf_bpfel.go +++ /dev/null @@ -1,135 +0,0 @@ -// Code generated by bpf2go; DO NOT EDIT. -//go:build 386 || amd64 || amd64p32 || arm || arm64 || mips64le || mips64p32le || mipsle || ppc64le || riscv64 -// +build 386 amd64 amd64p32 arm arm64 mips64le mips64p32le mipsle ppc64le riscv64 - -package xdp - -import ( - "bytes" - _ "embed" - "fmt" - "io" - - "github.com/cilium/ebpf" -) - -type BpfPktEvent struct { - Timestamp uint64 - PktLen uint32 - PktOffset uint32 - IpVersion uint16 - IpProto uint16 - PayloadOffset uint16 - SrcAddr uint32 - SrcAddr6 [4]uint32 - SrcPort uint16 - DstAddr uint32 - DstAddr6 [4]uint32 - DstPort uint16 -} - -// loadBpf returns the embedded CollectionSpec for bpf. -func loadBpf() (*ebpf.CollectionSpec, error) { - reader := bytes.NewReader(_BpfBytes) - spec, err := ebpf.LoadCollectionSpecFromReader(reader) - if err != nil { - return nil, fmt.Errorf("can't load bpf: %w", err) - } - - return spec, err -} - -// loadBpfObjects loads bpf and converts it into a struct. -// -// The following types are suitable as obj argument: -// -// *bpfObjects -// *bpfPrograms -// *bpfMaps -// -// See ebpf.CollectionSpec.LoadAndAssign documentation for details. -func LoadBpfObjects(obj interface{}, opts *ebpf.CollectionOptions) error { - spec, err := loadBpf() - if err != nil { - return err - } - - return spec.LoadAndAssign(obj, opts) -} - -// bpfSpecs contains maps and programs before they are loaded into the kernel. -// -// It can be passed ebpf.CollectionSpec.Assign. -type bpfSpecs struct { - bpfProgramSpecs - bpfMapSpecs -} - -// bpfSpecs contains programs before they are loaded into the kernel. -// -// It can be passed ebpf.CollectionSpec.Assign. 
-type bpfProgramSpecs struct { - XdpSniffer *ebpf.ProgramSpec `ebpf:"xdp_sniffer"` -} - -// bpfMapSpecs contains maps before they are loaded into the kernel. -// -// It can be passed ebpf.CollectionSpec.Assign. -type bpfMapSpecs struct { - Pkts *ebpf.MapSpec `ebpf:"pkts"` -} - -// bpfObjects contains all objects after they have been loaded into the kernel. -// -// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. -type BpfObjects struct { - bpfPrograms - bpfMaps -} - -func (o *BpfObjects) Close() error { - return _BpfClose( - &o.bpfPrograms, - &o.bpfMaps, - ) -} - -// bpfMaps contains all maps after they have been loaded into the kernel. -// -// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. -type bpfMaps struct { - Pkts *ebpf.Map `ebpf:"pkts"` -} - -func (m *bpfMaps) Close() error { - return _BpfClose( - m.Pkts, - ) -} - -// bpfPrograms contains all programs after they have been loaded into the kernel. -// -// It can be passed to loadBpfObjects or ebpf.CollectionSpec.LoadAndAssign. -type bpfPrograms struct { - XdpSniffer *ebpf.Program `ebpf:"xdp_sniffer"` -} - -func (p *bpfPrograms) Close() error { - return _BpfClose( - p.XdpSniffer, - ) -} - -func _BpfClose(closers ...io.Closer) error { - for _, closer := range closers { - if err := closer.Close(); err != nil { - return err - } - } - return nil -} - -// Do not access this directly. 
-// -//go:embed bpf_bpfel.o -var _BpfBytes []byte diff --git a/xdp/bpf_bpfel.o b/xdp/bpf_bpfel.o deleted file mode 100644 index 0eff94a2..00000000 Binary files a/xdp/bpf_bpfel.o and /dev/null differ diff --git a/xdp/xdp.go b/xdp/xdp.go deleted file mode 100644 index 8235d60c..00000000 --- a/xdp/xdp.go +++ /dev/null @@ -1,3 +0,0 @@ -package xdp - -//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -type pkt_event bpf xdp_dns_kern.c -- -I./headers diff --git a/xdp/xdp_dns_kern.c b/xdp/xdp_dns_kern.c deleted file mode 100644 index 08ebc843..00000000 --- a/xdp/xdp_dns_kern.c +++ /dev/null @@ -1,134 +0,0 @@ - -//go:build exclude - -#include "vmlinux.h" -#include "bpf_helpers.h" -#include "bpf_endian.h" - -char __license[] SEC("license") = "Dual MIT/GPL"; - -#define ETH_P_IP 0x0800 -#define ETH_P_IPV6 0x86DD - -// packet_info -struct pkt_event { - __u64 timestamp; - __u32 pkt_len; - __u32 pkt_offset; - __u16 ip_version; - __u16 ip_proto; - __u16 payload_offset; - __u32 src_addr; - __u32 src_addr6[4]; - __u16 src_port; - __u32 dst_addr; - __u32 dst_addr6[4]; - __u16 dst_port; -} __attribute__((packed)); -struct pkt_event *unused_event __attribute__((unused)); - - -struct { - __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); - __uint(key_size, sizeof(__u32)); - __uint(value_size, sizeof(__u32)); - __uint(max_entries, 4); -} pkts SEC(".maps"); - -SEC("xdp") -int xdp_sniffer(struct xdp_md *ctx) { - void *data_end = (void *)(long)ctx->data_end; - void *data = (void *)(long)ctx->data; - - __u32 offset = sizeof(struct ethhdr); - - struct pkt_event pkt = {}; - pkt.timestamp = bpf_ktime_get_ns(); - pkt.pkt_len = data_end - data; - pkt.pkt_offset = sizeof(struct pkt_event); - - // enough data to read ethernet header ? 
- if (data + offset > data_end) - return XDP_PASS; - - // handle ethernet packet - struct ethhdr *eth = data; - pkt.ip_version = bpf_htons(eth->h_proto); - - // handle only IPv4 or IPv6 traffic - if (pkt.ip_version != ETH_P_IP && pkt.ip_version != ETH_P_IPV6) - return XDP_PASS; - - // IPv4 - get L4 protocol - if (pkt.ip_version == ETH_P_IP) { - if (data + offset + sizeof(struct iphdr) > data_end) - return XDP_PASS; - - struct iphdr *ip4h = (data + offset); - pkt.ip_proto = ip4h->protocol; - pkt.src_addr = bpf_htonl(ip4h->saddr); - pkt.dst_addr = bpf_htonl(ip4h->daddr); - - offset += sizeof(struct iphdr); - } - - // IPv6 - get L4 protocol - if (pkt.ip_version == ETH_P_IPV6) { - if (data + offset + sizeof(struct ipv6hdr) > data_end) - return XDP_PASS; - - struct ipv6hdr *ip6h = (data + offset) ; - pkt.ip_proto = ip6h->nexthdr; - - offset += sizeof(struct ipv6hdr); - - __builtin_memcpy(pkt.src_addr6, ip6h->saddr.in6_u.u6_addr32, sizeof(pkt.src_addr6)); - __builtin_memcpy(pkt.dst_addr6, ip6h->daddr.in6_u.u6_addr32, sizeof(pkt.dst_addr6)); - } - - // handle only UDP or TCP traffic - if (pkt.ip_proto != IPPROTO_UDP && pkt.ip_proto != IPPROTO_TCP) - return XDP_PASS; - - // TCP - get destination and source port - if (pkt.ip_proto == IPPROTO_TCP) { - if (data + offset + sizeof(struct tcphdr) > data_end) - return XDP_PASS; - - struct tcphdr *tcp = data + offset; - pkt.src_port = bpf_ntohs(tcp->source); - pkt.dst_port = bpf_ntohs(tcp->dest); - - u8 tcp_flags = ((u8 *)tcp)[13]; - - // ignore syn and ack packet - if (tcp_flags != 0x18) { - return XDP_PASS; - } - - offset += tcp->doff*4; - } - - // UDP - get destination and source port - if (pkt.ip_proto == IPPROTO_UDP) { - if (data + offset + sizeof(struct udphdr) > data_end) - return XDP_PASS; - - struct udphdr *udp = data + offset; - pkt.src_port = bpf_ntohs(udp->source); - pkt.dst_port = bpf_ntohs(udp->dest); - - offset += sizeof(struct udphdr); - } - - // handle only dns packet - if ( pkt.src_port != 53 && pkt.dst_port != 
53) - return XDP_PASS; - - pkt.payload_offset = offset; - // write data in perf event - int ret = bpf_perf_event_output(ctx, &pkts, - BPF_F_CURRENT_CPU | ((__u64)pkt.pkt_len << 32), - &pkt, sizeof(pkt)); - return XDP_PASS; -} \ No newline at end of file